id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
22,119 | import sys
import re
import numpy as np
import cv2
import torch
from PIL import Image
from .pallete import get_mask_pallete
The provided code snippet includes necessary dependencies for implementing the `resize_depth` function. Write a Python function `def resize_depth(depth, width, height)` to solve the following problem:
Resize depth map and bring to CPU (numpy). Args: depth (tensor): depth width (int): image width height (int): image height Returns: array: processed depth
Here is the function:
def resize_depth(depth, width, height):
    """Resize depth map and bring to CPU (numpy).

    Args:
        depth (tensor): depth prediction; the leading batch entry (index 0)
            is taken and squeezed before conversion
        width (int): target image width
        height (int): target image height

    Returns:
        array: resized depth map as a numpy array
    """
    # Move the squeezed prediction off the accelerator before numpy conversion.
    depth_cpu = torch.squeeze(depth[0, :, :, :]).to("cpu")
    # cv2.resize takes the target size as (width, height).
    return cv2.resize(
        depth_cpu.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
    )
22,120 | import os
import glob
import cv2
import argparse
import torch
import torch.nn.functional as F
import util.io
from torchvision.transforms import Compose
from dpt.models import DPTSegmentationModel
from dpt.transforms import Resize, NormalizeImage, PrepareForNet
class DPTSegmentationModel(DPT):
    """DPT backbone with a segmentation head plus an auxiliary head."""

    def __init__(self, num_classes, path=None, **kwargs):
        feat = kwargs.get("features", 256)
        kwargs["use_bn"] = True

        # Main head: conv -> BN -> ReLU -> dropout -> 1x1 conv -> 2x upsample.
        head = nn.Sequential(
            nn.Conv2d(feat, feat, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(feat),
            nn.ReLU(True),
            nn.Dropout(0.1, False),
            nn.Conv2d(feat, num_classes, kernel_size=1),
            Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
        )

        super().__init__(head, **kwargs)

        # Auxiliary head (no upsampling), e.g. for deep supervision.
        self.auxlayer = nn.Sequential(
            nn.Conv2d(feat, feat, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(feat),
            nn.ReLU(True),
            nn.Dropout(0.1, False),
            nn.Conv2d(feat, num_classes, kernel_size=1),
        )

        if path is not None:
            self.load(path)
class Resize(object):
    """Resize sample to given size (width, height)."""

    def __init__(
        self,
        width,
        height,
        resize_target=True,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resize_method="lower_bound",
        image_interpolation_method=cv2.INTER_AREA,
    ):
        """Init.

        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height is constrained to be a multiple of
                this parameter. Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at most as large as the given size.
                "minimal": Scale as little as possible.
                Defaults to "lower_bound".
            image_interpolation_method: cv2 interpolation flag for the image.
        """
        self._width = width
        self._height = height
        self._resize_target = resize_target
        self._keep_aspect_ratio = keep_aspect_ratio
        self._multiple_of = ensure_multiple_of
        self._resize_method = resize_method
        self._interp = image_interpolation_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        """Round x to the nearest multiple of the configured step, within bounds."""
        m = self._multiple_of
        y = (np.round(x / m) * m).astype(int)
        # Fall back to floor when rounding overshot the upper bound...
        if max_val is not None and y > max_val:
            y = (np.floor(x / m) * m).astype(int)
        # ...and to ceil when the result dropped below the lower bound.
        if y < min_val:
            y = (np.ceil(x / m) * m).astype(int)
        return y

    def get_size(self, width, height):
        """Compute output (width, height) for an input of the given size."""
        scale_height = self._height / height
        scale_width = self._width / width

        if self._keep_aspect_ratio:
            method = self._resize_method
            if method == "lower_bound":
                # Output must cover the requested size: use the larger scale.
                scale_width = scale_height = max(scale_width, scale_height)
            elif method == "upper_bound":
                # Output must fit inside the requested size: use the smaller scale.
                scale_width = scale_height = min(scale_width, scale_height)
            elif method == "minimal":
                # Use whichever scale perturbs the image the least.
                if abs(1 - scale_width) < abs(1 - scale_height):
                    scale_height = scale_width
                else:
                    scale_width = scale_height
            else:
                raise ValueError(f"resize_method {method} not implemented")

        if self._resize_method == "lower_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, min_val=self._height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, min_val=self._width
            )
        elif self._resize_method == "upper_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, max_val=self._height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, max_val=self._width
            )
        elif self._resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(f"resize_method {self._resize_method} not implemented")

        return (new_width, new_height)

    def __call__(self, sample):
        """Resize sample['image'] (and, optionally, targets and mask) in place."""
        new_w, new_h = self.get_size(
            sample["image"].shape[1], sample["image"].shape[0]
        )

        sample["image"] = cv2.resize(
            sample["image"], (new_w, new_h), interpolation=self._interp
        )

        if self._resize_target:
            # Targets use nearest-neighbour so discrete values are preserved.
            if "disparity" in sample:
                sample["disparity"] = cv2.resize(
                    sample["disparity"],
                    (new_w, new_h),
                    interpolation=cv2.INTER_NEAREST,
                )
            if "depth" in sample:
                sample["depth"] = cv2.resize(
                    sample["depth"], (new_w, new_h), interpolation=cv2.INTER_NEAREST
                )
            mask = cv2.resize(
                sample["mask"].astype(np.float32),
                (new_w, new_h),
                interpolation=cv2.INTER_NEAREST,
            )
            sample["mask"] = mask.astype(bool)

        return sample
class NormalizeImage(object):
    """Normalize image by given mean and std."""

    def __init__(self, mean, std):
        # Stored once; applied element-wise on every call.
        self._mean = mean
        self._std = std

    def __call__(self, sample):
        sample["image"] = (sample["image"] - self._mean) / self._std
        return sample
class PrepareForNet(object):
    """Prepare sample for usage as network input."""

    def __init__(self):
        pass

    def __call__(self, sample):
        # HWC -> CHW, contiguous float32, as the network expects.
        chw = np.transpose(sample["image"], (2, 0, 1))
        sample["image"] = np.ascontiguousarray(chw).astype(np.float32)

        if "mask" in sample:
            sample["mask"] = np.ascontiguousarray(sample["mask"].astype(np.float32))

        # Dense targets are converted to contiguous float32 as well.
        for key in ("disparity", "depth"):
            if key in sample:
                sample[key] = np.ascontiguousarray(sample[key].astype(np.float32))

        return sample
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run(input_path, output_path, model_path, model_type="dpt_hybrid", optimize=True)` to solve the following problem:
Run segmentation network Args: input_path (str): path to input folder output_path (str): path to output folder model_path (str): path to saved model
Here is the function:
def run(input_path, output_path, model_path, model_type="dpt_hybrid", optimize=True):
    """Run segmentation network

    Args:
        input_path (str): path to input folder
        output_path (str): path to output folder
        model_path (str): path to saved model
        model_type (str): "dpt_large" or "dpt_hybrid" backbone variant
        optimize (bool): if True and running on CUDA, use channels-last
            memory format and half precision
    """
    print("initialize")

    # select device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device: %s" % device)

    # network input resolution for both model variants
    net_w = net_h = 480

    # load network (150 output classes; NOTE(review): presumably the ADE20K
    # label set -- confirm against the checkpoint)
    if model_type == "dpt_large":
        model = DPTSegmentationModel(
            150,
            path=model_path,
            backbone="vitl16_384",
        )
    elif model_type == "dpt_hybrid":
        model = DPTSegmentationModel(
            150,
            path=model_path,
            backbone="vitb_rn50_384",
        )
    else:
        assert (
            False
        ), f"model_type '{model_type}' not implemented, use: --model_type [dpt_large|dpt_hybrid]"

    # preprocessing: keep aspect ratio, snap dims to multiples of 32
    # (required by the ViT patch grid), then normalize to [-1, 1]
    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                resize_target=None,
                keep_aspect_ratio=True,
                ensure_multiple_of=32,
                resize_method="minimal",
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
            PrepareForNet(),
        ]
    )

    model.eval()

    # optional CUDA-only speed-ups: channels-last memory format + fp16
    if optimize == True and device == torch.device("cuda"):
        model = model.to(memory_format=torch.channels_last)
        model = model.half()

    model.to(device)

    # get input
    img_names = glob.glob(os.path.join(input_path, "*"))
    num_images = len(img_names)

    # create output folder
    os.makedirs(output_path, exist_ok=True)

    print("start processing")

    for ind, img_name in enumerate(img_names):
        print(" processing {} ({}/{})".format(img_name, ind + 1, num_images))

        # input
        img = util.io.read_image(img_name)
        img_input = transform({"image": img})["image"]

        # compute
        with torch.no_grad():
            sample = torch.from_numpy(img_input).to(device).unsqueeze(0)

            # input must match the model's memory format / dtype
            if optimize == True and device == torch.device("cuda"):
                sample = sample.to(memory_format=torch.channels_last)
                sample = sample.half()

            out = model.forward(sample)

            # upsample logits back to the original image resolution
            prediction = torch.nn.functional.interpolate(
                out, size=img.shape[:2], mode="bicubic", align_corners=False
            )
            # +1 shifts argmax class indices to a 1-based label convention
            # NOTE(review): presumably matches the colour palette used by
            # write_segm_img -- confirm
            prediction = torch.argmax(prediction, dim=1) + 1
            prediction = prediction.squeeze().cpu().numpy()

        # output: original filename without extension, inside output_path
        filename = os.path.join(
            output_path, os.path.splitext(os.path.basename(img_name))[0]
        )
        util.io.write_segm_img(filename, img, prediction, alpha=0.5)

    print("finished")
22,121 | import torch
import os
import json
import copy
import numpy as np
from PIL import Image
from random import randint
from tqdm import tqdm
from diff_gaussian_rasterization import GaussianRasterizer as Renderer
from helpers import setup_camera, l1_loss_v1, l1_loss_v2, weighted_l2_loss_v1, weighted_l2_loss_v2, quat_mult, \
o3d_knn, params2rendervar, params2cpu, save_params
from external import calc_ssim, calc_psnr, build_rotation, densify, update_params_and_optimizer
def get_dataset(t, md, seq):
    """Load all camera views for timestep ``t`` of sequence ``seq``.

    Args:
        t (int): timestep index into the metadata.
        md (dict): metadata with keys 'fn', 'w', 'h', 'k', 'w2c'.
        seq (str): sequence name under ./data/.

    Returns:
        list[dict]: one entry per camera with keys 'cam', 'im', 'seg', 'id'.
    """
    dataset = []
    for c in range(len(md['fn'][t])):
        # Per-camera intrinsics (k) and world-to-camera extrinsics (w2c).
        w, h, k, w2c = md['w'], md['h'], md['k'][t][c], md['w2c'][t][c]
        cam = setup_camera(w, h, k, w2c, near=1.0, far=100)
        fn = md['fn'][t][c]
        im = np.array(copy.deepcopy(Image.open(f"./data/{seq}/ims/{fn}")))
        # HWC uint8 -> CHW float in [0, 1], on GPU.
        im = torch.tensor(im).float().cuda().permute(2, 0, 1) / 255
        seg = np.array(copy.deepcopy(Image.open(f"./data/{seq}/seg/{fn.replace('.jpg', '.png')}"))).astype(np.float32)
        seg = torch.tensor(seg).float().cuda()
        # Encode the binary mask as a 3-channel image: (fg, 0, bg).
        seg_col = torch.stack((seg, torch.zeros_like(seg), 1 - seg))
        dataset.append({'cam': cam, 'im': im, 'seg': seg_col, 'id': c})
    return dataset
def get_batch(todo_dataset, dataset):
    """Pop a random entry from ``todo_dataset``, refilling from ``dataset``.

    The refill mutates ``todo_dataset`` in place so the caller's list is
    actually replenished. The previous ``todo_dataset = dataset.copy()``
    rebound only the local name, leaving the caller's list permanently
    empty, which silently degraded epoch-style sampling (without
    replacement) into sampling with replacement.

    Args:
        todo_dataset (list): remaining items of the current epoch; mutated.
        dataset (list): full item set used to refill when exhausted; unchanged.

    Returns:
        The randomly selected item.
    """
    if not todo_dataset:
        # In-place refill: visible to the caller, unlike local rebinding.
        todo_dataset.extend(dataset)
    return todo_dataset.pop(randint(0, len(todo_dataset) - 1))
def initialize_params(seq, md):
    """Create initial Gaussian parameters and optimization bookkeeping.

    Args:
        seq (str): sequence name under ./data/.
        md (dict): training metadata (camera extrinsics used for scene radius).

    Returns:
        tuple: (params, variables) where params are CUDA Parameters and
        variables holds densification accumulators plus the scene radius.
    """
    # Point cloud columns: xyz (0:3), rgb (3:6), fg/bg segmentation flag (6).
    init_pt_cld = np.load(f"./data/{seq}/init_pt_cld.npz")["data"]
    seg = init_pt_cld[:, 6]
    max_cams = 50
    # Mean squared distance to the 3 nearest neighbours sets the initial scale.
    sq_dist, _ = o3d_knn(init_pt_cld[:, :3], 3)
    mean3_sq_dist = sq_dist.mean(-1).clip(min=0.0000001)
    params = {
        'means3D': init_pt_cld[:, :3],
        'rgb_colors': init_pt_cld[:, 3:6],
        'seg_colors': np.stack((seg, np.zeros_like(seg), 1 - seg), -1),
        'unnorm_rotations': np.tile([1, 0, 0, 0], (seg.shape[0], 1)),  # identity quaternions
        'logit_opacities': np.zeros((seg.shape[0], 1)),  # sigmoid(0) = 0.5 opacity
        'log_scales': np.tile(np.log(np.sqrt(mean3_sq_dist))[..., None], (1, 3)),
        'cam_m': np.zeros((max_cams, 3)),  # per-camera colour gain (log-space)
        'cam_c': np.zeros((max_cams, 3)),  # per-camera colour offset
    }
    params = {k: torch.nn.Parameter(torch.tensor(v).cuda().float().contiguous().requires_grad_(True)) for k, v in
              params.items()}
    cam_centers = np.linalg.inv(md['w2c'][0])[:, :3, 3]  # Get scene radius
    scene_radius = 1.1 * np.max(np.linalg.norm(cam_centers - np.mean(cam_centers, 0)[None], axis=-1))
    # Per-point densification accumulators, reset whenever points change.
    variables = {'max_2D_radius': torch.zeros(params['means3D'].shape[0]).cuda().float(),
                 'scene_radius': scene_radius,
                 'means2D_gradient_accum': torch.zeros(params['means3D'].shape[0]).cuda().float(),
                 'denom': torch.zeros(params['means3D'].shape[0]).cuda().float()}
    return params, variables
def initialize_optimizer(params, variables):
    """Build an Adam optimizer with one named param group per parameter.

    The group-level learning rates override the (zero) top-level lr;
    'means3D' scales with the scene radius, 'seg_colors' stays frozen.
    """
    base_lrs = {
        'means3D': 0.00016 * variables['scene_radius'],
        'rgb_colors': 0.0025,
        'seg_colors': 0.0,
        'unnorm_rotations': 0.001,
        'logit_opacities': 0.05,
        'log_scales': 0.001,
        'cam_m': 1e-4,
        'cam_c': 1e-4,
    }
    groups = []
    for name, tensor in params.items():
        groups.append({'params': [tensor], 'name': name, 'lr': base_lrs[name]})
    return torch.optim.Adam(groups, lr=0.0, eps=1e-15)
def get_loss(params, curr_data, variables, is_initial_timestep):
    """Compute the total training loss for one camera view.

    Combines image reconstruction and segmentation losses with, after the
    first timestep, temporal regularizers (rigidity, rotation similarity,
    isometry, floor, background anchoring, colour consistency).

    Args:
        params (dict): optimizable Gaussian parameters.
        curr_data (dict): one dataset entry ('cam', 'im', 'seg', 'id').
        variables (dict): persistent state from previous timesteps; updated.
        is_initial_timestep (bool): disables temporal losses when True.

    Returns:
        tuple: (scalar loss tensor, updated variables).
    """
    losses = {}

    rendervar = params2rendervar(params)
    rendervar['means2D'].retain_grad()  # needed by densification statistics
    im, radius, _, = Renderer(raster_settings=curr_data['cam'])(**rendervar)
    curr_id = curr_data['id']
    # Per-camera affine colour correction: exp(cam_m) gain + cam_c offset.
    im = torch.exp(params['cam_m'][curr_id])[:, None, None] * im + params['cam_c'][curr_id][:, None, None]
    losses['im'] = 0.8 * l1_loss_v1(im, curr_data['im']) + 0.2 * (1.0 - calc_ssim(im, curr_data['im']))
    variables['means2D'] = rendervar['means2D']  # Gradient only accum from colour render for densification

    # Second pass renders the segmentation colours with the same rasterizer.
    segrendervar = params2rendervar(params)
    segrendervar['colors_precomp'] = params['seg_colors']
    seg, _, _, = Renderer(raster_settings=curr_data['cam'])(**segrendervar)
    losses['seg'] = 0.8 * l1_loss_v1(seg, curr_data['seg']) + 0.2 * (1.0 - calc_ssim(seg, curr_data['seg']))

    if not is_initial_timestep:
        is_fg = (params['seg_colors'][:, 0] > 0.5).detach()

        # Local rigidity: neighbour offsets, expressed in the previous local
        # frame, should match the previous offsets.
        fg_pts = rendervar['means3D'][is_fg]
        fg_rot = rendervar['rotations'][is_fg]
        rel_rot = quat_mult(fg_rot, variables["prev_inv_rot_fg"])
        rot = build_rotation(rel_rot)
        neighbor_pts = fg_pts[variables["neighbor_indices"]]
        curr_offset = neighbor_pts - fg_pts[:, None]
        curr_offset_in_prev_coord = (rot.transpose(2, 1)[:, None] @ curr_offset[:, :, :, None]).squeeze(-1)
        losses['rigid'] = weighted_l2_loss_v2(curr_offset_in_prev_coord, variables["prev_offset"],
                                              variables["neighbor_weight"])

        # Neighbouring points should rotate similarly.
        losses['rot'] = weighted_l2_loss_v2(rel_rot[variables["neighbor_indices"]], rel_rot[:, None],
                                            variables["neighbor_weight"])

        # Isometry: preserve distances to neighbours over time.
        curr_offset_mag = torch.sqrt((curr_offset ** 2).sum(-1) + 1e-20)
        losses['iso'] = weighted_l2_loss_v1(curr_offset_mag, variables["neighbor_dist"], variables["neighbor_weight"])

        # Penalize foreground points with positive y.
        # NOTE(review): assumes the floor is at y=0 with +y pointing below it
        # -- confirm the coordinate convention.
        losses['floor'] = torch.clamp(fg_pts[:, 1], min=0).mean()

        # Background must stay where it was at initialization.
        bg_pts = rendervar['means3D'][~is_fg]
        bg_rot = rendervar['rotations'][~is_fg]
        losses['bg'] = l1_loss_v2(bg_pts, variables["init_bg_pts"]) + l1_loss_v2(bg_rot, variables["init_bg_rot"])

        # Soft colour consistency with the previous timestep.
        losses['soft_col_cons'] = l1_loss_v2(params['rgb_colors'], variables["prev_col"])

    loss_weights = {'im': 1.0, 'seg': 3.0, 'rigid': 4.0, 'rot': 4.0, 'iso': 2.0, 'floor': 2.0, 'bg': 20.0,
                    'soft_col_cons': 0.01}
    loss = sum([loss_weights[k] * v for k, v in losses.items()])

    # Track which points were visible (radius > 0) and their max 2D radius.
    seen = radius > 0
    variables['max_2D_radius'][seen] = torch.max(radius[seen], variables['max_2D_radius'][seen])
    variables['seen'] = seen
    return loss, variables
def initialize_per_timestep(params, variables, optimizer):
    """Warm-start a new timestep by extrapolating state forward.

    Positions and rotations get a constant-velocity forward prediction,
    and the "previous" quantities needed by the temporal losses are cached.

    Args:
        params (dict): current Gaussian parameters.
        variables (dict): persistent state; updated in place and returned.
        optimizer: optimizer whose state must follow the parameter update.

    Returns:
        tuple: (updated params, updated variables).
    """
    pts = params['means3D']
    rot = torch.nn.functional.normalize(params['unnorm_rotations'])

    # Constant-velocity extrapolation from the previous timestep.
    new_pts = pts + (pts - variables["prev_pts"])
    new_rot = torch.nn.functional.normalize(rot + (rot - variables["prev_rot"]))

    fg_mask = params['seg_colors'][:, 0] > 0.5
    inv_rot_fg = rot[fg_mask]
    # Negating the vector part conjugates (inverts) the unit quaternions.
    inv_rot_fg[:, 1:] = -1 * inv_rot_fg[:, 1:]
    fg_pts = pts[fg_mask]
    offsets = fg_pts[variables["neighbor_indices"]] - fg_pts[:, None]

    variables['prev_inv_rot_fg'] = inv_rot_fg.detach()
    variables['prev_offset'] = offsets.detach()
    variables["prev_col"] = params['rgb_colors'].detach()
    variables["prev_pts"] = pts.detach()
    variables["prev_rot"] = rot.detach()

    updated = {'means3D': new_pts, 'unnorm_rotations': new_rot}
    params = update_params_and_optimizer(updated, params, optimizer)
    return params, variables
def initialize_post_first_timestep(params, variables, optimizer, num_knn=20):
    """Freeze static quantities and cache neighbour structure after t=0.

    Args:
        params (dict): Gaussian parameters optimized on the first timestep.
        variables (dict): bookkeeping state; augmented in place.
        optimizer: Adam optimizer whose per-group lrs are zeroed for fixed params.
        num_knn (int): number of nearest neighbours for the rigidity losses.

    Returns:
        dict: the updated variables.
    """
    is_fg = params['seg_colors'][:, 0] > 0.5
    init_fg_pts = params['means3D'][is_fg]
    init_bg_pts = params['means3D'][~is_fg]
    init_bg_rot = torch.nn.functional.normalize(params['unnorm_rotations'][~is_fg])
    # KNN over foreground points defines the rigidity neighbourhoods.
    neighbor_sq_dist, neighbor_indices = o3d_knn(init_fg_pts.detach().cpu().numpy(), num_knn)
    # Gaussian falloff: nearer neighbours are weighted more strongly.
    neighbor_weight = np.exp(-2000 * neighbor_sq_dist)
    neighbor_dist = np.sqrt(neighbor_sq_dist)
    variables["neighbor_indices"] = torch.tensor(neighbor_indices).cuda().long().contiguous()
    variables["neighbor_weight"] = torch.tensor(neighbor_weight).cuda().float().contiguous()
    variables["neighbor_dist"] = torch.tensor(neighbor_dist).cuda().float().contiguous()
    variables["init_bg_pts"] = init_bg_pts.detach()
    variables["init_bg_rot"] = init_bg_rot.detach()
    variables["prev_pts"] = params['means3D'].detach()
    variables["prev_rot"] = torch.nn.functional.normalize(params['unnorm_rotations']).detach()
    # Opacities, scales and camera calibration stay fixed after t=0.
    params_to_fix = ['logit_opacities', 'log_scales', 'cam_m', 'cam_c']
    for param_group in optimizer.param_groups:
        if param_group["name"] in params_to_fix:
            param_group['lr'] = 0.0
    return variables
def report_progress(params, data, i, progress_bar, every_i=100):
    """Every ``every_i`` iterations, render view ``data`` and report its PSNR.

    Args:
        params (dict): current Gaussian parameters.
        data (dict): a single dataset entry to evaluate against.
        i (int): current iteration index.
        progress_bar: tqdm bar updated with the PSNR postfix.
        every_i (int): reporting period in iterations.
    """
    if i % every_i == 0:
        im, _, _, = Renderer(raster_settings=data['cam'])(**params2rendervar(params))
        curr_id = data['id']
        # Apply the same per-camera colour correction used during training.
        im = torch.exp(params['cam_m'][curr_id])[:, None, None] * im + params['cam_c'][curr_id][:, None, None]
        psnr = calc_psnr(im, data['im']).mean()
        progress_bar.set_postfix({"train img 0 PSNR": f"{psnr:.{7}f}"})
        progress_bar.update(every_i)
def params2cpu(params, is_initial_timestep):
    """Detach params to contiguous CPU numpy arrays.

    At the initial timestep every parameter is kept; afterwards only the
    time-varying quantities (positions, colours, rotations) are stored.
    """
    if is_initial_timestep:
        keep = set(params)
    else:
        keep = {'means3D', 'rgb_colors', 'unnorm_rotations'}
    return {name: tensor.detach().cpu().contiguous().numpy()
            for name, tensor in params.items() if name in keep}
def save_params(output_params, seq, exp):
    """Stack per-timestep parameter dicts and save them as one .npz file.

    Keys present at every timestep are stacked along a new leading time
    axis; keys present only at t=0 (static quantities) are saved once.
    """
    to_save = {}
    per_timestep_keys = output_params[1].keys()
    for key, first_value in output_params[0].items():
        if key in per_timestep_keys:
            # Time-varying: stack across all timesteps.
            to_save[key] = np.stack([p[key] for p in output_params])
        else:
            # Static: only the first timestep holds it.
            to_save[key] = first_value
    os.makedirs(f"./output/{exp}/{seq}", exist_ok=True)
    np.savez(f"./output/{exp}/{seq}/params", **to_save)
def densify(params, variables, optimizer, i):
    """Adaptive densification / pruning of Gaussians (active for i <= 5000).

    Clones small high-gradient Gaussians, splits large high-gradient ones,
    prunes low-opacity or oversized points, and periodically resets
    opacities so pruning can reconsider all points.

    Args:
        params (dict): optimizable Gaussian parameters; may be re-allocated.
        variables (dict): accumulators (means2D gradients, radii, denom).
        optimizer: Adam optimizer whose state must track parameter changes.
        i (int): current iteration within the timestep.

    Returns:
        tuple: possibly re-allocated (params, variables).
    """
    if i <= 5000:
        variables = accumulate_mean2d_gradient(variables)
        grad_thresh = 0.0002
        if (i >= 500) and (i % 100 == 0):
            # Average screen-space gradient per point since the last reset.
            grads = variables['means2D_gradient_accum'] / variables['denom']
            grads[grads.isnan()] = 0.0
            # Clone: high-gradient points that are still small.
            to_clone = torch.logical_and(grads >= grad_thresh, (
                torch.max(torch.exp(params['log_scales']), dim=1).values <= 0.01 * variables['scene_radius']))
            new_params = {k: v[to_clone] for k, v in params.items() if k not in ['cam_m', 'cam_c']}
            params = cat_params_to_optimizer(new_params, params, optimizer)
            num_pts = params['means3D'].shape[0]

            # Split: high-gradient points that are too large. `grads` is padded
            # because clones were appended after it was computed.
            padded_grad = torch.zeros(num_pts, device="cuda")
            padded_grad[:grads.shape[0]] = grads
            to_split = torch.logical_and(padded_grad >= grad_thresh,
                                         torch.max(torch.exp(params['log_scales']), dim=1).values > 0.01 * variables[
                                             'scene_radius'])
            n = 2  # number to split into
            new_params = {k: v[to_split].repeat(n, 1) for k, v in params.items() if k not in ['cam_m', 'cam_c']}
            # Sample child centres from each parent Gaussian's own distribution.
            stds = torch.exp(params['log_scales'])[to_split].repeat(n, 1)
            means = torch.zeros((stds.size(0), 3), device="cuda")
            samples = torch.normal(mean=means, std=stds)
            rots = build_rotation(params['unnorm_rotations'][to_split]).repeat(n, 1, 1)
            new_params['means3D'] += torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1)
            # Shrink children so the split roughly preserves coverage.
            new_params['log_scales'] = torch.log(torch.exp(new_params['log_scales']) / (0.8 * n))
            params = cat_params_to_optimizer(new_params, params, optimizer)
            num_pts = params['means3D'].shape[0]

            # Reset accumulators for the new point count.
            variables['means2D_gradient_accum'] = torch.zeros(num_pts, device="cuda")
            variables['denom'] = torch.zeros(num_pts, device="cuda")
            variables['max_2D_radius'] = torch.zeros(num_pts, device="cuda")

            # Remove split parents; their children were appended at the end.
            to_remove = torch.cat((to_split, torch.zeros(n * to_split.sum(), dtype=torch.bool, device="cuda")))
            params, variables = remove_points(to_remove, params, variables, optimizer)

            # Prune nearly transparent points (aggressively at i == 5000).
            remove_threshold = 0.25 if i == 5000 else 0.005
            to_remove = (torch.sigmoid(params['logit_opacities']) < remove_threshold).squeeze()
            if i >= 3000:
                # Also drop points that grew larger than 10% of the scene.
                big_points_ws = torch.exp(params['log_scales']).max(dim=1).values > 0.1 * variables['scene_radius']
                to_remove = torch.logical_or(to_remove, big_points_ws)
            params, variables = remove_points(to_remove, params, variables, optimizer)

            torch.cuda.empty_cache()

        if i > 0 and i % 3000 == 0:
            # Periodic opacity reset to a small value (0.01).
            new_params = {'logit_opacities': inverse_sigmoid(torch.ones_like(params['logit_opacities']) * 0.01)}
            params = update_params_and_optimizer(new_params, params, optimizer)

    return params, variables
def train(seq, exp):
    """Train dynamic Gaussians on sequence ``seq`` for experiment ``exp``.

    Runs 10k iterations on the first timestep (with densification enabled),
    then 2k iterations per subsequent timestep, and saves all per-timestep
    parameters at the end. Skips entirely if the output already exists.

    Args:
        seq (str): sequence name under ./data/.
        exp (str): experiment name used for the ./output/ directory.
    """
    if os.path.exists(f"./output/{exp}/{seq}"):
        print(f"Experiment '{exp}' for sequence '{seq}' already exists. Exiting.")
        return
    # NOTE(review): file handle is not closed explicitly.
    md = json.load(open(f"./data/{seq}/train_meta.json", 'r'))  # metadata
    num_timesteps = len(md['fn'])
    params, variables = initialize_params(seq, md)
    optimizer = initialize_optimizer(params, variables)
    output_params = []
    for t in range(num_timesteps):
        dataset = get_dataset(t, md, seq)
        todo_dataset = []
        is_initial_timestep = (t == 0)
        if not is_initial_timestep:
            # Warm-start this timestep from the previous one.
            params, variables = initialize_per_timestep(params, variables, optimizer)
        num_iter_per_timestep = 10000 if is_initial_timestep else 2000
        progress_bar = tqdm(range(num_iter_per_timestep), desc=f"timestep {t}")
        for i in range(num_iter_per_timestep):
            curr_data = get_batch(todo_dataset, dataset)
            loss, variables = get_loss(params, curr_data, variables, is_initial_timestep)
            loss.backward()
            with torch.no_grad():
                report_progress(params, dataset[0], i, progress_bar)
                if is_initial_timestep:
                    # Densification only runs while fitting the first frame.
                    params, variables = densify(params, variables, optimizer, i)
                optimizer.step()
                optimizer.zero_grad(set_to_none=True)
        progress_bar.close()
        output_params.append(params2cpu(params, is_initial_timestep))
        if is_initial_timestep:
            variables = initialize_post_first_timestep(params, variables, optimizer)
    save_params(output_params, seq, exp)
22,122 | import torch
import numpy as np
import open3d as o3d
import time
from diff_gaussian_rasterization import GaussianRasterizer as Renderer
from helpers import setup_camera, quat_mult
from external import build_rotation
from colormap import colormap
from copy import deepcopy
RENDER_MODE = 'color'
ADDITIONAL_LINES = None
FORCE_LOOP = False
w, h = 640, 360
view_scale = 3.9
fps = 20
traj_length = 15
def init_camera(y_angle=0., center_dist=2.4, cam_height=1.3, f_ratio=0.82):
def load_scene_data(seq, exp, seg_as_col=False):
def calculate_trajectories(scene_data, is_fg):
def calculate_rot_vec(scene_data, is_fg):
def render(w2c, k, timestep_data):
def rgbd2pcd(im, depth, w2c, k, show_depth=False, project_to_cam_w_scale=None):
def visualize(seq, exp):
    """Interactively visualize a trained sequence with Open3D.

    Plays the scene back at `fps`, optionally overlaying trajectory or
    rotation line sets (module-level ADDITIONAL_LINES), and loops until the
    window is closed.

    Args:
        seq (str): sequence name.
        exp (str): experiment name under ./output/.
    """
    scene_data, is_fg = load_scene_data(seq, exp)

    vis = o3d.visualization.Visualizer()
    vis.create_window(width=int(w * view_scale), height=int(h * view_scale), visible=True)

    # Initial render from the default camera.
    w2c, k = init_camera()
    im, depth = render(w2c, k, scene_data[0])
    init_pts, init_cols = rgbd2pcd(im, depth, w2c, k, show_depth=(RENDER_MODE == 'depth'))
    pcd = o3d.geometry.PointCloud()
    pcd.points = init_pts
    pcd.colors = init_cols
    vis.add_geometry(pcd)

    # Optional per-timestep line overlays (trajectories or rotation vectors).
    linesets = None
    lines = None
    if ADDITIONAL_LINES is not None:
        if ADDITIONAL_LINES == 'trajectories':
            linesets = calculate_trajectories(scene_data, is_fg)
        elif ADDITIONAL_LINES == 'rotations':
            linesets = calculate_rot_vec(scene_data, is_fg)
        lines = o3d.geometry.LineSet()
        lines.points = linesets[0].points
        lines.colors = linesets[0].colors
        lines.lines = linesets[0].lines
        vis.add_geometry(lines)

    # Scale intrinsics to the (zoomed) window size.
    view_k = k * view_scale
    view_k[2, 2] = 1
    view_control = vis.get_view_control()
    cparams = o3d.camera.PinholeCameraParameters()
    cparams.extrinsic = w2c
    cparams.intrinsic.intrinsic_matrix = view_k
    cparams.intrinsic.height = int(h * view_scale)
    cparams.intrinsic.width = int(w * view_scale)
    view_control.convert_from_pinhole_camera_parameters(cparams, allow_arbitrary=True)

    render_options = vis.get_render_option()
    render_options.point_size = view_scale
    render_options.light_on = False

    start_time = time.time()
    num_timesteps = len(scene_data)
    while True:
        # Wall-clock driven playback at `fps`.
        passed_time = time.time() - start_time
        passed_frames = passed_time * fps
        if ADDITIONAL_LINES == 'trajectories':
            t = int(passed_frames % (num_timesteps - traj_length)) + traj_length  # Skip t that don't have full traj.
        else:
            t = int(passed_frames % num_timesteps)

        if FORCE_LOOP:
            # Scripted orbit: rotate the camera around the scene over time.
            num_loops = 1.4
            y_angle = 360*t*num_loops / num_timesteps
            w2c, k = init_camera(y_angle)
            cam_params = view_control.convert_to_pinhole_camera_parameters()
            cam_params.extrinsic = w2c
            view_control.convert_from_pinhole_camera_parameters(cam_params, allow_arbitrary=True)
        else:  # Interactive control
            # Read the user-driven camera back and undo the view scaling.
            cam_params = view_control.convert_to_pinhole_camera_parameters()
            view_k = cam_params.intrinsic.intrinsic_matrix
            k = view_k / view_scale
            k[2, 2] = 1
            w2c = cam_params.extrinsic

        if RENDER_MODE == 'centers':
            # Show raw Gaussian centres instead of a rendered image.
            pts = o3d.utility.Vector3dVector(scene_data[t]['means3D'].contiguous().double().cpu().numpy())
            cols = o3d.utility.Vector3dVector(scene_data[t]['colors_precomp'].contiguous().double().cpu().numpy())
        else:
            im, depth = render(w2c, k, scene_data[t])
            pts, cols = rgbd2pcd(im, depth, w2c, k, show_depth=(RENDER_MODE == 'depth'))
        pcd.points = pts
        pcd.colors = cols
        vis.update_geometry(pcd)

        if ADDITIONAL_LINES is not None:
            if ADDITIONAL_LINES == 'trajectories':
                lt = t - traj_length
            else:
                lt = t
            lines.points = linesets[lt].points
            lines.colors = linesets[lt].colors
            lines.lines = linesets[lt].lines
            vis.update_geometry(lines)

        if not vis.poll_events():
            break
        vis.update_renderer()

    vis.destroy_window()
    del view_control
    del vis
    del render_options
22,123 | import torch
import torch.nn.functional as func
from torch.autograd import Variable
from math import exp
def calc_mse(img1, img2):
    """Per-image mean squared error, returned with shape (batch, 1)."""
    squared_diff = (img1 - img2) ** 2
    return squared_diff.view(img1.shape[0], -1).mean(1, keepdim=True)
22,124 | import torch
import math
from typing import Type, Dict, Any, Tuple, Callable
from . import merge
from .utils import isinstance_str, init_generator
def make_tome_block(block_class: Type[torch.nn.Module]) -> Type[torch.nn.Module]:
    """
    Make a patched class on the fly so we don't have to import any specific modules.
    This patch applies ToMe to the forward function of the block.
    """
    class ToMeBlock(block_class):
        # Save for unpatching later
        _parent = block_class

        def _forward(self, x: torch.Tensor, context: torch.Tensor = None) -> torch.Tensor:
            # compute_merge yields merge (m_*) / unmerge (u_*) function pairs
            # for the attention, cross-attention and MLP branches.
            # NOTE(review): compute_merge is defined elsewhere in this module.
            m_a, m_c, m_m, u_a, u_c, u_m = compute_merge(x, self._tome_info)

            # This is where the meat of the computation happens: merge tokens,
            # run each sub-layer, then unmerge before the residual addition.
            x = u_a(self.attn1(m_a(self.norm1(x)), context=context if self.disable_self_attn else None)) + x
            x = u_c(self.attn2(m_c(self.norm2(x)), context=context)) + x
            x = u_m(self.ff(m_m(self.norm3(x)))) + x

            return x

    return ToMeBlock
def make_diffusers_tome_block(block_class: Type[torch.nn.Module]) -> Type[torch.nn.Module]:
    """
    Make a patched class for a diffusers model.
    This patch applies ToMe to the forward function of the block.
    """
    class ToMeBlock(block_class):
        # Save for unpatching later
        _parent = block_class

        def forward(
            self,
            hidden_states,
            attention_mask=None,
            encoder_hidden_states=None,
            encoder_attention_mask=None,
            timestep=None,
            cross_attention_kwargs=None,
            class_labels=None,
        ) -> torch.Tensor:
            # (1) ToMe: build merge/unmerge pairs for each branch.
            # NOTE(review): compute_merge is defined elsewhere in this module.
            m_a, m_c, m_m, u_a, u_c, u_m = compute_merge(hidden_states, self._tome_info)

            # Pre-attention normalization (plain, AdaLN, or AdaLN-Zero).
            if self.use_ada_layer_norm:
                norm_hidden_states = self.norm1(hidden_states, timestep)
            elif self.use_ada_layer_norm_zero:
                norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                    hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
                )
            else:
                norm_hidden_states = self.norm1(hidden_states)

            # (2) ToMe m_a: merge tokens before self-attention.
            norm_hidden_states = m_a(norm_hidden_states)

            # 1. Self-Attention
            cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
            attn_output = self.attn1(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
                attention_mask=attention_mask,
                **cross_attention_kwargs,
            )
            if self.use_ada_layer_norm_zero:
                attn_output = gate_msa.unsqueeze(1) * attn_output

            # (3) ToMe u_a: unmerge before the residual addition.
            hidden_states = u_a(attn_output) + hidden_states

            if self.attn2 is not None:
                norm_hidden_states = (
                    self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
                )
                # (4) ToMe m_c: merge tokens before cross-attention.
                norm_hidden_states = m_c(norm_hidden_states)

                # 2. Cross-Attention
                attn_output = self.attn2(
                    norm_hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=encoder_attention_mask,
                    **cross_attention_kwargs,
                )
                # (5) ToMe u_c
                hidden_states = u_c(attn_output) + hidden_states

            # 3. Feed-forward
            norm_hidden_states = self.norm3(hidden_states)

            if self.use_ada_layer_norm_zero:
                norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

            # (6) ToMe m_m: merge tokens before the MLP.
            norm_hidden_states = m_m(norm_hidden_states)

            ff_output = self.ff(norm_hidden_states)

            if self.use_ada_layer_norm_zero:
                ff_output = gate_mlp.unsqueeze(1) * ff_output

            # (7) ToMe u_m
            hidden_states = u_m(ff_output) + hidden_states

            return hidden_states

    return ToMeBlock
def hook_tome_model(model: torch.nn.Module):
    """ Adds a forward pre hook to get the image size. This hook can be removed with remove_patch. """
    def record_size(module, args):
        # args[0] is the input tensor; stash its spatial (H, W) dimensions.
        module._tome_info["size"] = (args[0].shape[2], args[0].shape[3])
        return None

    handle = model.register_forward_pre_hook(record_size)
    model._tome_info["hooks"].append(handle)
def remove_patch(model: torch.nn.Module):
    """ Removes a patch from a ToMe Diffusion module if it was already patched. """
    # For diffusers: pipelines wrap the actual network in `.unet`.
    target = model.unet if hasattr(model, "unet") else model

    for _name, module in target.named_modules():
        if hasattr(module, "_tome_info"):
            # Detach every registered hook, then forget the handles.
            for handle in module._tome_info["hooks"]:
                handle.remove()
            module._tome_info["hooks"].clear()

        # Restore the original class saved by the patcher.
        if module.__class__.__name__ == "ToMeBlock":
            module.__class__ = module._parent

    return target
def isinstance_str(x: object, cls_name: str):
    """
    Checks whether x has any class *named* cls_name in its ancestry.
    Doesn't require access to the class's implementation.
    Useful for patching!
    """
    return any(klass.__name__ == cls_name for klass in type(x).__mro__)
The provided code snippet includes necessary dependencies for implementing the `apply_patch` function. Write a Python function `def apply_patch( model: torch.nn.Module, ratio: float = 0.5, max_downsample: int = 1, sx: int = 2, sy: int = 2, use_rand: bool = True, merge_attn: bool = True, merge_crossattn: bool = False, merge_mlp: bool = False)` to solve the following problem:
Patches a stable diffusion model with ToMe. Apply this to the highest level stable diffusion object (i.e., it should have a .model.diffusion_model). Important Args: - model: A top level Stable Diffusion module to patch in place. Should have a ".model.diffusion_model" - ratio: The ratio of tokens to merge. I.e., 0.4 would reduce the total number of tokens by 40%. The maximum value for this is 1-(1/(sx*sy)). By default, the max is 0.75 (I recommend <= 0.5 though). Higher values result in more speed-up, but with more visual quality loss. Args to tinker with if you want: - max_downsample [1, 2, 4, or 8]: Apply ToMe to layers with at most this amount of downsampling. E.g., 1 only applies to layers with no downsampling (4/15) while 8 applies to all layers (15/15). I recommend a value of 1 or 2. - sx, sy: The stride for computing dst sets (see paper). A higher stride means you can merge more tokens, but the default of (2, 2) works well in most cases. Doesn't have to divide image size. - use_rand: Whether or not to allow random perturbations when computing dst sets (see paper). Usually you'd want to leave this on, but if you're having weird artifacts try turning this off. - merge_attn: Whether or not to merge tokens for attention (recommended). - merge_crossattn: Whether or not to merge tokens for cross attention (not recommended). - merge_mlp: Whether or not to merge tokens for the mlp layers (very not recommended).
Here is the function:
def apply_patch(
model: torch.nn.Module,
ratio: float = 0.5,
max_downsample: int = 1,
sx: int = 2, sy: int = 2,
use_rand: bool = True,
merge_attn: bool = True,
merge_crossattn: bool = False,
merge_mlp: bool = False):
"""
Patches a stable diffusion model with ToMe.
Apply this to the highest level stable diffusion object (i.e., it should have a .model.diffusion_model).
Important Args:
- model: A top level Stable Diffusion module to patch in place. Should have a ".model.diffusion_model"
- ratio: The ratio of tokens to merge. I.e., 0.4 would reduce the total number of tokens by 40%.
The maximum value for this is 1-(1/(sx*sy)). By default, the max is 0.75 (I recommend <= 0.5 though).
Higher values result in more speed-up, but with more visual quality loss.
Args to tinker with if you want:
- max_downsample [1, 2, 4, or 8]: Apply ToMe to layers with at most this amount of downsampling.
E.g., 1 only applies to layers with no downsampling (4/15) while
8 applies to all layers (15/15). I recommend a value of 1 or 2.
- sx, sy: The stride for computing dst sets (see paper). A higher stride means you can merge more tokens,
but the default of (2, 2) works well in most cases. Doesn't have to divide image size.
- use_rand: Whether or not to allow random perturbations when computing dst sets (see paper). Usually
you'd want to leave this on, but if you're having weird artifacts try turning this off.
- merge_attn: Whether or not to merge tokens for attention (recommended).
- merge_crossattn: Whether or not to merge tokens for cross attention (not recommended).
- merge_mlp: Whether or not to merge tokens for the mlp layers (very not recommended).
"""
# Make sure the module is not currently patched
remove_patch(model)
is_diffusers = isinstance_str(model, "DiffusionPipeline") or isinstance_str(model, "ModelMixin")
if not is_diffusers:
if not hasattr(model, "model") or not hasattr(model.model, "diffusion_model"):
# Provided model not supported
raise RuntimeError("Provided model was not a Stable Diffusion / Latent Diffusion model, as expected.")
diffusion_model = model.model.diffusion_model
else:
# Supports "pipe.unet" and "unet"
diffusion_model = model.unet if hasattr(model, "unet") else model
diffusion_model._tome_info = {
"size": None,
"hooks": [],
"args": {
"ratio": ratio,
"max_downsample": max_downsample,
"sx": sx, "sy": sy,
"use_rand": use_rand,
"generator": None,
"merge_attn": merge_attn,
"merge_crossattn": merge_crossattn,
"merge_mlp": merge_mlp
}
}
hook_tome_model(diffusion_model)
for _, module in diffusion_model.named_modules():
# If for some reason this has a different name, create an issue and I'll fix it
if isinstance_str(module, "BasicTransformerBlock"):
make_tome_block_fn = make_diffusers_tome_block if is_diffusers else make_tome_block
module.__class__ = make_tome_block_fn(module.__class__)
module._tome_info = diffusion_model._tome_info
# Something introduced in SD 2.0 (LDM only)
if not hasattr(module, "disable_self_attn") and not is_diffusers:
module.disable_self_attn = False
# Something needed for older versions of diffusers
if not hasattr(module, "use_ada_layer_norm_zero") and is_diffusers:
module.use_ada_layer_norm = False
module.use_ada_layer_norm_zero = False
return model | Patches a stable diffusion model with ToMe. Apply this to the highest level stable diffusion object (i.e., it should have a .model.diffusion_model). Important Args: - model: A top level Stable Diffusion module to patch in place. Should have a ".model.diffusion_model" - ratio: The ratio of tokens to merge. I.e., 0.4 would reduce the total number of tokens by 40%. The maximum value for this is 1-(1/(sx*sy)). By default, the max is 0.75 (I recommend <= 0.5 though). Higher values result in more speed-up, but with more visual quality loss. Args to tinker with if you want: - max_downsample [1, 2, 4, or 8]: Apply ToMe to layers with at most this amount of downsampling. E.g., 1 only applies to layers with no downsampling (4/15) while 8 applies to all layers (15/15). I recommend a value of 1 or 2. - sx, sy: The stride for computing dst sets (see paper). A higher stride means you can merge more tokens, but the default of (2, 2) works well in most cases. Doesn't have to divide image size. - use_rand: Whether or not to allow random perturbations when computing dst sets (see paper). Usually you'd want to leave this on, but if you're having weird artifacts try turning this off. - merge_attn: Whether or not to merge tokens for attention (recommended). - merge_crossattn: Whether or not to merge tokens for cross attention (not recommended). - merge_mlp: Whether or not to merge tokens for the mlp layers (very not recommended). |
22,125 | import itertools
import time
from typing import Optional
from tml.common.batch import DataclassBatch
from tml.ml_logging.torch_logging import logging
import pyarrow as pa
import torch
The provided code snippet includes necessary dependencies for implementing the `roundrobin` function. Write a Python function `def roundrobin(*iterables)` to solve the following problem:
Round robin through provided iterables, useful for simple load balancing. Adapted from https://docs.python.org/3/library/itertools.html.
Here is the function:
def roundrobin(*iterables):
"""Round robin through provided iterables, useful for simple load balancing.
Adapted from https://docs.python.org/3/library/itertools.html.
"""
num_active = len(iterables)
nexts = itertools.cycle(iter(it).__next__ for it in iterables)
while num_active:
try:
for _next in nexts:
result = _next()
yield result
except StopIteration:
# Remove the iterator we just exhausted from the cycle.
num_active -= 1
nexts = itertools.cycle(itertools.islice(nexts, num_active))
logging.warning(f"Iterable exhausted, {num_active} iterables left.")
except Exception as exc:
logging.warning(f"Iterable raised exception {exc}, ignoring.")
# continue
raise | Round robin through provided iterables, useful for simple load balancing. Adapted from https://docs.python.org/3/library/itertools.html. |
22,126 | import itertools
import time
from typing import Optional
from tml.common.batch import DataclassBatch
from tml.ml_logging.torch_logging import logging
import pyarrow as pa
import torch
def speed_check(data_loader, max_steps: int, frequency: int, peek: Optional[int]):
num_examples = 0
prev = time.perf_counter()
for idx, batch in enumerate(data_loader):
if idx > max_steps:
break
if peek and idx % peek == 0:
logging.info(f"Batch: {batch}")
num_examples += batch.batch_size
if idx % frequency == 0:
now = time.perf_counter()
elapsed = now - prev
logging.info(
f"step: {idx}, "
f"elapsed(s): {elapsed}, "
f"examples: {num_examples}, "
f"ex/s: {num_examples / elapsed}, "
)
prev = now
num_examples = 0 | null |
22,127 | import itertools
import time
from typing import Optional
from tml.common.batch import DataclassBatch
from tml.ml_logging.torch_logging import logging
import pyarrow as pa
import torch
def pa_to_torch(array: pa.array) -> torch.Tensor:
return torch.from_numpy(array.to_numpy())
def create_default_pa_to_batch(schema) -> DataclassBatch:
""" """
_CustomBatch = DataclassBatch.from_schema("DefaultBatch", schema=schema)
def get_imputation_value(pa_type):
type_map = {
pa.float64(): pa.scalar(0, type=pa.float64()),
pa.int64(): pa.scalar(0, type=pa.int64()),
pa.string(): pa.scalar("", type=pa.string()),
}
if pa_type not in type_map:
raise Exception(f"Imputation for type {pa_type} not supported.")
return type_map[pa_type]
def _impute(array: pa.array) -> pa.array:
return array.fill_null(get_imputation_value(array.type))
def _column_to_tensor(record_batch: pa.RecordBatch):
tensors = {
col_name: pa_to_torch(_impute(record_batch.column(col_name)))
for col_name in record_batch.schema.names
}
return _CustomBatch(**tensors)
return _column_to_tensor | null |
22,128 | from typing import Optional
import uuid
from tml.ml_logging.torch_logging import logging
import tml.machines.environment as env
import packaging.version
import tensorflow as tf
from tensorflow.python.data.experimental.ops.data_service_ops import (
_from_dataset_id,
_register_dataset,
)
import torch.distributed as dist
def maybe_start_dataset_service():
if not env.has_readers():
return
if packaging.version.parse(tf.__version__) < packaging.version.parse("2.5"):
raise Exception(f"maybe_distribute_dataset requires TF >= 2.5; got {tf.__version__}")
if env.is_dispatcher():
logging.info(f"env.get_reader_port() = {env.get_reader_port()}")
logging.info(f"env.get_dds_journaling_dir() = {env.get_dds_journaling_dir()}")
work_dir = env.get_dds_journaling_dir()
server = tf.data.experimental.service.DispatchServer(
tf.data.experimental.service.DispatcherConfig(
port=env.get_reader_port(),
protocol="grpc",
work_dir=work_dir,
fault_tolerant_mode=bool(work_dir),
)
)
server.join()
elif env.is_reader():
logging.info(f"env.get_reader_port() = {env.get_reader_port()}")
logging.info(f"env.get_dds_dispatcher_address() = {env.get_dds_dispatcher_address()}")
logging.info(f"env.get_dds_worker_address() = {env.get_dds_worker_address()}")
server = tf.data.experimental.service.WorkerServer(
tf.data.experimental.service.WorkerConfig(
port=env.get_reader_port(),
dispatcher_address=env.get_dds_dispatcher_address(),
worker_address=env.get_dds_worker_address(),
protocol="grpc",
)
)
server.join() | null |
22,129 | from typing import Optional
import uuid
from tml.ml_logging.torch_logging import logging
import tml.machines.environment as env
import packaging.version
import tensorflow as tf
from tensorflow.python.data.experimental.ops.data_service_ops import (
_from_dataset_id,
_register_dataset,
)
import torch.distributed as dist
def register_dataset(
dataset: tf.data.Dataset, dataset_service: str, compression: Optional[str] = "AUTO"
):
if dist.get_rank() == 0:
dataset_id = _register_dataset(
service=dataset_service,
dataset=dataset,
compression=compression,
)
job_name = uuid.uuid4().hex[:8]
id_and_job = [dataset_id.numpy(), job_name]
logging.info(f"rank{dist.get_rank()}: Created dds job with {dataset_id.numpy()}, {job_name}")
else:
id_and_job = [None, None]
dist.broadcast_object_list(id_and_job, src=0)
return tuple(id_and_job)
def distribute_from_dataset_id(
dataset_service: str,
dataset_id: int,
job_name: Optional[str],
compression: Optional[str] = "AUTO",
prefetch: Optional[int] = tf.data.experimental.AUTOTUNE,
) -> tf.data.Dataset:
logging.info(f"rank{dist.get_rank()}: Consuming dds job with {dataset_id}, {job_name}")
dataset = _from_dataset_id(
processing_mode="parallel_epochs",
service=dataset_service,
dataset_id=dataset_id,
job_name=job_name,
element_spec=None,
compression=compression,
)
if prefetch is not None:
dataset = dataset.prefetch(prefetch)
return dataset
The provided code snippet includes necessary dependencies for implementing the `maybe_distribute_dataset` function. Write a Python function `def maybe_distribute_dataset(dataset: tf.data.Dataset) -> tf.data.Dataset` to solve the following problem:
Torch-compatible and distributed-training-aware dataset service distributor. - rank 0 process will register the given dataset. - rank 0 process will broadcast job name and dataset id. - all rank processes will consume from the same job/dataset. Without this, dataset workers will try to serve 1 job per rank process and OOM.
Here is the function:
def maybe_distribute_dataset(dataset: tf.data.Dataset) -> tf.data.Dataset:
"""Torch-compatible and distributed-training-aware dataset service distributor.
- rank 0 process will register the given dataset.
- rank 0 process will broadcast job name and dataset id.
- all rank processes will consume from the same job/dataset.
Without this, dataset workers will try to serve 1 job per rank process and OOM.
"""
if not env.has_readers():
return dataset
dataset_service = env.get_dds()
logging.info(f"using DDS = {dataset_service}")
dataset_id, job_name = register_dataset(dataset=dataset, dataset_service=dataset_service)
dataset = distribute_from_dataset_id(
dataset_service=dataset_service, dataset_id=dataset_id, job_name=job_name
)
return dataset | Torch-compatible and distributed-training-aware dataset service distributor. - rank 0 process will register the given dataset. - rank 0 process will broadcast job name and dataset id. - all rank processes will consume from the same job/dataset. Without this, dataset workers will try to serve 1 job per rank process and OOM. |
22,130 | import abc
import functools
import random
from typing import Optional
from fsspec.implementations.local import LocalFileSystem
import pyarrow.dataset as pads
import pyarrow as pa
import pyarrow.parquet
import pyarrow.flight
from pyarrow.ipc import IpcWriteOptions
import torch
from tml.common.batch import DataclassBatch
from tml.machines import environment as env
import tml.reader.utils as reader_utils
from tml.common.filesystem import infer_fs
from tml.ml_logging.torch_logging import logging
GRPC_OPTIONS = [
("GRPC_ARG_KEEPALIVE_TIME_MS", 60000),
("GRPC_ARG_MIN_RECONNECT_BACKOFF_MS", 2000),
("GRPC_ARG_MAX_METADATA_SIZE", 1024 * 1024 * 1024),
]
def get_readers(num_readers_per_worker: int):
addresses = env.get_flight_server_addresses()
readers = []
for worker in addresses:
logging.info(f"Attempting connection to reader {worker}.")
client = pa.flight.connect(worker, generic_options=GRPC_OPTIONS)
client.wait_for_available(60)
reader = client.do_get(None).to_reader()
logging.info(f"Connected reader to {worker}.")
readers.append(reader)
return readers | null |
22,131 | from typing import Tuple, Union
import torch
import torchmetrics
def update_mean(
current_mean: torch.Tensor,
current_weight_sum: torch.Tensor,
value: torch.Tensor,
weight: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Update the mean according to Welford formula:
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Weighted_batched_version.
See also https://nullbuffer.com/articles/welford_algorithm.html for more information.
Args:
current_mean: The value of the current accumulated mean.
current_weight_sum: The current weighted sum.
value: The new value that needs to be added to get a new mean.
weight: The weights for the new value.
Returns: The updated mean and updated weighted sum.
"""
weight = torch.broadcast_to(weight, value.shape)
# Avoiding (on purpose) in-place operation when using += in case
# current_mean and current_weight_sum share the same storage
current_weight_sum = current_weight_sum + torch.sum(weight)
current_mean = current_mean + torch.sum((weight / current_weight_sum) * (value - current_mean))
return current_mean, current_weight_sum
The provided code snippet includes necessary dependencies for implementing the `stable_mean_dist_reduce_fn` function. Write a Python function `def stable_mean_dist_reduce_fn(state: torch.Tensor) -> torch.Tensor` to solve the following problem:
Merge the state from multiple workers. Args: state: A tensor with the first dimension indicating workers. Returns: The accumulated mean from all workers.
Here is the function:
def stable_mean_dist_reduce_fn(state: torch.Tensor) -> torch.Tensor:
"""
Merge the state from multiple workers.
Args:
state: A tensor with the first dimension indicating workers.
Returns: The accumulated mean from all workers.
"""
mean, weight_sum = update_mean(
current_mean=torch.as_tensor(0.0, dtype=state.dtype, device=state.device),
current_weight_sum=torch.as_tensor(0.0, dtype=state.dtype, device=state.device),
value=state[:, 0],
weight=state[:, 1],
)
return torch.stack([mean, weight_sum]) | Merge the state from multiple workers. Args: state: A tensor with the first dimension indicating workers. Returns: The accumulated mean from all workers. |
22,132 | from typing import Union
from tml.ml_logging.torch_logging import logging
import torch
import torchmetrics
from torchmetrics.utilities.data import dim_zero_cat
The provided code snippet includes necessary dependencies for implementing the `_compute_helper` function. Write a Python function `def _compute_helper( predictions: torch.Tensor, target: torch.Tensor, weights: torch.Tensor, max_positive_negative_weighted_sum: torch.Tensor, min_positive_negative_weighted_sum: torch.Tensor, equal_predictions_as_incorrect: bool, ) -> torch.Tensor` to solve the following problem:
Compute AUROC. Args: predictions: The predictions probabilities. target: The target. weights: The sample weights to assign to each sample in the batch. max_positive_negative_weighted_sum: The sum of the weights for the positive labels. min_positive_negative_weighted_sum: equal_predictions_as_incorrect: For positive & negative labels having identical scores, we assume that they are correct prediction (i.e weight = 1) when ths is False. Otherwise, we assume that they are correct prediction (i.e weight = 0).
Here is the function:
def _compute_helper(
predictions: torch.Tensor,
target: torch.Tensor,
weights: torch.Tensor,
max_positive_negative_weighted_sum: torch.Tensor,
min_positive_negative_weighted_sum: torch.Tensor,
equal_predictions_as_incorrect: bool,
) -> torch.Tensor:
"""
Compute AUROC.
Args:
predictions: The predictions probabilities.
target: The target.
weights: The sample weights to assign to each sample in the batch.
max_positive_negative_weighted_sum: The sum of the weights for the positive labels.
min_positive_negative_weighted_sum:
equal_predictions_as_incorrect: For positive & negative labels having identical scores,
we assume that they are correct prediction (i.e weight = 1) when ths is False. Otherwise,
we assume that they are correct prediction (i.e weight = 0).
"""
dim = 0
# Sort predictions based on key (score, true_label). The order is ascending for score.
# For true_label, order is ascending if equal_predictions_as_incorrect is True;
# otherwise it is descending.
target_order = torch.argsort(target, dim=dim, descending=equal_predictions_as_incorrect)
score_order = torch.sort(torch.gather(predictions, dim, target_order), stable=True, dim=dim)[1]
score_order = torch.gather(target_order, dim, score_order)
sorted_target = torch.gather(target, dim, score_order)
sorted_weights = torch.gather(weights, dim, score_order)
negatives_from_left = torch.cumsum((1.0 - sorted_target) * sorted_weights, 0)
numerator = torch.sum(
sorted_weights * (sorted_target * negatives_from_left / max_positive_negative_weighted_sum)
)
return numerator / min_positive_negative_weighted_sum | Compute AUROC. Args: predictions: The predictions probabilities. target: The target. weights: The sample weights to assign to each sample in the batch. max_positive_negative_weighted_sum: The sum of the weights for the positive labels. min_positive_negative_weighted_sum: equal_predictions_as_incorrect: For positive & negative labels having identical scores, we assume that they are correct prediction (i.e weight = 1) when ths is False. Otherwise, we assume that they are correct prediction (i.e weight = 0). |
22,133 | import copy
from functools import partial
from typing import Union
from tml.metrics import aggregation
import torch
import torchmetrics
The provided code snippet includes necessary dependencies for implementing the `_smooth` function. Write a Python function `def _smooth( value: torch.Tensor, label_smoothing: Union[float, torch.Tensor] ) -> Union[float, torch.Tensor]` to solve the following problem:
Smooth given values. Args: value: Value to smooth. label_smoothing: smoothing constant. Returns: Smoothed values.
Here is the function:
def _smooth(
value: torch.Tensor, label_smoothing: Union[float, torch.Tensor]
) -> Union[float, torch.Tensor]:
"""
Smooth given values.
Args:
value: Value to smooth.
label_smoothing: smoothing constant.
Returns: Smoothed values.
"""
return value * (1.0 - label_smoothing) + 0.5 * label_smoothing | Smooth given values. Args: value: Value to smooth. label_smoothing: smoothing constant. Returns: Smoothed values. |
22,134 | import copy
from functools import partial
from typing import Union
from tml.metrics import aggregation
import torch
import torchmetrics
The provided code snippet includes necessary dependencies for implementing the `_binary_cross_entropy_with_clipping` function. Write a Python function `def _binary_cross_entropy_with_clipping( predictions: torch.Tensor, target: torch.Tensor, epsilon: Union[float, torch.Tensor], reduction: str = "none", ) -> torch.Tensor` to solve the following problem:
Clip Predictions and apply binary cross entropy. This is done to match the implementation in keras at https://github.com/keras-team/keras/blob/r2.9/keras/backend.py#L5294-L5300 Args: predictions: Predicted probabilities. target: Ground truth. epsilon: Epsilon fuzz factor used to clip the predictions. reduction: The reduction method to use. Returns: Binary cross entropy on the clipped predictions.
Here is the function:
def _binary_cross_entropy_with_clipping(
predictions: torch.Tensor,
target: torch.Tensor,
epsilon: Union[float, torch.Tensor],
reduction: str = "none",
) -> torch.Tensor:
"""
Clip Predictions and apply binary cross entropy.
This is done to match the implementation in keras at
https://github.com/keras-team/keras/blob/r2.9/keras/backend.py#L5294-L5300
Args:
predictions: Predicted probabilities.
target: Ground truth.
epsilon: Epsilon fuzz factor used to clip the predictions.
reduction: The reduction method to use.
Returns: Binary cross entropy on the clipped predictions.
"""
predictions = torch.clamp(predictions, epsilon, 1.0 - epsilon)
bce = -target * torch.log(predictions + epsilon)
bce -= (1.0 - target) * torch.log(1.0 - predictions + epsilon)
if reduction == "mean":
return torch.mean(bce)
return bce | Clip Predictions and apply binary cross entropy. This is done to match the implementation in keras at https://github.com/keras-team/keras/blob/r2.9/keras/backend.py#L5294-L5300 Args: predictions: Predicted probabilities. target: Ground truth. epsilon: Epsilon fuzz factor used to clip the predictions. reduction: The reduction method to use. Returns: Binary cross entropy on the clipped predictions. |
22,135 | import typing
import tml.core.config as base_config
import pydantic
class OptimizerConfig(base_config.BaseConfig):
learning_rate: LearningRate = pydantic.Field(
None,
description="Constant learning rates",
)
adam: AdamConfig = pydantic.Field(None, one_of="optimizer")
sgd: SgdConfig = pydantic.Field(None, one_of="optimizer")
adagrad: AdagradConfig = pydantic.Field(None, one_of="optimizer")
def get_optimizer_algorithm_config(optimizer_config: OptimizerConfig):
if optimizer_config.adam is not None:
return optimizer_config.adam
elif optimizer_config.sgd is not None:
return optimizer_config.sgd
elif optimizer_config.adagrad is not None:
return optimizer_config.adagrad
else:
raise ValueError(f"No optimizer selected in optimizer_config, passed {optimizer_config}") | null |
22,136 | from typing import Dict, Tuple
import math
import bisect
from tml.optimizers.config import (
LearningRate,
OptimizerConfig,
)
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from tml.ml_logging.torch_logging import logging
The provided code snippet includes necessary dependencies for implementing the `compute_lr` function. Write a Python function `def compute_lr(lr_config, step)` to solve the following problem:
Compute a learning rate.
Here is the function:
def compute_lr(lr_config, step):
"""Compute a learning rate."""
if lr_config.constant is not None:
return lr_config.constant
elif lr_config.piecewise_constant is not None:
return lr_config.piecewise_constant.learning_rate_values[
bisect.bisect_right(lr_config.piecewise_constant.learning_rate_boundaries, step)
]
elif lr_config.linear_ramp_to_constant is not None:
slope = (
lr_config.linear_ramp_to_constant.learning_rate
/ lr_config.linear_ramp_to_constant.num_ramp_steps
)
return min(lr_config.linear_ramp_to_constant.learning_rate, slope * step)
elif lr_config.linear_ramp_to_cosine is not None:
cfg = lr_config.linear_ramp_to_cosine
if step < cfg.num_ramp_steps:
slope = cfg.learning_rate / cfg.num_ramp_steps
return slope * step
elif step <= cfg.final_num_steps:
return cfg.final_learning_rate + (cfg.learning_rate - cfg.final_learning_rate) * 0.5 * (
1.0
+ math.cos(
math.pi * (step - cfg.num_ramp_steps) / (cfg.final_num_steps - cfg.num_ramp_steps)
)
)
else:
return cfg.final_learning_rate
else:
raise ValueError(f"No option selected in lr_config, passed {lr_config}") | Compute a learning rate. |
22,137 | from typing import Dict, Tuple
import math
import bisect
from tml.optimizers.config import (
LearningRate,
OptimizerConfig,
)
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from tml.ml_logging.torch_logging import logging
class LRShim(_LRScheduler):
"""Shim to get learning rates into a LRScheduler.
This adheres to the torch.optim scheduler API and can be plugged anywhere that
e.g. exponential decay can be used.
"""
def __init__(
self,
optimizer,
lr_dict: Dict[str, LearningRate],
last_epoch=-1,
verbose=False,
):
self.optimizer = optimizer
self.lr_dict = lr_dict
self.group_names = list(self.lr_dict.keys())
num_param_groups = sum(1 for _, _optim in optimizer._optims for _ in _optim.param_groups)
if num_param_groups != len(lr_dict):
raise ValueError(
f"Optimizer had {len(optimizer.param_groups)}, but config had {len(lr_dict)}."
)
super().__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
logging.warn(
"To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.",
UserWarning,
)
return self._get_closed_form_lr()
def _get_closed_form_lr(self):
return [compute_lr(lr_config, self.last_epoch) for lr_config in self.lr_dict.values()]
def get_optimizer_class(optimizer_config: OptimizerConfig):
if optimizer_config.adam is not None:
return torch.optim.Adam
elif optimizer_config.sgd is not None:
return torch.optim.SGD
elif optimizer_config.adagrad is not None:
return torch.optim.Adagrad
The provided code snippet includes necessary dependencies for implementing the `build_optimizer` function. Write a Python function `def build_optimizer( model: torch.nn.Module, optimizer_config: OptimizerConfig ) -> Tuple[Optimizer, _LRScheduler]` to solve the following problem:
Builds an optimizer and LR scheduler from an OptimizerConfig. Note: use this when you want the same optimizer and learning rate schedule for all your parameters.
Here is the function:
def build_optimizer(
model: torch.nn.Module, optimizer_config: OptimizerConfig
) -> Tuple[Optimizer, _LRScheduler]:
"""Builds an optimizer and LR scheduler from an OptimizerConfig.
Note: use this when you want the same optimizer and learning rate schedule for all your parameters.
"""
optimizer_class = get_optimizer_class(optimizer_config)
optimizer = optimizer_class(model.parameters(), **optimizer_config.sgd.dict())
# We're passing everything in as one group here
scheduler = LRShim(optimizer, lr_dict={"ALL_PARAMS": optimizer_config.learning_rate})
return optimizer, scheduler | Builds an optimizer and LR scheduler from an OptimizerConfig. Note: use this when you want the same optimizer and learning rate schedule for all your parameters. |
22,138 | from typing import Iterable, Optional, Dict, Callable, List
import torch
from torch.optim.lr_scheduler import _LRScheduler
import torchmetrics as tm
from tml.ml_logging.torch_logging import logging
def train(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
train_steps: int,
dataset: Iterable,
scheduler: _LRScheduler = None,
# Accept any arguments (to be compatible with the real training loop)
# but just ignore them.
*args,
**kwargs,
) -> None:
logging.warning("Running debug training loop, don't use for model training.")
data_iter = iter(dataset)
for step in range(0, train_steps + 1):
x = next(data_iter)
optimizer.zero_grad()
loss, outputs = model.forward(x)
loss.backward()
optimizer.step()
if scheduler:
scheduler.step()
logging.info(f"Step {step} completed. Loss = {loss}") | null |
22,139 | import typing
from tml.core.loss_type import LossType
from tml.ml_logging.torch_logging import logging
import torch
def _maybe_warn(reduction: str):
_LOSS_TYPE_TO_FUNCTION = {
LossType.BCE_WITH_LOGITS: torch.nn.functional.binary_cross_entropy_with_logits
}
def build_loss(
loss_type: LossType,
reduction="mean",
):
_maybe_warn(reduction)
f = _LOSS_TYPE_TO_FUNCTION[loss_type]
def loss_fn(logits, labels):
return f(logits, labels.type_as(logits), reduction=reduction)
return loss_fn | null |
22,140 | import typing
from tml.core.loss_type import LossType
from tml.ml_logging.torch_logging import logging
import torch
The provided code snippet includes necessary dependencies for implementing the `get_global_loss_detached` function. Write a Python function `def get_global_loss_detached(local_loss, reduction="mean")` to solve the following problem:
Perform all_reduce to obtain the global loss function using the provided reduction. :param local_loss: The local loss of the current rank. :param reduction: The reduction to use for all_reduce. Should match the reduction used by DDP. :return: The reduced & detached global loss.
Here is the function:
def get_global_loss_detached(local_loss, reduction="mean"):
"""
Perform all_reduce to obtain the global loss function using the provided reduction.
:param local_loss: The local loss of the current rank.
:param reduction: The reduction to use for all_reduce. Should match the reduction used by DDP.
:return: The reduced & detached global loss.
"""
if reduction != "mean":
logging.warn(
f"The reduction used in this function should be the same as the one used by "
f"the DDP model. By default DDP uses mean, So ensure that DDP is appropriately"
f"modified for reduction {reduction}."
)
if reduction not in ["mean", "sum"]:
raise ValueError(f"Reduction {reduction} is currently unsupported.")
global_loss = local_loss.detach()
if reduction == "mean":
global_loss.div_(torch.distributed.get_world_size())
torch.distributed.all_reduce(global_loss)
return global_loss | Perform all_reduce to obtain the global loss function using the provided reduction. :param local_loss: The local loss of the current rank. :param reduction: The reduction to use for all_reduce. Should match the reduction used by DDP. :return: The reduced & detached global loss. |
22,141 | import typing
from tml.core.loss_type import LossType
from tml.ml_logging.torch_logging import logging
import torch
def _maybe_warn(reduction: str):
"""
Warning for reduction different than mean.
"""
if reduction != "mean":
logging.warn(
f"For the same global_batch_size, the gradient in DDP is guaranteed to be equal,"
f"to the gradient without DDP only for mean reduction. If you need this property for"
f"the provided reduction {reduction}, it needs to be implemented."
)
_LOSS_TYPE_TO_FUNCTION = {
LossType.BCE_WITH_LOGITS: torch.nn.functional.binary_cross_entropy_with_logits
}
def build_multi_task_loss(
loss_type: LossType,
tasks: typing.List[str],
task_loss_reduction="mean",
global_reduction="mean",
pos_weights=None,
):
_maybe_warn(global_reduction)
_maybe_warn(task_loss_reduction)
f = _LOSS_TYPE_TO_FUNCTION[loss_type]
loss_reduction_fns = {
"mean": torch.mean,
"sum": torch.sum,
"min": torch.min,
"max": torch.max,
"median": torch.median,
}
def loss_fn(logits: torch.Tensor, labels: torch.Tensor, weights: torch.Tensor):
if pos_weights is None:
torch_weights = torch.ones([len(tasks)])
else:
torch_weights = torch.tensor(pos_weights)
losses = {}
for task_idx, task in enumerate(tasks):
task_logits = logits[:, task_idx]
label = labels[:, task_idx].type_as(task_logits)
loss = f(
task_logits,
label,
reduction=task_loss_reduction,
pos_weight=torch_weights[task_idx],
weight=weights[:, task_idx],
)
losses[f"loss/{task}"] = loss
losses["loss"] = loss_reduction_fns[global_reduction](torch.stack(list(losses.values())))
return losses
return loss_fn | null |
22,142 | from abc import abstractmethod
from typing import Callable, Dict, List
from tml.ml_logging.torch_logging import logging
import torch
import torchmetrics
class MetricMixin:
    """Mixin adapting a ``torchmetrics.Metric`` to consume a model-output dict.

    Subclasses implement ``transform`` to map the raw ``outputs`` dict to the
    keyword arguments expected by the wrapped metric's ``update``.
    """

    def transform(self, outputs: Dict[str, torch.Tensor]) -> Dict:
        """Map model outputs to the kwargs of the underlying metric's update."""
        ...

    def update(self, outputs: Dict[str, torch.Tensor]):
        """Transform ``outputs`` and forward them to the base metric's update."""
        results = self.transform(outputs)
        # Do not try to update if any tensor is empty as a result of stratification.
        for value in results.values():
            if torch.is_tensor(value) and not value.nelement():
                return
        super().update(**results)
The provided code snippet includes necessary dependencies for implementing the `prepend_transform` function. Write a Python function `def prepend_transform(base_metric: torchmetrics.Metric, transform: Callable)` to solve the following problem:
Returns new class using MetricMixin and given base_metric. Functionally the same using inheritance, just saves some lines of code if no need for class attributes.
Here is the function:
def prepend_transform(base_metric: torchmetrics.Metric, transform: Callable):
    """Create a ``MetricMixin`` subclass of ``base_metric`` whose ``transform``
    delegates to the given callable.

    Functionally equivalent to writing the subclass by hand; this just saves
    boilerplate when no class attributes are needed.
    """

    def _transform(_self, *args, **kwargs):
        return transform(*args, **kwargs)

    bases = (MetricMixin, base_metric)
    namespace = {"transform": _transform}
    return type(base_metric.__name__, bases, namespace)
22,143 | import abc
from dataclasses import dataclass, field
import logging
from typing import (
Any,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
)
import torch
from torch.autograd.profiler import record_function
from torch.fx.node import Node
from torchrec.distributed.model_parallel import (
DistributedModelParallel,
ShardedModule,
)
from torchrec.distributed.types import Awaitable
from torchrec.modules.feature_processor import BaseGroupedFeatureProcessor
from torchrec.streamable import Multistreamable, Pipelineable
In = TypeVar("In", bound=Pipelineable)
def _to_device(batch: In, device: torch.device, non_blocking: bool) -> In:
    """Move ``batch`` to ``device`` and return it.

    ``batch`` must be a ``torch.Tensor`` or implement the ``Pipelineable``
    interface, whose ``to`` mirrors ``torch.Tensor.to`` (including the
    ``non_blocking`` async-copy flag).
    """
    assert isinstance(
        batch, (torch.Tensor, Pipelineable)
    ), f"{type(batch)} must implement Pipelineable interface"
    # cast() is a no-op at runtime; it only informs the type checker.
    return cast(In, batch.to(device=device, non_blocking=non_blocking))
22,144 | import abc
from dataclasses import dataclass, field
import logging
from typing import (
Any,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
)
import torch
from torch.autograd.profiler import record_function
from torch.fx.node import Node
from torchrec.distributed.model_parallel import (
DistributedModelParallel,
ShardedModule,
)
from torchrec.distributed.types import Awaitable
from torchrec.modules.feature_processor import BaseGroupedFeatureProcessor
from torchrec.streamable import Multistreamable, Pipelineable
In = TypeVar("In", bound=Pipelineable)
def _wait_for_batch(batch: In, stream: Optional[torch.cuda.streams.Stream]) -> None:
    """Make the current CUDA stream wait for ``stream`` (the copy stream) and
    mark ``batch``'s memory as in use by the current stream.

    No-op when ``stream`` is None (e.g. CPU-only execution).
    """
    if stream is None:
        return
    torch.cuda.current_stream().wait_stream(stream)
    # As mentioned in https://pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html,
    # PyTorch uses the "caching allocator" for memory allocation for tensors. When a tensor is
    # freed, its memory is likely to be reused by newly constructed tenosrs. By default,
    # this allocator traces whether a tensor is still in use by only the CUDA stream where it
    # was created. When a tensor is used by additional CUDA streams, we need to call record_stream
    # to tell the allocator about all these streams. Otherwise, the allocator might free the
    # underlying memory of the tensor once it is no longer used by the creator stream. This is
    # a notable programming trick when we write programs using multi CUDA streams.
    cur_stream = torch.cuda.current_stream()
    assert isinstance(
        batch, (torch.Tensor, Multistreamable)
    ), f"{type(batch)} must implement Multistreamable interface"
    batch.record_stream(cur_stream)
22,145 | import abc
from dataclasses import dataclass, field
import logging
from typing import (
Any,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
)
import torch
from torch.autograd.profiler import record_function
from torch.fx.node import Node
from torchrec.distributed.model_parallel import (
DistributedModelParallel,
ShardedModule,
)
from torchrec.distributed.types import Awaitable
from torchrec.modules.feature_processor import BaseGroupedFeatureProcessor
from torchrec.streamable import Multistreamable, Pipelineable
In = TypeVar("In", bound=Pipelineable)
class TrainPipelineContext:
class PipelinedForward:
def __init__(
self,
name: str,
args: List[ArgInfo],
module: ShardedModule,
context: TrainPipelineContext,
dist_stream: Optional[torch.cuda.streams.Stream],
) -> None:
def __call__(self, *input, **kwargs) -> Awaitable:
def name(self) -> str:
def args(self) -> List[ArgInfo]:
def _start_data_dist(
    pipelined_modules: List[ShardedModule],
    batch: In,
    context: TrainPipelineContext,
) -> None:
    """Kick off input distribution (sparse data dist) for every pipelined
    sharded module on ``batch``.

    For each module, arguments are pulled out of ``batch`` using the ArgInfo
    lists captured during model rewriting, a fresh module context is created,
    and the resulting awaitables/contexts are stored in ``context`` for the
    later ``PipelinedForward`` call. Any previous state in ``context`` is
    cleared first.
    """
    context.input_dist_requests.clear()
    context.module_contexts.clear()
    for module in pipelined_modules:
        forward = module.forward
        assert isinstance(forward, PipelinedForward)

        # Retrieve argument for the input_dist of EBC
        # is_getitem True means this argument could be retrieved by a list
        # False means this argument is getting while getattr
        # and this info was done in the _rewrite_model by tracing the
        # entire model to get the arg_info_list
        args = []
        kwargs = {}
        for arg_info in forward.args:
            if arg_info.input_attrs:
                # Walk the attribute/index path from the batch to the actual arg.
                arg = batch
                for attr, is_getitem in zip(arg_info.input_attrs, arg_info.is_getitems):
                    if is_getitem:
                        arg = arg[attr]
                    else:
                        arg = getattr(arg, attr)
                if arg_info.name:
                    kwargs[arg_info.name] = arg
                else:
                    args.append(arg)
            else:
                # Argument could not be traced back to the input; pass None.
                args.append(None)
        # Start input distribution.
        module_ctx = module.create_context()
        context.module_contexts[forward.name] = module_ctx
        context.input_dist_requests[forward.name] = module.input_dist(module_ctx, *args, **kwargs)

    # Call wait on the first awaitable in the input dist for the tensor splits
    for key, awaitable in context.input_dist_requests.items():
        context.input_dist_requests[key] = awaitable.wait()
22,146 | import abc
from dataclasses import dataclass, field
import logging
from typing import (
Any,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
)
import torch
from torch.autograd.profiler import record_function
from torch.fx.node import Node
from torchrec.distributed.model_parallel import (
DistributedModelParallel,
ShardedModule,
)
from torchrec.distributed.types import Awaitable
from torchrec.modules.feature_processor import BaseGroupedFeatureProcessor
from torchrec.streamable import Multistreamable, Pipelineable
# Module-level logger for the train-pipeline rewrite machinery.
logger: logging.Logger = logging.getLogger(__name__)


class Tracer(torch.fx.Tracer):
    """torch.fx tracer that treats ShardedModules (and any explicitly named
    modules) as leaves, so their internals are not traced."""

    # Disable proxying buffers during tracing. Ideally, proxying buffers would
    # be disabled, but some models are currently mutating buffer values, which
    # causes errors during tracing. If those models can be rewritten to not do
    # that, we can likely remove this line
    proxy_buffer_attributes = False

    def __init__(self, leaf_modules: Optional[List[str]] = None) -> None:
        super().__init__()
        # Qualified names of extra modules to treat as leaves (untraced).
        self._leaf_modules: List[str] = leaf_modules if leaf_modules is not None else []

    def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
        """A module is a leaf if it is sharded or explicitly listed as a leaf."""
        if isinstance(m, ShardedModule) or module_qualified_name in self._leaf_modules:
            return True
        return super().is_leaf_module(m, module_qualified_name)
# NOTE: the @dataclass decorator is required here — without it the
# field(default_factory=...) expressions are left as plain dataclasses.Field
# class attributes and TrainPipelineContext() instances never get these attrs.
# (Upstream torchrec defines this class as a dataclass.)
@dataclass
class TrainPipelineContext:
    """Per-batch mutable state shared between pipeline stages: pending
    input-dist awaitables, per-module contexts, and feature-processor forwards
    to apply once the input dist completes."""

    # pyre-ignore [4]
    input_dist_requests: Dict[str, Awaitable[Any]] = field(default_factory=dict)
    module_contexts: Dict[str, Multistreamable] = field(default_factory=dict)
    # pyre-ignore [4]
    feature_processor_forwards: List[Any] = field(default_factory=list)
class PipelinedForward:
    """Replacement ``forward`` for a ShardedModule whose input dist was started
    ahead of time by ``_start_data_dist``.

    When invoked, it waits for the pre-issued input-dist awaitable on the copy
    stream, hands the result over to the current stream, optionally applies
    feature-processor forwards, then runs compute_and_output_dist.
    """

    def __init__(
        self,
        name: str,
        args: List[ArgInfo],
        module: ShardedModule,
        context: TrainPipelineContext,
        dist_stream: Optional[torch.cuda.streams.Stream],
    ) -> None:
        self._name = name
        self._args = args
        self._module = module
        self._context = context
        self._dist_stream = dist_stream

    # pyre-ignore [2, 24]
    def __call__(self, *input, **kwargs) -> Awaitable:
        # The input dist for this module must already have been started.
        assert self._name in self._context.input_dist_requests
        request = self._context.input_dist_requests[self._name]
        assert isinstance(request, Awaitable)
        with record_function("## wait_sparse_data_dist ##"):
            # Finish waiting on the dist_stream,
            # in case some delayed stream scheduling happens during the wait() call.
            with torch.cuda.stream(self._dist_stream):
                data = request.wait()

        # Make sure that both result of input_dist and context
        # are properly transferred to the current stream.
        if self._dist_stream is not None:
            torch.cuda.current_stream().wait_stream(self._dist_stream)
            cur_stream = torch.cuda.current_stream()

            assert isinstance(
                data, (torch.Tensor, Multistreamable)
            ), f"{type(data)} must implement Multistreamable interface"
            # pyre-fixme[6]: For 1st param expected `Stream` but got `Stream`.
            data.record_stream(cur_stream)

            ctx = self._context.module_contexts[self._name]
            ctx.record_stream(cur_stream)

        # Apply any feature processors to id_score_list features before compute.
        if len(self._context.feature_processor_forwards) > 0:
            with record_function("## feature_processor ##"):
                for sparse_feature in data:
                    if sparse_feature.id_score_list_features is not None:
                        for fp_forward in self._context.feature_processor_forwards:
                            sparse_feature.id_score_list_features = fp_forward(
                                sparse_feature.id_score_list_features
                            )

        return self._module.compute_and_output_dist(self._context.module_contexts[self._name], data)

    def name(self) -> str:
        # NOTE(review): plain method here; upstream may expose this as a property — confirm.
        return self._name

    def args(self) -> List[ArgInfo]:
        return self._args
def _get_node_args(
    node: Node, feature_processor_nodes: Optional[List[Node]] = None
) -> Tuple[List[ArgInfo], int]:
    """Collect ArgInfo descriptions for ``node``'s positional and keyword args.

    Returns the combined ArgInfo list and the number of args that could be
    traced back to the model input; callers compare that count against the
    total arg count to decide whether the module can be pipelined.
    Relies on ``_get_node_args_helper`` defined elsewhere in this module.
    """
    num_found = 0
    pos_arg_info_list, num_found = _get_node_args_helper(
        node.args, num_found, feature_processor_nodes
    )
    kwargs_arg_info_list, num_found = _get_node_args_helper(node.kwargs.values(), num_found)

    # Replace with proper names for kwargs
    for name, arg_info_list in zip(node.kwargs, kwargs_arg_info_list):
        arg_info_list.name = name

    # Combined list: positional args first, then kwargs (now named).
    arg_info_list = pos_arg_info_list + kwargs_arg_info_list
    return arg_info_list, num_found
def _get_unsharded_module_names(model: torch.nn.Module) -> List[str]:
    """
    Return the names of top-level modules that do not contain any sharded
    submodules. Delegates the recursive walk to
    ``_get_unsharded_module_names_helper`` (defined elsewhere in this module),
    which accumulates names into the set.
    """
    unsharded_module_names: Set[str] = set()
    _get_unsharded_module_names_helper(
        model,
        "",
        unsharded_module_names,
    )
    return list(unsharded_module_names)
def _rewrite_model(  # noqa C901
    model: torch.nn.Module,
    context: TrainPipelineContext,
    dist_stream: Optional[torch.cuda.streams.Stream],
) -> List[ShardedModule]:
    """Trace ``model`` with fx and swap in ``PipelinedForward`` for every
    sharded module whose arguments can all be traced back to the input batch.

    Returns the list of sharded modules that were successfully pipelined;
    modules whose args could not be fully resolved are left untouched.
    """
    # Get underlying nn.Module
    if isinstance(model, DistributedModelParallel):
        model = model.module

    # Collect a list of sharded modules.
    sharded_modules = {}
    fp_modules = {}
    for name, m in model.named_modules():
        if isinstance(m, ShardedModule):
            sharded_modules[name] = m
        if isinstance(m, BaseGroupedFeatureProcessor):
            fp_modules[name] = m

    # Trace a model. Unsharded top-level modules are kept as leaves.
    tracer = Tracer(leaf_modules=_get_unsharded_module_names(model))
    graph = tracer.trace(model)
    feature_processor_nodes = []
    # find the fp node
    for node in graph.nodes:
        if node.op == "call_module" and node.target in fp_modules:
            feature_processor_nodes.append(node)
    # Select sharded modules, which are top-level in the forward call graph,
    # i.e. which don't have input transformations, i.e.
    # rely only on 'builtins.getattr'.
    ret = []
    for node in graph.nodes:
        if node.op == "call_module" and node.target in sharded_modules:
            total_num_args = len(node.args) + len(node.kwargs)
            if total_num_args == 0:
                continue
            arg_info_list, num_found = _get_node_args(node, feature_processor_nodes)
            # Only pipeline when every argument is traceable to the input.
            if num_found == total_num_args:
                logger.info(f"Module '{node.target}'' will be pipelined")
                child = sharded_modules[node.target]
                child.forward = PipelinedForward(
                    node.target,
                    arg_info_list,
                    child,
                    context,
                    dist_stream,
                )
                ret.append(child)
    return ret
22,147 | import yaml
import string
import getpass
import os
from typing import Type
from tml.core.config.base_config import BaseConfig
The provided code snippet includes necessary dependencies for implementing the `load_config_from_yaml` function. Write a Python function `def load_config_from_yaml(config_type: Type[BaseConfig], yaml_path: str)` to solve the following problem:
Recommended method to load and parse a config file (a yaml file). Because we have a shared filesystem, the recommended route to running jobs is to put modified config files with the desired parameters somewhere on the filesystem and run jobs pointing to them.
Here is the function:
def load_config_from_yaml(config_type: Type[BaseConfig], yaml_path: str):
    """Recommended method to load and parse a config file (a yaml file).

    Because we have a shared filesystem, the recommended route to running jobs
    is to put modified config files with the desired parameters somewhere on
    the filesystem and run jobs pointing to them.

    Environment variables referenced in the yaml (``$VAR``/``${VAR}``) are
    substituted before parsing; ``$USER`` falls back to the current user.
    """

    def _substitute(s):
        # safe_substitute leaves unknown placeholders intact rather than raising.
        return string.Template(s).safe_substitute(os.environ, USER=getpass.getuser())

    with open(yaml_path, "r") as f:
        raw_contents = f.read()
    obj = yaml.safe_load(_substitute(raw_contents))
    return config_type.parse_obj(obj)
22,148 | import datetime
import os
from typing import Callable, Dict, Iterable, List, Mapping, Optional
from tml.common import log_weights
import tml.common.checkpointing.snapshot as snapshot_lib
from tml.core.losses import get_global_loss_detached
from tml.ml_logging.torch_logging import logging
from tml.core.train_pipeline import TrainPipelineSparseDist
import tree
import torch
import torch.distributed as dist
from torch.optim.lr_scheduler import _LRScheduler
import torchmetrics as tm
def get_new_iterator(iterable: Iterable):
    """Obtain a fresh iterator from ``iterable``.

    When the iterable is backed by tf.data.Dataset internally, taking a new
    iterator every N steps avoids a memory leak: iter(iterable) should return
    a "fresh" iterator built on a new instance of tf.data.Iterator. The
    iterable may be a torch.utils.data.IterableDataset or DataLoader.

    With DDS, this reset keeps element order unchanged (excluding already
    prefetched elements) as long as iter(iterable) internally builds a new
    tf.data.Dataset via from_dataset_id — a requirement RecapDataset satisfies.

    :param iterable:
    :return:
    """
    fresh_iterator = iter(iterable)
    return fresh_iterator
def _run_evaluation(
    pipeline,
    dataset,
    eval_steps: int,
    metrics: tm.MetricCollection,
    eval_batch_size: int,
    logger=None,  # NOTE(review): currently unused in this function.
):
    """Runs the evaluation loop over all evaluation iterators.

    Pulls ``eval_steps`` batches through the pipeline (via ``_get_step_fn``,
    defined elsewhere in this module), accumulates ``metrics``, logs the
    examples/sec throughput, and returns the computed metric values.
    """
    dataset = get_new_iterator(dataset)
    step_fn = _get_step_fn(pipeline, dataset, training=False)
    last_time = datetime.datetime.now()
    logging.info(f"Starting {eval_steps} steps of evaluation.")
    for _ in range(eval_steps):
        outputs = step_fn()
        metrics.update(outputs)
    eval_ex_per_s = (
        eval_batch_size * eval_steps / (datetime.datetime.now() - last_time).total_seconds()
    )
    logging.info(f"eval examples_per_s : {eval_ex_per_s}")
    metrics_result = metrics.compute()
    # Resetting at end to release metrics memory not in use.
    # Reset metrics to prevent accumulation between multiple evaluation splits and not report a
    # running average.
    metrics.reset()
    return metrics_result
def log_eval_results(
    results,
    eval_logger,
    partition_name: str,
    step: int,
):
    """Log evaluation metrics to the process log and, if provided, to
    ``eval_logger`` (e.g. a wandb-style logger with a ``log(results, step,
    commit)`` API).

    ``results`` is a (possibly nested) structure of scalars/tensors; every
    leaf is moved to CPU before logging.
    """
    results = tree.map_structure(lambda elem: torch.as_tensor(elem).cpu(), results)
    logging.info(f"Step: {step}, evaluation ({partition_name}).")
    for metric_name, metric_value in results.items():
        logging.info(f"\t{metric_name}: {metric_value:1.4e}")

    if eval_logger:
        eval_logger.log(results, step=step, commit=True)
def only_evaluate(
    model: torch.nn.Module,
    optimizer: torch.optim.Optimizer,
    device: str,
    save_dir: str,
    num_train_steps: int,
    dataset: Iterable,
    eval_batch_size: int,
    num_eval_steps: int,
    eval_timeout_in_s: int,
    eval_logger: Callable,
    partition_name: str,
    metrics: Optional[tm.MetricCollection] = None,
):
    """Evaluation-only loop: watch ``save_dir`` for new training checkpoints,
    restore each one, run evaluation on ``dataset`` and log/mark the results.

    Stops once a checkpoint at or beyond ``num_train_steps`` has been
    evaluated. Rank 0 marks each evaluated checkpoint as done so it is not
    re-evaluated.
    """
    logging.info(f"Evaluating on partition {partition_name}.")
    logging.info("Computing metrics:")
    logging.info(metrics)
    eval_pipeline = TrainPipelineSparseDist(model, optimizer, device)  # type: ignore[var-annotated]
    save_state = {
        "model": eval_pipeline._model,
        "optimizer": eval_pipeline._optimizer,
    }
    checkpoint_handler = snapshot_lib.Snapshot(
        save_dir=save_dir,
        state=save_state,
    )
    # Blocks up to eval_timeout_in_s waiting for each new checkpoint.
    for checkpoint_path in snapshot_lib.checkpoints_iterator(save_dir, timeout=eval_timeout_in_s):
        checkpoint_handler.restore(checkpoint_path)
        step = checkpoint_handler.step
        dataset = get_new_iterator(dataset)
        results = _run_evaluation(
            pipeline=eval_pipeline,
            dataset=dataset,
            eval_steps=num_eval_steps,
            eval_batch_size=eval_batch_size,
            metrics=metrics,
        )
        log_eval_results(results, eval_logger, partition_name, step=step)
        # Only one rank marks the checkpoint as evaluated.
        rank = dist.get_rank() if dist.is_initialized() else 0
        if rank == 0:
            snapshot_lib.mark_done_eval(checkpoint_path, partition_name)
        if step >= num_train_steps:
            return
22,149 | from typing import Any, Dict
from tml.core.metric_mixin import MetricMixin, StratifyMixin, TaskMixin
import torch
import torchmetrics as tm
def probs_and_labels(
    outputs: Dict[str, torch.Tensor],
    task_idx: int,
) -> Dict[str, torch.Tensor]:
    """Extract torchmetrics-style preds/target from a model-output dict.

    When ``task_idx`` is non-negative, the corresponding column is selected
    from the [batch, num_tasks] tensors; otherwise the full tensors are
    returned. Targets are cast to int.
    """
    probabilities = outputs["probabilities"]
    labels = outputs["labels"]
    select_single_task = task_idx >= 0
    preds = probabilities[:, task_idx] if select_single_task else probabilities
    target = labels[:, task_idx] if select_single_task else labels
    return {"preds": preds, "target": target.int()}
22,150 | from absl import app, flags
import json
from typing import Optional
import os
import sys
import torch
from tml.common.device import setup_and_get_device
from tml.common.utils import setup_configuration
import tml.core.custom_training_loop as ctl
import tml.machines.environment as env
from tml.projects.twhin.models.models import apply_optimizers, TwhinModel, TwhinModelAndLoss
from tml.model import maybe_shard_model
from tml.projects.twhin.metrics import create_metrics
from tml.projects.twhin.config import TwhinConfig
from tml.projects.twhin.data.data import create_dataset
from tml.projects.twhin.optimizer import build_optimizer
from tml.ml_logging.torch_logging import logging
import torch.distributed as dist
from torch.nn import functional as F
from torchrec.optim.apply_optimizer_in_backward import apply_optimizer_in_backward
from torchrec.distributed.model_parallel import get_module
def run(
    all_config: TwhinConfig,
    save_dir: Optional[str] = None,
):
    """Role-aware entry point for TwHIN training.

    Reader processes serve the dataset over the network; the chief builds the
    (possibly sharded) model, optimizers, and metrics, then runs the custom
    training loop. Other roles fall through with no work.
    """
    train_dataset = create_dataset(all_config.train_data, all_config.model)

    if env.is_reader():
        train_dataset.serve()
    if env.is_chief():
        device = setup_and_get_device(tf_ok=False)
        logging.info(f"device: {device}")
        logging.info(f"WORLD_SIZE: {dist.get_world_size()}")

        # validation_dataset = create_dataset(all_config.validation_data, all_config.model)
        global_batch_size = all_config.train_data.per_replica_batch_size * dist.get_world_size()
        metrics = create_metrics(device)
        model = TwhinModel(all_config.model, all_config.train_data)

        # Embedding-table optimizers are fused into the backward pass before sharding.
        apply_optimizers(model, all_config.model)
        model = maybe_shard_model(model, device=device)
        optimizer, scheduler = build_optimizer(model=model, config=all_config.model)

        loss_fn = F.binary_cross_entropy_with_logits
        model_and_loss = TwhinModelAndLoss(
            model, loss_fn, data_config=all_config.train_data, device=device
        )

        ctl.train(
            model=model_and_loss,
            optimizer=optimizer,
            device=device,
            save_dir=save_dir,
            logging_interval=all_config.training.train_log_every_n,
            train_steps=all_config.training.num_train_steps,
            checkpoint_frequency=all_config.training.checkpoint_every_n,
            dataset=train_dataset.dataloader(remote=False),
            worker_batch_size=global_batch_size,
            num_workers=0,
            scheduler=scheduler,
            initial_checkpoint_dir=all_config.training.initial_checkpoint_dir,
            gradient_accumulation=all_config.training.gradient_accumulation,
        )
22,151 | from tml.projects.twhin.data.config import TwhinDataConfig
from tml.projects.twhin.models.config import TwhinModelConfig
from tml.projects.twhin.data.edges import EdgesDataset
def create_dataset(data_config: TwhinDataConfig, model_config: TwhinModelConfig):
    """Construct the EdgesDataset for TwHIN training from the data and model
    configs (table sizes come from the embedding tables, relations from the
    model config)."""
    table_sizes = {
        table.name: table.num_embeddings for table in model_config.embeddings.tables
    }
    return EdgesDataset(
        file_pattern=data_config.data_root,
        relations=model_config.relations,
        table_sizes=table_sizes,
        batch_size=data_config.per_replica_batch_size,
    )
22,152 | import functools
from tml.projects.twhin.models.config import TwhinModelConfig
from tml.projects.twhin.models.models import TwhinModel
from tml.optimizers.optimizer import get_optimizer_class, LRShim
from tml.optimizers.config import get_optimizer_algorithm_config, LearningRate
from tml.ml_logging.torch_logging import logging
from torchrec.optim.optimizers import in_backward_optimizer_filter
from torchrec.optim import keyed
# Keys used to label the two sub-optimizers inside the CombinedOptimizer.
FUSED_OPT_KEY = "fused_opt"
TRANSLATION_OPT_KEY = "operator_opt"


def _lr_from_config(optimizer_config):
    """Return the LearningRate schedule for an optimizer config.

    Falls back to a constant schedule built from the algorithm's base ``lr``
    when no explicit ``learning_rate`` is set on the config.
    """
    if optimizer_config.learning_rate is not None:
        return optimizer_config.learning_rate
    else:
        # treat None as constant lr
        lr_value = get_optimizer_algorithm_config(optimizer_config).lr
        return LearningRate(constant=lr_value)
The provided code snippet includes necessary dependencies for implementing the `build_optimizer` function. Write a Python function `def build_optimizer(model: TwhinModel, config: TwhinModelConfig)` to solve the following problem:
Builds an optimizer for a Twhin model, combining the embeddings optimizer with an optimizer for the per-relation translations. Args: model: TwhinModel to build the optimizer for. config: TwhinConfig for the model. Returns: the optimizer for the model.
Here is the function:
def build_optimizer(model: TwhinModel, config: TwhinModelConfig):
    """Builds an optimizer for a Twhin model combining the embeddings optimizer
    with an optimizer for per-relation translations.

    Args:
        model: TwhinModel to build optimizer for.
        config: TwhinConfig for model.

    Returns:
        Tuple of (optimizer, scheduler); the scheduler is currently None
        (LRShim wiring is disabled below).
    """
    # Optimizer for the translation parameters (everything not handled by the
    # fused in-backward embedding optimizers).
    translation_optimizer_fn = functools.partial(
        get_optimizer_class(config.translation_optimizer),
        **get_optimizer_algorithm_config(config.translation_optimizer).dict(),
    )

    translation_optimizer = keyed.KeyedOptimizerWrapper(
        dict(in_backward_optimizer_filter(model.named_parameters())),
        optim_factory=translation_optimizer_fn,
    )

    # Per-table learning-rate schedules; currently only logged since the
    # LRShim scheduler below is disabled.
    lr_dict = {}
    for table in config.embeddings.tables:
        lr_dict[table.name] = _lr_from_config(table.optimizer)
    lr_dict[TRANSLATION_OPT_KEY] = _lr_from_config(config.translation_optimizer)

    logging.info(f"***** LR dict: {lr_dict} *****")

    logging.info(
        f"***** Combining fused optimizer {model.fused_optimizer} with operator optimizer: {translation_optimizer} *****"
    )
    optimizer = keyed.CombinedOptimizer(
        [
            (FUSED_OPT_KEY, model.fused_optimizer),
            (TRANSLATION_OPT_KEY, translation_optimizer),
        ]
    )

    # scheduler = LRShim(optimizer, lr_dict)
    scheduler = None

    logging.info(f"***** Combined optimizer after init: {optimizer} *****")
    return optimizer, scheduler
22,153 | from typing import Callable
import math
from tml.projects.twhin.data.edges import EdgeBatch
from tml.projects.twhin.models.config import TwhinModelConfig
from tml.projects.twhin.data.config import TwhinDataConfig
from tml.common.modules.embedding.embedding import LargeEmbeddings
from tml.optimizers.optimizer import get_optimizer_class
from tml.optimizers.config import get_optimizer_algorithm_config
import torch
from torch import nn
from torchrec.optim.apply_optimizer_in_backward import apply_optimizer_in_backward
class TwhinModel(nn.Module):
    """TransE-style knowledge-graph embedding model over TwHIN edges.

    An edge (lhs, rel, rhs) is scored as dot(lhs, rhs + t_rel), where t_rel is
    a learned per-relation translation. Optionally emits in-batch negative
    scores per relation.
    """

    def __init__(self, model_config: TwhinModelConfig, data_config: TwhinDataConfig):
        super().__init__()
        self.batch_size = data_config.per_replica_batch_size
        self.table_names = [table.name for table in model_config.embeddings.tables]
        self.large_embeddings = LargeEmbeddings(model_config.embeddings)
        # assumes all embedding tables share one embedding_dim — TODO confirm in config validation
        self.embedding_dim = model_config.embeddings.tables[0].embedding_dim
        self.num_tables = len(model_config.embeddings.tables)
        self.in_batch_negatives = data_config.in_batch_negatives
        self.global_negatives = data_config.global_negatives
        self.num_relations = len(model_config.relations)

        # one bias per relation
        self.all_trans_embs = torch.nn.parameter.Parameter(
            torch.nn.init.uniform_(torch.empty(self.num_relations, self.embedding_dim))
        )

    def forward(self, batch: EdgeBatch):
        """Score a batch of positive edges plus optional in-batch negatives.

        Returns a dict with raw "logits" and sigmoid "probabilities"; the B
        positive scores come first, followed by the negative scores appended
        per relation.
        """
        # B x D
        trans_embs = self.all_trans_embs.data[batch.rels]

        # KeyedTensor
        outs = self.large_embeddings(batch.nodes)

        # 2B x TD
        x = outs.values()

        # 2B x T x D
        x = x.reshape(2 * self.batch_size, -1, self.embedding_dim)

        # 2B x D
        x = torch.sum(x, 1)

        # B x 2 x D
        x = x.reshape(self.batch_size, 2, self.embedding_dim)

        # translated
        translated = x[:, 1, :] + trans_embs

        negs = []
        if self.in_batch_negatives:
            # construct dot products for negatives via matmul
            for relation in range(self.num_relations):
                rel_mask = batch.rels == relation
                rel_count = rel_mask.sum()

                if not rel_count:
                    continue

                # R x D
                lhs_matrix = x[rel_mask, 0, :]
                rhs_matrix = x[rel_mask, 1, :]

                lhs_perm = torch.randperm(lhs_matrix.shape[0])
                # repeat until we have enough negatives
                lhs_perm = lhs_perm.repeat(math.ceil(float(self.in_batch_negatives) / rel_count))
                lhs_indices = lhs_perm[: self.in_batch_negatives]
                sampled_lhs = lhs_matrix[lhs_indices]

                rhs_perm = torch.randperm(rhs_matrix.shape[0])
                # repeat until we have enough negatives
                rhs_perm = rhs_perm.repeat(math.ceil(float(self.in_batch_negatives) / rel_count))
                rhs_indices = rhs_perm[: self.in_batch_negatives]
                sampled_rhs = rhs_matrix[rhs_indices]

                # RS
                # NOTE(review): negatives use untranslated embeddings (no t_rel) — confirm intended.
                negs_rhs = torch.flatten(torch.matmul(lhs_matrix, sampled_rhs.t()))
                negs_lhs = torch.flatten(torch.matmul(rhs_matrix, sampled_lhs.t()))

                negs.append(negs_lhs)
                negs.append(negs_rhs)

        # dot product for positives
        x = (x[:, 0, :] * translated).sum(-1)

        # concat positives and negatives
        x = torch.cat([x, *negs])

        return {
            "logits": x,
            "probabilities": torch.sigmoid(x),
        }
def apply_optimizers(model: TwhinModel, model_config: TwhinModelConfig):
    """Attach the per-table optimizer (applied in the backward pass) to each
    embedding table's parameters, as configured per table.

    Must be called before sharding the model. Returns the model for chaining.
    """
    for table in model_config.embeddings.tables:
        optimizer_class = get_optimizer_class(table.optimizer)
        optimizer_kwargs = get_optimizer_algorithm_config(table.optimizer).dict()
        # Select only this table's embedding-bag parameters.
        params = [
            param
            for name, param in model.large_embeddings.ebc.named_parameters()
            if (name.startswith(f"embedding_bags.{table.name}"))
        ]
        apply_optimizer_in_backward(
            optimizer_class=optimizer_class,
            params=params,
            optimizer_kwargs=optimizer_kwargs,
        )
    return model
22,154 | import torch
import torchmetrics as tm
import tml.core.metrics as core_metrics
def create_metrics(
    device: torch.device,
):
    """Assemble the TwHIN evaluation metrics (currently only AUC with 128
    thresholds) as a MetricCollection placed on ``device``."""
    metric_map = {"AUC": core_metrics.Auc(128)}
    return tm.MetricCollection(metric_map).to(device)
22,155 | import datetime
import os
from typing import Callable, List, Optional, Tuple
import tensorflow as tf
import tml.common.checkpointing.snapshot as snapshot_lib
from tml.common.device import setup_and_get_device
from tml.core import config as tml_config_mod
import tml.core.custom_training_loop as ctl
from tml.core import debug_training_loop
from tml.core import losses
from tml.core.loss_type import LossType
from tml.model import maybe_shard_model
import tml.projects.home.recap.data.dataset as ds
import tml.projects.home.recap.config as recap_config_mod
import tml.projects.home.recap.optimizer as optimizer_mod
import tml.projects.home.recap.model as model_mod
import torchmetrics as tm
import torch
import torch.distributed as dist
from torchrec.distributed.model_parallel import DistributedModelParallel
from absl import app, flags, logging
FLAGS = flags.FLAGS
def run(unused_argv: str, data_service_dispatcher: Optional[str] = None):
    """Train the recap ranking model from the YAML config at FLAGS.config_path.

    Builds the multi-task BCE loss, the RecapDataset pipeline (optionally via
    a tf.data service dispatcher), the ranking model, and optimizer, then runs
    the (optionally debug) training loop.
    """
    print("#" * 100)

    config = tml_config_mod.load_config_from_yaml(recap_config_mod.RecapConfig, FLAGS.config_path)
    logging.info("Config: %s", config.pretty_print())

    device = setup_and_get_device()

    # Always enable tensorfloat on supported devices.
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True

    loss_fn = losses.build_multi_task_loss(
        loss_type=LossType.BCE_WITH_LOGITS,
        tasks=list(config.model.tasks.keys()),
        pos_weights=[task.pos_weight for task in config.model.tasks.values()],
    )

    # Since the prod model doesn't use large embeddings, for now we won't support them.
    assert config.model.large_embeddings is None

    train_dataset = ds.RecapDataset(
        data_config=config.train_data,
        dataset_service=data_service_dispatcher,
        mode=recap_config_mod.JobMode.TRAIN,
        compression=config.train_data.dataset_service_compression,
        vocab_mapper=None,
        repeat=True,
    )
    train_iterator = iter(train_dataset.to_dataloader())

    torch_element_spec = train_dataset.torch_element_spec

    model = model_mod.create_ranking_model(
        data_spec=torch_element_spec[0],
        config=config,
        loss_fn=loss_fn,
        device=device,
    )

    optimizer, scheduler = optimizer_mod.build_optimizer(model, config.optimizer, None)

    model = maybe_shard_model(model, device)

    datetime_str = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M")
    # NOTE(review): stdout timestamp looks like a leftover debug print — confirm.
    print(f"{datetime_str}\n", end="")

    if FLAGS.debug_loop:
        logging.warning("Running debug mode, slow!")
        train_mod = debug_training_loop
    else:
        train_mod = ctl

    train_mod.train(
        model=model,
        optimizer=optimizer,
        device=device,
        save_dir=config.training.save_dir,
        logging_interval=config.training.train_log_every_n,
        train_steps=config.training.num_train_steps,
        checkpoint_frequency=config.training.checkpoint_every_n,
        dataset=train_iterator,
        worker_batch_size=config.train_data.global_batch_size,
        enable_amp=False,
        initial_checkpoint_dir=config.training.initial_checkpoint_dir,
        gradient_accumulation=config.training.gradient_accumulation,
        scheduler=scheduler,
    )
22,156 | import os
import json
from absl import app, flags, logging
import tensorflow as tf
from typing import Dict
from tml.projects.home.recap.data import tfe_parsing
from tml.core import config as tml_config_mod
import tml.projects.home.recap.config as recap_config_mod
FLAGS = flags.FLAGS
def generate_data(data_path: str, config: recap_config_mod.RecapConfig):
    """Write one random tf.Example as a gzip TFRecord under ``data_path``.

    The example schema is derived from the training-data config plus the
    segdense schema file it references. Relies on ``_generate_random_example``
    and ``_serialize_example`` defined elsewhere in this module.
    """
    with tf.io.gfile.GFile(config.train_data.seg_dense_schema.schema_path, "r") as f:
        seg_dense_schema = json.load(f)["schema"]

    tf_example_schema = tfe_parsing.create_tf_example_schema(
        config.train_data,
        seg_dense_schema,
    )

    record_filename = os.path.join(data_path, "random.tfrecord.gz")
    with tf.io.TFRecordWriter(record_filename, "GZIP") as writer:
        random_example = _generate_random_example(tf_example_schema)
        serialized_example = _serialize_example(random_example)
        writer.write(serialized_example)
def _generate_data_main(unused_argv):
    """absl entry point: write random data next to the configured train inputs."""
    config = tml_config_mod.load_config_from_yaml(recap_config_mod.RecapConfig, FLAGS.config_path)

    # Find the path where to put the data
    data_path = os.path.dirname(config.train_data.inputs)
    logging.info("Putting random data in %s", data_path)

    generate_data(data_path, config)
22,157 | from typing import Mapping, Tuple, Union
import torch
import torchrec
import numpy as np
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `keyed_tensor_from_tensors_dict` function. Write a Python function `def keyed_tensor_from_tensors_dict( tensor_map: Mapping[str, torch.Tensor] ) -> "torchrec.KeyedTensor"` to solve the following problem:
Convert a dictionary of torch tensors to a torchrec KeyedTensor. Args: tensor_map: mapping from feature name to tensor. Returns: a KeyedTensor keyed by the dictionary's keys.
Here is the function:
def keyed_tensor_from_tensors_dict(
    tensor_map: Mapping[str, torch.Tensor]
) -> "torchrec.KeyedTensor":
    """
    Convert a dictionary of torch tensors to a torchrec KeyedTensor.

    Args:
        tensor_map: mapping from feature name to a tensor whose first
            dimension is the batch size.

    Returns:
        A KeyedTensor keyed by the dictionary's keys.
    """
    keys = list(tensor_map.keys())
    # We expect batch size to be first dim. However, if we get a shape [Batch_size],
    # KeyedTensor will not find the correct batch_size. So, in those cases we make sure the shape is
    # [Batch_size x 1].
    values = [
        tensor_map[key] if len(tensor_map[key].shape) > 1 else torch.unsqueeze(tensor_map[key], -1)
        for key in keys
    ]
    return torchrec.KeyedTensor.from_tensor_list(keys, values)
22,158 | from typing import Mapping, Tuple, Union
import torch
import torchrec
import numpy as np
import tensorflow as tf
def _compute_jagged_tensor_from_tensor(tensor: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
if tensor.is_sparse:
x = tensor.coalesce() # Ensure that the indices are ordered.
lengths = torch.bincount(x.indices()[0])
values = x.values()
else:
values = tensor
lengths = torch.ones(tensor.shape[0], dtype=torch.int32, device=tensor.device)
return values, lengths
The provided code snippet includes necessary dependencies for implementing the `jagged_tensor_from_tensor` function. Write a Python function `def jagged_tensor_from_tensor(tensor: torch.Tensor) -> "torchrec.JaggedTensor"` to solve the following problem:
Convert a torch tensor to torchrec jagged tensor. Note: Currently only support shape of [Batch_size] or [Batch_size x N] for dense tensors. For sparse tensor the shape of .values() should be [Batch_size] or [Batch_size x N]; the dense_shape of the sparse tensor can be arbitrary. Args: tensor: a torch (sparse) tensor. Returns:
Here is the function:
def jagged_tensor_from_tensor(tensor: torch.Tensor) -> "torchrec.JaggedTensor":
    """Convert a torch tensor to a torchrec JaggedTensor.

    Note: Currently only supports shapes of [Batch_size] or [Batch_size x N] for
    dense tensors. For a sparse tensor, .values() should be [Batch_size] or
    [Batch_size x N]; the dense_shape of the sparse tensor can be arbitrary.

    Args:
        tensor: A torch (sparse or dense) tensor.

    Returns:
        The equivalent torchrec.JaggedTensor.
    """
    vals, lens = _compute_jagged_tensor_from_tensor(tensor)
    return torchrec.JaggedTensor(values=vals, lengths=lens)
22,159 | from typing import Mapping, Tuple, Union
import torch
import torchrec
import numpy as np
import tensorflow as tf
def _compute_jagged_tensor_from_tensor(tensor: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Convert a (sparse or dense) tensor into torchrec-style (values, lengths) pairs."""
    if tensor.is_sparse:
        x = tensor.coalesce()  # Ensure that the indices are ordered.
        # Number of stored entries per row is the jagged length of that row.
        lengths = torch.bincount(x.indices()[0])
        values = x.values()
    else:
        # Dense input: one value per example, so all lengths are 1.
        values = tensor
        lengths = torch.ones(tensor.shape[0], dtype=torch.int32, device=tensor.device)
    return values, lengths
The provided code snippet includes necessary dependencies for implementing the `keyed_jagged_tensor_from_tensors_dict` function. Write a Python function `def keyed_jagged_tensor_from_tensors_dict( tensor_map: Mapping[str, torch.Tensor] ) -> "torchrec.KeyedJaggedTensor"` to solve the following problem:
Convert a dictionary of (sparse) torch tensors to torchrec keyed jagged tensor. Note: Currently only supports shapes of [Batch_size] or [Batch_size x 1] for dense tensors. For sparse tensors the shape of .values() should be [Batch_size] or [Batch_size x 1]; the dense_shape of the sparse tensor can be arbitrary. Args: tensor_map: Returns:
Here is the function:
def keyed_jagged_tensor_from_tensors_dict(
    tensor_map: Mapping[str, torch.Tensor]
) -> "torchrec.KeyedJaggedTensor":
    """Convert a dict of (sparse) torch tensors to a torchrec KeyedJaggedTensor.

    Note: Currently only supports shapes of [Batch_size] or [Batch_size x 1] for
    dense tensors. For sparse tensors the shape of .values() should be
    [Batch_size] or [Batch_size x 1]; the dense_shape can be arbitrary.

    Args:
        tensor_map: Mapping from feature name to tensor.

    Returns:
        A torchrec.KeyedJaggedTensor over the map's keys.
    """
    if not tensor_map:
        # Degenerate case: a KJT with no keys and zero-length buffers.
        return torchrec.KeyedJaggedTensor(
            keys=[],
            values=torch.zeros(0, dtype=torch.int),
            lengths=torch.zeros(0, dtype=torch.int),
        )
    per_key = [_compute_jagged_tensor_from_tensor(t) for t in tensor_map.values()]
    # Drop trailing singleton dims from values, then flatten everything key-major.
    all_values = torch.cat([torch.squeeze(vals) for vals, _ in per_key], axis=0)
    all_lengths = torch.cat([lens for _, lens in per_key], axis=0)
    return torchrec.KeyedJaggedTensor(
        keys=list(tensor_map.keys()),
        values=all_values,
        lengths=all_lengths,
    )
22,160 | from typing import Mapping, Tuple, Union
import torch
import torchrec
import numpy as np
import tensorflow as tf
def _tf_to_numpy(tf_tensor: tf.Tensor) -> np.ndarray:
    """Extract the numpy array backing an eager TF tensor.

    NOTE(review): uses TF's private `_numpy()` accessor (hence the noqa);
    presumably chosen over the public `.numpy()` deliberately — confirm.
    """
    return tf_tensor._numpy()  # noqa
def _dense_tf_to_torch(tensor: tf.Tensor, pin_memory: bool) -> torch.Tensor:
    """Copy a dense TF tensor into a torch tensor, optionally pinning host memory."""
    array = _tf_to_numpy(tensor)
    # Pytorch does not support bfloat16; up-casting to float32 keeps the same
    # number of exponent bits.
    if array.dtype.name == "bfloat16":
        array = array.astype(np.float32)
    result = torch.from_numpy(array)
    return result.pin_memory() if pin_memory else result
def sparse_or_dense_tf_to_torch(
    tensor: Union[tf.Tensor, tf.SparseTensor], pin_memory: bool
) -> torch.Tensor:
    """Convert a TF tensor (sparse or dense) into the equivalent torch tensor."""
    if not isinstance(tensor, tf.SparseTensor):
        return _dense_tf_to_torch(tensor, pin_memory)
    # COO layout: torch wants indices shaped [ndim, nnz]; TF stores [nnz, ndim],
    # hence the transpose.
    coo_indices = _dense_tf_to_torch(tensor.indices, pin_memory).t()
    return torch.sparse_coo_tensor(
        coo_indices,
        _dense_tf_to_torch(tensor.values, pin_memory),
        torch.Size(_tf_to_numpy(tensor.dense_shape)),
    )
22,161 | import functools
import json
from tml.projects.home.recap.data import config as recap_data_config
from absl import logging
import tensorflow as tf
def create_tf_example_schema(
    data_config: recap_data_config.SegDenseSchema,
    segdense_schema,
):
    """Generate a schema for deserializing tf.Example records.

    Args:
        data_config: Recap data config carrying the seg-dense schema and task labels.
        segdense_schema: List of dicts of segdense features (includes feature_name,
            dtype, length).

    Returns:
        A dictionary schema suitable for deserializing tf.Example.

    Raises:
        ValueError: if a required feature is missing from `segdense_schema`.
    """
    segdense_config = data_config.seg_dense_schema
    labels = list(data_config.tasks.keys())
    used_features = (
        segdense_config.features + list(segdense_config.renamed_features.values()) + labels
    )
    logging.info(used_features)

    tfe_schema = {}
    for entry in segdense_schema:
        feature_name = entry["feature_name"]
        if feature_name not in used_features:
            continue
        length = entry["length"]
        dtype = entry["dtype"]
        if feature_name in labels:
            logging.info(f"Label: feature name is {feature_name} type is {dtype}")
            spec = tf.io.FixedLenFeature(length, DTYPE_MAP[dtype], DEFAULTS_MAP[dtype])
        elif length == -1:
            # Variable-length feature.
            spec = tf.io.VarLenFeature(DTYPE_MAP[dtype])
        else:
            spec = tf.io.FixedLenFeature(length, DTYPE_MAP[dtype], [DEFAULTS_MAP[dtype]] * length)
        tfe_schema[feature_name] = spec

    # Fail loudly if any requested feature never appeared in the segdense schema.
    for feature_name in used_features:
        if feature_name not in tfe_schema:
            raise ValueError(f"{feature_name} missing from schema: {segdense_config.schema_path}.")
    return tfe_schema
def parse_tf_example(
    serialized_example,
    tfe_schema,
    seg_dense_schema_config,
):
    """Parse a serialized tf.Example into a dict of tensors.

    Args:
        serialized_example: Serialized tf.Example to be parsed.
        tfe_schema: Dictionary schema suitable for deserializing tf.Example.
        seg_dense_schema_config: Seg-dense schema config driving renames and masking.

    Returns:
        Dictionary of tensors to be used as model input.
    """
    inputs = tf.io.parse_example(serialized=serialized_example, features=tfe_schema)

    # Re-key parsed tensors to their configured new names.
    for target_name, source_name in seg_dense_schema_config.renamed_features.items():
        inputs[target_name] = inputs.pop(source_name)

    # This should not actually be used except for experimentation with low precision floats.
    if "mask_mantissa_features" in seg_dense_schema_config:
        for feature_name, mask_length in seg_dense_schema_config.mask_mantissa_features.items():
            inputs[feature_name] = mask_mantissa(inputs[feature_name], mask_length)

    # DANGER DANGER: This default seems really scary, and it's only here because it has to be visible
    # at TF level.
    # We should not return empty tensors if we dont use embeddings.
    # Otherwise, it breaks numpy->pt conversion
    for target_name in seg_dense_schema_config.renamed_features:
        if "embedding" in target_name and (target_name not in inputs):
            inputs[target_name] = tf.zeros([], tf.float32)

    logging.info(f"parsed example and inputs are {inputs}")
    return inputs
The provided code snippet includes necessary dependencies for implementing the `get_seg_dense_parse_fn` function. Write a Python function `def get_seg_dense_parse_fn(data_config: recap_data_config.RecapDataConfig)` to solve the following problem:
Placeholder for seg dense. In the future, when we use more seg dense variations, we can change this.
Here is the function:
def get_seg_dense_parse_fn(data_config: recap_data_config.RecapDataConfig):
    """Placeholder for seg dense.

    In the future, when we use more seg dense variations, we can change this.
    """
    with tf.io.gfile.GFile(data_config.seg_dense_schema.schema_path, "r") as schema_file:
        segdense_entries = json.load(schema_file)["schema"]
    example_schema = create_tf_example_schema(
        data_config,
        segdense_entries,
    )
    logging.info("***** TF Example Schema *****")
    logging.info(example_schema)
    # Bind everything except the serialized example itself.
    return functools.partial(
        parse_tf_example,
        tfe_schema=example_schema,
        seg_dense_schema_config=data_config.seg_dense_schema,
    )
22,162 | from tml.projects.home.recap import config as config_mod
from absl import logging
import tensorflow as tf
import numpy as np
class TruncateAndSlice(tf.keras.Model):
    """Class for truncating and slicing.

    Optionally truncates trailing columns of the "continuous" and "binary"
    tensors, then keeps only the columns selected by optional mask files.
    """

    def __init__(self, truncate_and_slice_config):
        super().__init__()
        self._truncate_and_slice_config = truncate_and_slice_config

        if self._truncate_and_slice_config.continuous_feature_mask_path:
            with tf.io.gfile.GFile(
                self._truncate_and_slice_config.continuous_feature_mask_path, "rb"
            ) as f:
                # nonzero()[0] turns the stored boolean mask into column indices.
                self._continuous_mask = np.load(f).nonzero()[0]
            # BUGFIX: previously logged np.sum(self._continuous_mask), which sums
            # the index values instead of counting the selected features.
            logging.info(f"Slicing {len(self._continuous_mask)} continuous features.")
        else:
            self._continuous_mask = None

        if self._truncate_and_slice_config.binary_feature_mask_path:
            with tf.io.gfile.GFile(self._truncate_and_slice_config.binary_feature_mask_path, "rb") as f:
                self._binary_mask = np.load(f).nonzero()[0]
            # BUGFIX: same index-sum vs. count fix as above.
            logging.info(f"Slicing {len(self._binary_mask)} binary features.")
        else:
            self._binary_mask = None

    def call(self, inputs, training=None, mask=None):
        # Rebuild the nested structure (fresh containers, same leaf tensors) so
        # key assignments below do not mutate the caller's dict.
        outputs = tf.nest.pack_sequence_as(inputs, tf.nest.flatten(inputs))
        if self._truncate_and_slice_config.continuous_feature_truncation:
            logging.info("Truncating continuous")
            outputs["continuous"] = outputs["continuous"][
                :, : self._truncate_and_slice_config.continuous_feature_truncation
            ]
        if self._truncate_and_slice_config.binary_feature_truncation:
            logging.info("Truncating binary")
            outputs["binary"] = outputs["binary"][
                :, : self._truncate_and_slice_config.binary_feature_truncation
            ]
        if self._continuous_mask is not None:
            outputs["continuous"] = tf.gather(outputs["continuous"], self._continuous_mask, axis=1)
        if self._binary_mask is not None:
            outputs["binary"] = tf.gather(outputs["binary"], self._binary_mask, axis=1)
        return outputs
class DownCast(tf.keras.Model):
    """Class for Down casting dataset before serialization and transferring to training host.

    Depends on the data type and the actual data range, the down casting can be lossless or not.
    It is strongly recommended to compare the metrics before and after down casting.
    """

    def __init__(self, downcast_config):
        super().__init__()
        self.config = downcast_config
        # Supported cast targets; extend this map when new dtypes are needed.
        self._type_map = {
            "bfloat16": tf.bfloat16,
            "bool": tf.bool,
        }

    def call(self, inputs, training=None, mask=None):
        # Rebuild the structure so the caller's dict is not mutated in place.
        outputs = tf.nest.pack_sequence_as(inputs, tf.nest.flatten(inputs))
        for feature, type_str in self.config.features.items():
            # BUGFIX: this was `assert type_str in self._type_map`, which is
            # silently stripped under `python -O`; validate explicitly instead.
            if type_str not in self._type_map:
                raise ValueError(f"Unsupported downcast type {type_str!r} for feature {feature!r}.")
            if type_str == "bfloat16":
                logging.warning(
                    "Although bfloat16 and float32 have the same number of exponent bits, this down casting is not 100% lossless. Please double check metrics."
                )
            down_cast_data_type = self._type_map[type_str]
            outputs[feature] = tf.cast(outputs[feature], dtype=down_cast_data_type)
        return outputs
class RectifyLabels(tf.keras.Model):
    """Zeroes out engagement labels that fall outside the rectification window."""

    def __init__(self, rectify_label_config):
        super().__init__()
        self._config = rectify_label_config
        # Window configured in hours, converted to milliseconds (x 3600 x 1000).
        self._window = int(self._config.label_rectification_window_in_hours * 60 * 60 * 1000)

    def call(self, inputs, training=None, mask=None):
        # Served/impressed timestamps are shared across all labels; fetch once.
        served = inputs[self._config.served_timestamp_field]
        impressed = inputs[self._config.impressed_timestamp_field]
        for label, engaged_field in self._config.label_to_engaged_timestamp_field.items():
            engaged = inputs[engaged_field]
            # Keep a positive label only if both the impression and the
            # engagement happened within the window of the serve time.
            keep = tf.math.logical_and(inputs[label] > 0, impressed - served < self._window)
            keep = tf.math.logical_and(keep, engaged - served < self._window)
            inputs[label] = tf.where(keep, inputs[label], tf.zeros_like(inputs[label]))
        return inputs
class ExtractFeatures(tf.keras.Model):
    """Extracts individual named features from dense tensors by column index."""

    def __init__(self, extract_features_config):
        super().__init__()
        self._config = extract_features_config

    def call(self, inputs, training=None, mask=None):
        # Each table row names a source tensor and the column to pull out of it.
        for entry in self._config.extract_feature_table:
            inputs[entry.name] = inputs[entry.source_tensor][:, entry.index]
        return inputs
class DownsampleNegatives(tf.keras.Model):
    """Class for down-sampling/dropping negatives and updating the weights.

    If inputs['fav'] = [1, 0, 0, 0] and inputs['weights'] = [1.0, 1.0, 1.0, 1.0]
    inputs are transformed to inputs['fav'] = [1, 0] and inputs['weights'] = [1.0, 3.0]
    when batch_multiplier=2 and engagements_list=['fav']
    It supports multiple engagements (union/logical_or is used to aggregate engagements), so we don't
    drop positives for any engagement.
    """

    def __init__(self, downsample_negatives_config):
        super().__init__()
        self.config = downsample_negatives_config

    def call(self, inputs, training=None, mask=None):
        labels = self.config.engagements_list
        # union of engagements
        # NOTE: this rebinds the keras `mask` argument; the boolean mask below is
        # True for examples that are positive for at least one engagement.
        mask = tf.squeeze(tf.reduce_any(tf.stack([inputs[label] == 1 for label in labels], 1), 1))
        n_positives = tf.reduce_sum(tf.cast(mask, tf.int32))
        # Target batch size after down-sampling: incoming batch / batch_multiplier.
        batch_size = tf.cast(tf.shape(inputs[labels[0]])[0] / self.config.batch_multiplier, tf.int32)
        # Up-weight the surviving negatives so the weighted negative mass matches
        # the pre-down-sampling batch; divide_no_nan guards batch_size == n_positives.
        negative_weights = tf.math.divide_no_nan(
            tf.cast(self.config.batch_multiplier * batch_size - n_positives, tf.float32),
            tf.cast(batch_size - n_positives, tf.float32),
        )
        # Positives keep weight 1.0; negatives get `negative_weights`.
        new_weights = tf.cast(mask, tf.float32) + (1 - tf.cast(mask, tf.float32)) * negative_weights

        def _split_by_label_concatenate_and_truncate(input_tensor):
            # takes positive examples and concatenate with negative examples and truncate
            # DANGER: if n_positives > batch_size down-sampling is incorrect (do not use pb_50)
            return tf.concat(
                [
                    input_tensor[mask],
                    input_tensor[tf.math.logical_not(mask)],
                ],
                0,
            )[:batch_size]

        if "weights" not in inputs:
            # add placeholder so logic below applies even if weights aren't present in inputs
            inputs["weights"] = tf.ones([tf.shape(inputs[labels[0]])[0], self.config.num_engagements])

        # Reorder (positives first) and truncate every tensor in the batch so all
        # features stay row-aligned; weights are additionally rescaled first.
        for tensor in inputs:
            if tensor == "weights":
                inputs[tensor] = inputs[tensor] * tf.reshape(new_weights, [-1, 1])
            inputs[tensor] = _split_by_label_concatenate_and_truncate(inputs[tensor])
        return inputs
The provided code snippet includes necessary dependencies for implementing the `build_preprocess` function. Write a Python function `def build_preprocess(preprocess_config, mode=config_mod.JobMode.TRAIN)` to solve the following problem:
Builds a preprocess model to apply all preprocessing stages.
Here is the function:
def build_preprocess(preprocess_config, mode=config_mod.JobMode.TRAIN):
    """Builds a preprocess model to apply all preprocessing stages."""
    if mode == config_mod.JobMode.INFERENCE:
        logging.info("Not building preprocessors for dataloading since we are in Inference mode.")
        return None

    # Order matters: stages are applied in exactly this sequence.
    stage_specs = [
        (preprocess_config.downsample_negatives, DownsampleNegatives),
        (preprocess_config.truncate_and_slice, TruncateAndSlice),
        (preprocess_config.downcast, DownCast),
        (preprocess_config.rectify_labels, RectifyLabels),
        (preprocess_config.extract_features, ExtractFeatures),
    ]
    stages = [stage_cls(stage_cfg) for stage_cfg, stage_cls in stage_specs if stage_cfg]
    if not stages:
        raise ValueError("No known preprocessor.")

    class PreprocessModel(tf.keras.Model):
        """Applies the configured preprocessing stages in sequence."""

        def __init__(self, preprocess_models):
            super().__init__()
            self.preprocess_models = preprocess_models

        def call(self, inputs, training=None, mask=None):
            outputs = inputs
            for model in self.preprocess_models:
                outputs = model(outputs, training, mask)
            return outputs

    if len(stages) > 1:
        logging.warning(
            "With multiple preprocessing models, we apply these models in a predefined order. Future works may introduce customized models and orders."
        )
    return PreprocessModel(stages)
22,163 | from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple, Dict
import functools
import torch
import tensorflow as tf
from tml.common.batch import DataclassBatch
from tml.projects.home.recap.data.config import RecapDataConfig, TaskData
from tml.projects.home.recap.data import preprocessors
from tml.projects.home.recap.config import JobMode
from tml.projects.home.recap.data.tfe_parsing import get_seg_dense_parse_fn
from tml.projects.home.recap.data.util import (
keyed_jagged_tensor_from_tensors_dict,
sparse_or_dense_tf_to_torch,
)
from absl import logging
import torch.distributed as dist
class RecapBatch(DataclassBatch):
    """Holds features and labels from the Recap dataset."""

    # Dense feature tensors, one row per example.
    continuous_features: torch.Tensor
    binary_features: torch.Tensor
    discrete_features: torch.Tensor
    # Sparse id features; annotated as a string since the name is not imported
    # in this module (hence the noqa: F821).
    sparse_features: "KeyedJaggedTensor"  # type: ignore[name-defined] # noqa: F821
    labels: torch.Tensor
    # Optional precomputed embeddings; None is replaced by an empty [0, 0]
    # tensor in __post_init__.
    user_embedding: torch.Tensor = None
    user_eng_embedding: torch.Tensor = None
    author_embedding: torch.Tensor = None
    # Per-example weights; defaults to all-ones with the shape of `labels`.
    weights: torch.Tensor = None

    def __post_init__(self):
        """Fill in defaults that depend on other fields."""
        if self.weights is None:
            self.weights = torch.ones_like(self.labels)
        # Replace None embeddings with empty tensors so every field is a tensor.
        for feature_name, feature_value in self.as_dict().items():
            if ("embedding" in feature_name) and (feature_value is None):
                setattr(self, feature_name, torch.empty([0, 0]))
The provided code snippet includes necessary dependencies for implementing the `to_batch` function. Write a Python function `def to_batch(x, sparse_feature_names: Optional[List[str]] = None) -> RecapBatch` to solve the following problem:
Converts a torch data loader output into `RecapBatch`.
Here is the function:
def to_batch(x, sparse_feature_names: Optional[List[str]] = None) -> RecapBatch:
    """Converts a torch data loader output into `RecapBatch`.

    Args:
        x: Either a (features, labels) tuple, or just features in inference mode.
            Leaves are TF sparse/dense tensors and are converted to torch.
        sparse_feature_names: Names of features to pack into the sparse
            KeyedJaggedTensor; None/empty yields an empty KJT.

    Returns:
        A populated `RecapBatch`.

    Raises:
        ValueError: if an embedding is supplied both precomputed and as a
            learned sparse feature.
    """
    x = tf.nest.map_structure(functools.partial(sparse_or_dense_tf_to_torch, pin_memory=False), x)
    try:
        features_in, labels = x
    except ValueError:
        # For Mode.INFERENCE, we do not expect to receive labels as part of the input tuple
        features_in, labels = x, None

    sparse_features = keyed_jagged_tensor_from_tensors_dict({})
    if sparse_feature_names:
        sparse_features = keyed_jagged_tensor_from_tensors_dict(
            {embedding_name: features_in[embedding_name] for embedding_name in sparse_feature_names}
        )

    def _precomputed_embedding(feature_name: str, sparse_id_name: str):
        # Returns the precomputed embedding tensor (or None), rejecting the case
        # where the same entity's embedding is also requested as a sparse feature.
        if feature_name not in features_in:
            return None
        if sparse_feature_names and sparse_id_name in sparse_feature_names:
            # BUGFIX: the original raised "... for user ..." in all three
            # branches, which was misleading for the eng/author embeddings.
            raise ValueError(f"Only one source of embedding for {feature_name} is supported")
        return features_in[feature_name]

    user_embedding = _precomputed_embedding("user_embedding", "meta__user_id")
    user_eng_embedding = _precomputed_embedding("user_eng_embedding", "meta__user_eng_id")
    author_embedding = _precomputed_embedding("author_embedding", "meta__author_id")

    return RecapBatch(
        continuous_features=features_in["continuous"],
        binary_features=features_in["binary"],
        discrete_features=features_in["discrete"],
        sparse_features=sparse_features,
        user_embedding=user_embedding,
        user_eng_embedding=user_eng_embedding,
        author_embedding=author_embedding,
        labels=labels,
        weights=features_in.get("weights", None),  # Defaults to torch.ones_like(labels)
    )
22,164 | from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple, Dict
import functools
import torch
import tensorflow as tf
from tml.common.batch import DataclassBatch
from tml.projects.home.recap.data.config import RecapDataConfig, TaskData
from tml.projects.home.recap.data import preprocessors
from tml.projects.home.recap.config import JobMode
from tml.projects.home.recap.data.tfe_parsing import get_seg_dense_parse_fn
from tml.projects.home.recap.data.util import (
keyed_jagged_tensor_from_tensors_dict,
sparse_or_dense_tf_to_torch,
)
from absl import logging
import torch.distributed as dist
The provided code snippet includes necessary dependencies for implementing the `_chain` function. Write a Python function `def _chain(param, f1, f2)` to solve the following problem:
Reduce multiple functions into one chained function _chain(x, f1, f2) -> f2(f1(x))
Here is the function:
def _chain(param, f1, f2):
"""
Reduce multiple functions into one chained function
_chain(x, f1, f2) -> f2(f1(x))
"""
output = param
fns = [f1, f2]
for f in fns:
output = f(output)
return output | Reduce multiple functions into one chained function _chain(x, f1, f2) -> f2(f1(x)) |
22,165 | from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple, Dict
import functools
import torch
import tensorflow as tf
from tml.common.batch import DataclassBatch
from tml.projects.home.recap.data.config import RecapDataConfig, TaskData
from tml.projects.home.recap.data import preprocessors
from tml.projects.home.recap.config import JobMode
from tml.projects.home.recap.data.tfe_parsing import get_seg_dense_parse_fn
from tml.projects.home.recap.data.util import (
keyed_jagged_tensor_from_tensors_dict,
sparse_or_dense_tf_to_torch,
)
from absl import logging
import torch.distributed as dist
The provided code snippet includes necessary dependencies for implementing the `_add_weights` function. Write a Python function `def _add_weights(inputs, tasks: Dict[str, TaskData])` to solve the following problem:
Adds weights based on label sampling for positive and negatives. This is useful for numeric calibration etc. This mutates inputs. Args: inputs: A dictionary of strings to tensor-like structures. tasks: A dict of string (label) to `TaskData` specifying inputs. Returns: A tuple of features and labels; weights are added to features.
Here is the function:
def _add_weights(inputs, tasks: Dict[str, TaskData]):
    """Adds per-task weights derived from label down-sampling rates.

    Positives are weighted by 1/pos_downsampling_rate and negatives by
    1/neg_downsampling_rate, which is useful for numeric calibration etc.
    This mutates `inputs`.

    Args:
        inputs: A dictionary of strings to tensor-like structures.
        tasks: A dict of string (label) to `TaskData` specifying inputs.

    Returns:
        The mutated `inputs` with a "weights" entry added.
    """
    per_task = []
    for label_name, task in tasks.items():
        positive = tf.cast(inputs[label_name], tf.float32)
        per_task.append(
            positive / task.pos_downsampling_rate + (1.0 - positive) / task.neg_downsampling_rate
        )
    # Ensure we are batch-major (assumes we batch before this call).
    inputs["weights"] = tf.squeeze(tf.transpose(tf.convert_to_tensor(per_task)), axis=0)
    return inputs
22,166 | from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple, Dict
import functools
import torch
import tensorflow as tf
from tml.common.batch import DataclassBatch
from tml.projects.home.recap.data.config import RecapDataConfig, TaskData
from tml.projects.home.recap.data import preprocessors
from tml.projects.home.recap.config import JobMode
from tml.projects.home.recap.data.tfe_parsing import get_seg_dense_parse_fn
from tml.projects.home.recap.data.util import (
keyed_jagged_tensor_from_tensors_dict,
sparse_or_dense_tf_to_torch,
)
from absl import logging
import torch.distributed as dist
def get_datetimes(explicit_datetime_inputs):
    """Compute the list of datetime strings for train/validation data.

    Produces one "%Y/%m/%d/%H" string per hour, covering the `hours` hours
    strictly before `end_datetime`, in ascending order.
    """
    fmt = "%Y/%m/%d/%H"
    end = datetime.strptime(explicit_datetime_inputs.end_datetime, fmt)
    num_hours = int(explicit_datetime_inputs.hours)
    # Walk backwards one hour at a time, then sort ascending.
    stamps = [end - timedelta(hours=offset) for offset in range(1, num_hours + 1)]
    return sorted(stamp.strftime(fmt) for stamp in stamps)
The provided code snippet includes necessary dependencies for implementing the `get_explicit_datetime_inputs_files` function. Write a Python function `def get_explicit_datetime_inputs_files(explicit_datetime_inputs)` to solve the following problem:
Compile list of files for training/validation. Used with DataConfigs that use the `explicit_datetime_inputs` format to specify data. For each hour of data, if the directory is missing or empty, we increment a counter to keep track of the number of missing data hours. Returns only files with a `.gz` extension. Args: explicit_datetime_inputs: An `ExplicitDatetimeInputs` object within a `datasets.DataConfig` object Returns: data_files: Sorted list of files to read corresponding to data at the desired datetimes num_hours_missing: Number of hours that we are missing data
Here is the function:
def get_explicit_datetime_inputs_files(explicit_datetime_inputs):
    """Compile the list of files for training/validation.

    Used with DataConfigs that use the `explicit_datetime_inputs` format. For
    each hour of data, a missing or empty directory increments a counter of
    missing data hours. Only files with a `.gz` extension are returned.

    Args:
        explicit_datetime_inputs: An `ExplicitDatetimeInputs` object within a
            `datasets.DataConfig` object.

    Returns:
        Tuple of (sorted list of data files, number of missing data hours).
    """
    hour_strings = get_datetimes(explicit_datetime_inputs)
    candidate_dirs = [os.path.join(explicit_datetime_inputs.data_root, hour) for hour in hour_strings]
    collected = []
    num_hours_missing = 0
    for folder in candidate_dirs:
        try:
            entries = tf.io.gfile.listdir(folder)
        except tf.errors.NotFoundError as e:
            num_hours_missing += 1
            logging.warning(f"Cannot find directory {folder}. Missing one hour of data. Error: \n {e}")
            continue
        if not entries:
            logging.warning(f"{folder} contained no data files")
            num_hours_missing += 1
        collected.extend(
            os.path.join(folder, entry)
            for entry in entries
            if entry.rsplit(".", 1)[-1].lower() == "gz"
        )
    return sorted(collected), num_hours_missing
22,167 | from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple, Dict
import functools
import torch
import tensorflow as tf
from tml.common.batch import DataclassBatch
from tml.projects.home.recap.data.config import RecapDataConfig, TaskData
from tml.projects.home.recap.data import preprocessors
from tml.projects.home.recap.config import JobMode
from tml.projects.home.recap.data.tfe_parsing import get_seg_dense_parse_fn
from tml.projects.home.recap.data.util import (
keyed_jagged_tensor_from_tensors_dict,
sparse_or_dense_tf_to_torch,
)
from absl import logging
import torch.distributed as dist
def _map_output_for_inference(
    inputs, tasks: Dict[str, TaskData], preprocessor: tf.keras.Model = None, add_weights: bool = False
):
    """Strips label tensors for inference and attaches all-zero example weights."""
    if preprocessor:
        raise ValueError("No preprocessor should be used at inference time.")
    if add_weights:
        raise NotImplementedError()

    # Add zero weights shaped like a single column of the continuous features.
    first_column = tf.expand_dims(inputs["continuous"][:, 0], -1)
    inputs["weights"] = tf.zeros_like(first_column)
    # Labels are not available (nor needed) at inference time.
    for task_name in tasks:
        del inputs[task_name]
    return inputs
22,168 | from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple, Dict
import functools
import torch
import tensorflow as tf
from tml.common.batch import DataclassBatch
from tml.projects.home.recap.data.config import RecapDataConfig, TaskData
from tml.projects.home.recap.data import preprocessors
from tml.projects.home.recap.config import JobMode
from tml.projects.home.recap.data.tfe_parsing import get_seg_dense_parse_fn
from tml.projects.home.recap.data.util import (
keyed_jagged_tensor_from_tensors_dict,
sparse_or_dense_tf_to_torch,
)
from absl import logging
import torch.distributed as dist
def _add_weights_based_on_sampling_rates(inputs, tasks: Dict[str, TaskData]):
    """Adds weights based on label sampling for positive and negatives.

    This is useful for numeric calibration etc. This mutates inputs.

    Args:
        inputs: A dictionary of strings to tensor-like structures.
        tasks: A dict of string (label) to `TaskData` specifying inputs.

    Returns:
        The mutated `inputs` dict with a "weights" entry added.
    """
    weights = []
    for key, task in tasks.items():
        label = inputs[key]
        float_label = tf.cast(label, tf.float32)
        # Positives weighted by 1/pos_downsampling_rate, negatives by 1/neg_downsampling_rate.
        weights.append(
            float_label / task.pos_downsampling_rate + (1.0 - float_label) / task.neg_downsampling_rate
        )
    # Ensure we are batch-major (assumes we batch before this call).
    inputs["weights"] = tf.squeeze(tf.transpose(tf.convert_to_tensor(weights)), axis=0)
    return inputs
def _map_output_for_train_eval(
    inputs, tasks: Dict[str, TaskData], preprocessor: tf.keras.Model = None, add_weights: bool = False
):
    """Splits parsed inputs into (features, stacked label tensor) for train/eval."""
    if add_weights:
        inputs = _add_weights_based_on_sampling_rates(inputs, tasks)

    # Warning this has to happen first as it changes the input
    if preprocessor:
        inputs = preprocessor(inputs)

    stacked_labels = tf.stack([inputs[task_name] for task_name in tasks], axis=1)
    label_values = tf.squeeze(stacked_labels, axis=[-1])
    # Labels leave the feature dict and travel separately.
    for task_name in tasks:
        del inputs[task_name]
    return inputs, label_values
22,169 | import bisect
from collections import defaultdict
import functools
import math
import typing
from typing import Optional
import warnings
from tml.projects.home.recap import model as model_mod
from tml.optimizers import config
from tml.optimizers import compute_lr
from absl import logging
import torch
from torchrec.optim import keyed
_DEFAULT_LR = 24601.0
_BACKBONE = "backbone"
_DENSE_EMBEDDINGS = "dense_ebc"
class RecapLRShim(torch.optim.lr_scheduler._LRScheduler):
    """Shim to get learning rates into a LRScheduler.

    This adheres to the torch.optim scheduler API and can be plugged anywhere that
    e.g. exponential decay can be used.
    """

    def __init__(
        self,
        optimizer,
        lr_dict: typing.Dict[str, config.LearningRate],
        emb_learning_rate,
        last_epoch=-1,
        verbose=False,
    ):
        self.optimizer = optimizer
        self.lr_dict = lr_dict
        self.group_names = list(self.lr_dict.keys())
        self.emb_learning_rate = emb_learning_rate

        # We handle sparse LR scheduling separately, so only validate LR groups
        # against dense param groups.
        num_dense_param_groups = sum(
            1
            for _, _optim in optimizer._optims
            for _ in _optim.param_groups
            if isinstance(_optim, keyed.KeyedOptimizerWrapper)
        )
        if num_dense_param_groups != len(lr_dict):
            # BUGFIX: the message previously reported len(optimizer.param_groups),
            # which is not the quantity checked above; report the dense-group count.
            raise ValueError(
                f"Optimizer had {num_dense_param_groups} dense param groups, "
                f"but config had {len(lr_dict)}."
            )
        super().__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        """Return the scheduled learning rates for the current step."""
        if not self._get_lr_called_within_step:
            warnings.warn(
                "To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.",
                UserWarning,
            )
        return self._get_closed_form_lr()

    def _get_closed_form_lr(self):
        """Compute one LR per dense group (plus the embedding LR, if configured)."""
        learning_rates = []
        for lr_config in self.lr_dict.values():
            learning_rates.append(compute_lr(lr_config, self.last_epoch))
        # WARNING: The order of appending is important.
        if self.emb_learning_rate:
            learning_rates.append(compute_lr(self.emb_learning_rate, self.last_epoch))
        return learning_rates
The provided code snippet includes necessary dependencies for implementing the `build_optimizer` function. Write a Python function `def build_optimizer( model: torch.nn.Module, optimizer_config: config.OptimizerConfig, emb_optimizer_config: None = None, # Optional[EmbeddingOptimizerConfig] = None, )` to solve the following problem:
Builds an optimizer and scheduler. Args: model: A torch model, probably with DDP/DMP. optimizer_config: An OptimizerConfig object that specifies learning rates per tower. Returns: A torch.optim instance, and a scheduler instance.
Here is the function:
def build_optimizer(
  model: torch.nn.Module,
  optimizer_config: config.OptimizerConfig,
  emb_optimizer_config: None = None,  # Optional[EmbeddingOptimizerConfig] = None,
):
  """Builds an optimizer and scheduler.

  Args:
    model: A torch model, probably with DDP/DMP.
    optimizer_config: An OptimizerConfig object that specifies learning rates per tower.
    emb_optimizer_config: Optional embedding-optimizer config; supplies the LR
      for dense-embedding and fused (sparse) optimizers when present.

  Returns:
    A torch.optim instance, and a scheduler instance.

  Raises:
    ValueError: If a parameter matches multiple tasks, if the parameter groups
      do not line up with the configured learning rates, or if a fused kernel
      exists without a configured embedding LR.
  """
  # NOTE(review): lr=_DEFAULT_LR looks like a sentinel that the RecapLRShim
  # scheduler is expected to overwrite every step -- confirm.
  optimizer_fn = functools.partial(
    torch.optim.Adam,
    lr=_DEFAULT_LR,
    betas=(optimizer_config.adam.beta_1, optimizer_config.adam.beta_2),
    eps=optimizer_config.adam.epsilon,
    maximize=False,
  )
  if optimizer_config.multi_task_learning_rates:
    logging.info("***** Parameter groups for optimization *****")
    # Importantly, we preserve insertion order in dictionaries here.
    parameter_groups: typing.Dict[str, typing.Dict] = defaultdict(dict)
    added_parameters: typing.Set[str] = set()
    # Pass 1: assign parameters to their tower by sanitized task-name match.
    for task in optimizer_config.multi_task_learning_rates.tower_learning_rates:
      for name, parameter in model.named_parameters():
        if f".{model_mod.sanitize(task)}." in name:
          parameter_groups[task][name] = parameter
          logging.info(f"{task}: {name}")
          if name in added_parameters:
            raise ValueError(f"Parameter {name} matched multiple tasks.")
          added_parameters.add(name)
    # Pass 2: everything unmatched that is not an embedding bag -> backbone group.
    for name, parameter in model.named_parameters():
      if name not in added_parameters and "embedding_bags" not in name:
        parameter_groups[_BACKBONE][name] = parameter
        added_parameters.add(name)
        logging.info(f"{_BACKBONE}: {name}")
    # Pass 3: remaining embedding-bag parameters -> dense-embedding group.
    for name, parameter in model.named_parameters():
      if name not in added_parameters and "embedding_bags" in name:
        parameter_groups[_DENSE_EMBEDDINGS][name] = parameter
        logging.info(f"{_DENSE_EMBEDDINGS}: {name}")
    all_learning_rates = optimizer_config.multi_task_learning_rates.tower_learning_rates.copy()
    if optimizer_config.multi_task_learning_rates.backbone_learning_rate is not None:
      all_learning_rates[
        _BACKBONE
      ] = optimizer_config.multi_task_learning_rates.backbone_learning_rate
    if _DENSE_EMBEDDINGS in parameter_groups and emb_optimizer_config:
      all_learning_rates[_DENSE_EMBEDDINGS] = emb_optimizer_config.learning_rate.copy()
  else:
    # Single-task: one flat group covering every parameter.
    parameter_groups = dict(model.named_parameters())
    all_learning_rates = {"single_task": optimizer_config.single_task_learning_rate}
  optimizers = [
    keyed.KeyedOptimizerWrapper(param_group, optimizer_fn)
    for param_name, param_group in parameter_groups.items()
    if param_name != _DENSE_EMBEDDINGS
  ]
  # Making EBC optimizer to be SGD to match fused optimiser
  if _DENSE_EMBEDDINGS in parameter_groups:
    optimizers.append(
      keyed.KeyedOptimizerWrapper(
        parameter_groups[_DENSE_EMBEDDINGS],
        functools.partial(torch.optim.SGD, lr=_DEFAULT_LR, maximize=False, momentum=False),
      )
    )
  if not parameter_groups.keys() == all_learning_rates.keys():
    raise ValueError("Learning rates do not match optimizers")
  # If the optimiser is dense, model.fused_optimizer will be empty (but not None)
  emb_learning_rate = None
  if hasattr(model, "fused_optimizer") and model.fused_optimizer.optimizers:
    logging.info(f"Model fused optimiser: {model.fused_optimizer}")
    optimizers.append(model.fused_optimizer)
    if emb_optimizer_config:
      emb_learning_rate = emb_optimizer_config.learning_rate.copy()
    else:
      raise ValueError("Fused kernel exists, but LR is not set")
  logging.info(f"***** Combining optimizers: {optimizers} *****")
  optimizer = keyed.CombinedOptimizer(optimizers)
  # The shim appends the fused-embedding LR last; ordering must match the order
  # the optimizers were appended above.
  scheduler = RecapLRShim(optimizer, all_learning_rates, emb_learning_rate)
  logging.info(f"***** Combined optimizer after init: {optimizer} *****")

  return optimizer, scheduler
22,170 | from tml.projects.home.recap.model.config import MlpConfig
import torch
from absl import logging
def _init_weights(module):
if isinstance(module, torch.nn.Linear):
torch.nn.init.xavier_uniform_(module.weight)
torch.nn.init.constant_(module.bias, 0) | null |
22,171 | from __future__ import annotations
from absl import logging
import torch
from typing import Optional, Callable, Mapping, Dict, Sequence, TYPE_CHECKING
from tml.projects.home.recap.model import feature_transform
from tml.projects.home.recap.model import config as model_config_mod
from tml.projects.home.recap.model import mlp
from tml.projects.home.recap.model import mask_net
from tml.projects.home.recap.model import numeric_calibration
from tml.projects.home.recap.model.model_and_loss import ModelAndLoss
import tml.projects.home.recap.model.config as model_config_mod
def sanitize(task_name):
  """Make a task name usable as a ModuleDict key ('.' is not allowed there)."""
  return "__".join(task_name.split("."))
22,172 | from __future__ import annotations
from absl import logging
import torch
from typing import Optional, Callable, Mapping, Dict, Sequence, TYPE_CHECKING
from tml.projects.home.recap.model import feature_transform
from tml.projects.home.recap.model import config as model_config_mod
from tml.projects.home.recap.model import mlp
from tml.projects.home.recap.model import mask_net
from tml.projects.home.recap.model import numeric_calibration
from tml.projects.home.recap.model.model_and_loss import ModelAndLoss
import tml.projects.home.recap.model.config as model_config_mod
def unsanitize(sanitized_task_name):
  """Invert sanitize(): restore '.' separators from the '__' placeholder."""
  return ".".join(sanitized_task_name.split("__"))
22,173 | from __future__ import annotations
from absl import logging
import torch
from typing import Optional, Callable, Mapping, Dict, Sequence, TYPE_CHECKING
from tml.projects.home.recap.model import feature_transform
from tml.projects.home.recap.model import config as model_config_mod
from tml.projects.home.recap.model import mlp
from tml.projects.home.recap.model import mask_net
from tml.projects.home.recap.model import numeric_calibration
from tml.projects.home.recap.model.model_and_loss import ModelAndLoss
import tml.projects.home.recap.model.config as model_config_mod
The provided code snippet includes necessary dependencies for implementing the `_build_single_task_model` function. Write a Python function `def _build_single_task_model(task: model_config_mod.TaskModel, input_shape: int)` to solve the following problem:
"Builds a model for a single task
Here is the function:
def _build_single_task_model(task: model_config_mod.TaskModel, input_shape: int):
  """Builds a model for a single task.

  Exactly one of the architecture sub-configs (mlp_config, dcn_config,
  mask_net_config) is expected to be set on `task`; config validation is
  assumed to enforce this, hence the "unreachable" final branch.
  """
  if task.mlp_config:
    return mlp.Mlp(in_features=input_shape, mlp_config=task.mlp_config)
  elif task.dcn_config:
    # NOTE(review): `dcn` is not in this module's visible import block --
    # confirm the import exists, otherwise this branch raises NameError.
    return dcn.Dcn(dcn_config=task.dcn_config, in_features=input_shape)
  elif task.mask_net_config:
    return mask_net.MaskNet(mask_net_config=task.mask_net_config, in_features=input_shape)
  else:
    raise ValueError("This should never be reached.")
22,174 | from __future__ import annotations
from absl import logging
import torch
from typing import Optional, Callable, Mapping, Dict, Sequence, TYPE_CHECKING
from tml.projects.home.recap.model import feature_transform
from tml.projects.home.recap.model import config as model_config_mod
from tml.projects.home.recap.model import mlp
from tml.projects.home.recap.model import mask_net
from tml.projects.home.recap.model import numeric_calibration
from tml.projects.home.recap.model.model_and_loss import ModelAndLoss
import tml.projects.home.recap.model.config as model_config_mod
class MultiTaskRankingModel(torch.nn.Module):
  """Multi-task ranking model.

  Preprocesses dense features, optionally concatenates embedding features,
  optionally runs a shared backbone, then applies one tower + affine map +
  calibrator per task.

  NOTE(review): `ModelConfig`, `RecapDataConfig`, `SmallEmbedding` and
  `PositionDebias` are not in this module's visible import block -- confirm
  they are imported/defined elsewhere in the file.
  """

  def __init__(
    self,
    input_shapes: Mapping[str, torch.Size],
    config: ModelConfig,
    data_config: RecapDataConfig,
    return_backbone: bool = False,
  ):
    """Constructor for Multi task learning.

    Assumptions made:
    1. Tasks specified in data config match model architecture.

    These are all validated in config.
    """
    super().__init__()

    self._config = config
    self._data_config = data_config

    self._preprocessor = feature_transform.build_features_preprocessor(
      config.featurization_config, input_shapes
    )
    self.return_backbone = return_backbone

    self.embeddings = None
    self.small_embeddings = None
    # Running total of extra input width contributed by all embedding sources.
    embedding_dims = 0
    if config.large_embeddings:
      from large_embeddings.models.learnable_embeddings import LargeEmbeddings

      self.embeddings = LargeEmbeddings(large_embeddings_config=config.large_embeddings)
      embedding_dims += sum([table.embedding_dim for table in config.large_embeddings.tables])
      logging.info(f"Emb dim: {embedding_dims}")

    if config.small_embeddings:
      self.small_embeddings = SmallEmbedding(config.small_embeddings)
      embedding_dims += sum([table.embedding_dim for table in config.small_embeddings.tables])
      logging.info(f"Emb dim (with small embeddings): {embedding_dims}")

    # Pre-trained (Twhin) embeddings: each gets its own LayerNorm and widens
    # the dense input when present in the renamed-feature schema.
    if "user_embedding" in data_config.seg_dense_schema.renamed_features:
      embedding_dims += input_shapes["user_embedding"][-1]
      self._user_embedding_layer_norm = torch.nn.LayerNorm(input_shapes["user_embedding"][-1])
    else:
      self._user_embedding_layer_norm = None

    if "user_eng_embedding" in data_config.seg_dense_schema.renamed_features:
      embedding_dims += input_shapes["user_eng_embedding"][-1]
      self._user_eng_embedding_layer_norm = torch.nn.LayerNorm(
        input_shapes["user_eng_embedding"][-1]
      )
    else:
      self._user_eng_embedding_layer_norm = None

    if "author_embedding" in data_config.seg_dense_schema.renamed_features:
      embedding_dims += input_shapes["author_embedding"][-1]
      self._author_embedding_layer_norm = torch.nn.LayerNorm(input_shapes["author_embedding"][-1])
    else:
      self._author_embedding_layer_norm = None

    input_dims = input_shapes["continuous"][-1] + input_shapes["binary"][-1] + embedding_dims

    if config.position_debias_config:
      self.position_debias_model = PositionDebias(config.position_debias_config)
      input_dims += self.position_debias_model.out_features
    else:
      self.position_debias_model = None
    logging.info(f"input dim: {input_dims}")

    # A backbone only exists for the sharing modes; SHARE_NONE towers read the
    # raw concatenated features directly.
    if config.multi_task_type in [
      model_config_mod.MultiTaskType.SHARE_ALL,
      model_config_mod.MultiTaskType.SHARE_PARTIAL,
    ]:
      self._backbone = _build_single_task_model(config.backbone, input_dims)
    else:
      self._backbone = None

    _towers: Dict[str, torch.nn.Module] = {}
    _calibrators: Dict[str, torch.nn.Module] = {}
    _affine_maps: Dict[str, torch.nn.Module] = {}

    for task_name, task_architecture in config.tasks.items():
      safe_name = sanitize(task_name)
      # Complex input dimension calculation.
      if config.multi_task_type == model_config_mod.MultiTaskType.SHARE_NONE:
        num_inputs = input_dims
      elif config.multi_task_type == model_config_mod.MultiTaskType.SHARE_ALL:
        num_inputs = self._backbone.out_features
      elif config.multi_task_type == model_config_mod.MultiTaskType.SHARE_PARTIAL:
        num_inputs = input_dims + self._backbone.out_features
      else:
        raise ValueError("Unreachable branch of enum.")

      # Annoyingly, ModuleDict doesn't allow . inside key names.
      _towers[safe_name] = _build_single_task_model(task_architecture, num_inputs)

      if task_architecture.affine_map:
        # 1x1 Linear initialized from config so logits can be scaled/shifted.
        affine_map = torch.nn.Linear(1, 1)
        affine_map.weight.data = torch.tensor([[task_architecture.affine_map.scale]])
        affine_map.bias.data = torch.tensor([task_architecture.affine_map.bias])
        _affine_maps[safe_name] = affine_map
      else:
        _affine_maps[safe_name] = torch.nn.Identity()

      _calibrators[safe_name] = numeric_calibration.NumericCalibration(
        pos_downsampling_rate=data_config.tasks[task_name].pos_downsampling_rate,
        neg_downsampling_rate=data_config.tasks[task_name].neg_downsampling_rate,
      )

    self._task_names = list(config.tasks.keys())
    self._towers = torch.nn.ModuleDict(_towers)
    self._affine_maps = torch.nn.ModuleDict(_affine_maps)
    self._calibrators = torch.nn.ModuleDict(_calibrators)
    self._counter = torch.autograd.Variable(torch.tensor(0), requires_grad=False)

  def forward(
    self,
    continuous_features: torch.Tensor,
    binary_features: torch.Tensor,
    discrete_features: Optional[torch.Tensor] = None,
    sparse_features=None,  # Optional[KeyedJaggedTensor]
    user_embedding: Optional[torch.Tensor] = None,
    user_eng_embedding: Optional[torch.Tensor] = None,
    author_embedding: Optional[torch.Tensor] = None,
    labels: Optional[torch.Tensor] = None,
    weights: Optional[torch.Tensor] = None,
  ):
    # NOTE(review): `labels` and `weights` are accepted but never used in this
    # body -- presumably consumed by a loss wrapper; confirm.
    concat_dense_features = [
      self._preprocessor(continuous_features=continuous_features, binary_features=binary_features)
    ]

    if self.embeddings:
      concat_dense_features.append(self.embeddings(sparse_features))

    # Twhin embedding layer norms
    if self.small_embeddings:
      if discrete_features is None:
        raise ValueError(
          "Forward arg discrete_features is None, but since small_embeddings are used, a Tensor is expected."
        )
      concat_dense_features.append(self.small_embeddings(discrete_features))

    if self._user_embedding_layer_norm:
      if user_embedding is None:
        raise ValueError(
          "Forward arg user_embedding is None, but since Twhin user_embeddings are used by the model, a Tensor is expected."
        )
      concat_dense_features.append(self._user_embedding_layer_norm(user_embedding))

    if self._user_eng_embedding_layer_norm:
      if user_eng_embedding is None:
        raise ValueError(
          "Forward arg user_eng_embedding is None, but since Twhin user_eng_embeddings are used by the model, a Tensor is expected."
        )
      concat_dense_features.append(self._user_eng_embedding_layer_norm(user_eng_embedding))

    if self._author_embedding_layer_norm:
      if author_embedding is None:
        raise ValueError(
          "Forward arg author_embedding is None, but since Twhin author_embeddings are used by the model, a Tensor is expected."
        )
      concat_dense_features.append(self._author_embedding_layer_norm(author_embedding))

    if self.position_debias_model:
      if discrete_features is None:
        raise ValueError(
          "Forward arg discrete_features is None, but since position_debias_model is used, a Tensor is expected."
        )
      concat_dense_features.append(self.position_debias_model(discrete_features))

    if discrete_features is not None and not (self.position_debias_model or self.small_embeddings):
      logging.warning("Forward arg discrete_features is passed, but never used.")

    concat_dense_features = torch.cat(concat_dense_features, dim=1)

    if self._backbone:
      if self._config.multi_task_type == model_config_mod.MultiTaskType.SHARE_ALL:
        net = self._backbone(concat_dense_features)["output"]
      elif self._config.multi_task_type == model_config_mod.MultiTaskType.SHARE_PARTIAL:
        net = torch.cat(
          [concat_dense_features, self._backbone(concat_dense_features)["output"]], dim=1
        )
    else:
      net = concat_dense_features
    backbone_result = net

    all_logits = []
    all_probabilities = []
    all_calibrated_probabilities = []

    for task_name in self._task_names:
      safe_name = sanitize(task_name)
      tower_outputs = self._towers[safe_name](net)

      logits = tower_outputs["output"]

      scaled_logits = self._affine_maps[safe_name](logits)

      probabilities = torch.sigmoid(scaled_logits)
      # Calibrate back to the un-downsampled label distribution.
      calibrated_probabilities = self._calibrators[safe_name](probabilities)

      all_logits.append(scaled_logits)
      all_probabilities.append(probabilities)
      all_calibrated_probabilities.append(calibrated_probabilities)

    # Stack per-task columns into [batch, num_tasks] outputs.
    results = {
      "logits": torch.squeeze(torch.stack(all_logits, dim=1), dim=-1),
      "probabilities": torch.squeeze(torch.stack(all_probabilities, dim=1), dim=-1),
      "calibrated_probabilities": torch.squeeze(
        torch.stack(all_calibrated_probabilities, dim=1), dim=-1
      ),
    }
    # Returning the backbone is intended for stitching post-tf conversion
    # Leaving this on will ~200x the size of the output
    # and could slow things down
    if self.return_backbone:
      results["backbone"] = backbone_result

    return results
def create_ranking_model(
  data_spec,
  # Used for planner to be batch size aware.
  config: config_mod.RecapConfig,
  device: torch.device,
  loss_fn: Optional[Callable] = None,
  data_config=None,
  return_backbone=False,
):
  """Build a (possibly loss-wrapped) MultiTaskRankingModel from config.

  Args:
    data_spec: Mapping of feature name -> shape used to size the input layers.
    config: Full RecapConfig; the architecture comes from `config.model`.
    device: Target device (not used directly here; kept for interface stability).
    loss_fn: If given, the model is wrapped in ModelAndLoss with this loss.
    data_config: Optional override for `config.train_data` (e.g. for eval).
    return_backbone: If True the model also returns backbone activations.

  Returns:
    The constructed (and possibly loss-wrapped) torch.nn.Module.

  Raises:
    NotImplementedError: If the first task requests a DLRM architecture,
      which this builder does not support.
  """
  if list(config.model.tasks.values())[0].dlrm_config:
    # Bug fix: this branch previously also constructed an EmbeddingRankingModel
    # after the raise -- unreachable code that referenced undefined names
    # (`all_config`, `EmbeddingRankingModel`). It has been removed.
    raise NotImplementedError()

  model = MultiTaskRankingModel(
    input_shapes=data_spec,
    config=config.model,
    data_config=data_config if data_config is not None else config.train_data,
    return_backbone=return_backbone,
  )

  logging.info("***** Model Architecture *****")
  logging.info(model)

  logging.info("***** Named Parameters *****")
  for elem in model.named_parameters():
    logging.info(elem[0])

  if loss_fn:
    logging.info("***** Wrapping in loss *****")
    model = ModelAndLoss(
      model=model,
      loss_fn=loss_fn,
      stratifiers=config.model.stratifiers,
    )
  return model
22,175 | from typing import Mapping, Sequence, Union
from tml.projects.home.recap.model.config import (
BatchNormConfig,
DoubleNormLogConfig,
FeaturizationConfig,
LayerNormConfig,
)
import torch
The provided code snippet includes necessary dependencies for implementing the `log_transform` function. Write a Python function `def log_transform(x: torch.Tensor) -> torch.Tensor` to solve the following problem:
Safe log transform that works across both negative, zero, and positive floats.
Here is the function:
def log_transform(x: torch.Tensor) -> torch.Tensor:
  """Safe log transform that works across both negative, zero, and positive floats."""
  return x.sign() * x.abs().log1p()
22,176 | from typing import Mapping, Sequence, Union
from tml.projects.home.recap.model.config import (
BatchNormConfig,
DoubleNormLogConfig,
FeaturizationConfig,
LayerNormConfig,
)
import torch
class DoubleNormLog(torch.nn.Module):
  """Performs a batch norm and clamp on continuous features followed by a layer norm on binary and continuous features."""

  def __init__(
    self,
    input_shapes: Mapping[str, Sequence[int]],
    config: DoubleNormLogConfig,
  ):
    super().__init__()

    # Continuous-only pipeline: sanitize non-finite values, signed log1p,
    # optional batch norm, then clamp to +/- clip_magnitude.
    # NOTE(review): InputNonFinite/Log1pAbs/BatchNorm/Clamp/LayerNorm are
    # project-local layers not visible here -- behavior inferred from names;
    # confirm against their definitions.
    _before_concat_layers = [
      InputNonFinite(),
      Log1pAbs(),
    ]
    if config.batch_norm_config:
      _before_concat_layers.append(
        BatchNorm(input_shapes["continuous"][-1], config.batch_norm_config)
      )
    _before_concat_layers.append(
      Clamp(min_value=-config.clip_magnitude, max_value=config.clip_magnitude)
    )
    self._before_concat_layers = torch.nn.Sequential(*_before_concat_layers)

    self.layer_norm = None
    if config.layer_norm_config:
      # The layer norm spans the concatenation of continuous + binary features.
      last_dim = input_shapes["continuous"][-1] + input_shapes["binary"][-1]
      self.layer_norm = LayerNorm(last_dim, config.layer_norm_config)

  def forward(
    self, continuous_features: torch.Tensor, binary_features: torch.Tensor
  ) -> torch.Tensor:
    """Transform continuous features, concat binary features, optionally layer-norm."""
    x = self._before_concat_layers(continuous_features)
    x = torch.cat([x, binary_features], dim=1)
    if self.layer_norm:
      return self.layer_norm(x)
    return x
The provided code snippet includes necessary dependencies for implementing the `build_features_preprocessor` function. Write a Python function `def build_features_preprocessor( config: FeaturizationConfig, input_shapes: Mapping[str, Sequence[int]] )` to solve the following problem:
Trivial right now, but we will change in the future.
Here is the function:
def build_features_preprocessor(
  config: FeaturizationConfig, input_shapes: Mapping[str, Sequence[int]]
):
  """Trivial right now, but we will change in the future.

  Currently always returns a DoubleNormLog preprocessor built from
  `config.double_norm_log_config`.
  """
  return DoubleNormLog(input_shapes, config.double_norm_log_config)
22,177 | from tml.projects.home.recap.model import config, mlp
import torch
def _init_weights(module):
if isinstance(module, torch.nn.Linear):
torch.nn.init.xavier_uniform_(module.weight)
torch.nn.init.constant_(module.bias, 0) | null |
22,178 | from typing import Callable
from tml.ml_logging.torch_logging import logging
import torch
import torch.distributed as dist
from torchrec.distributed.model_parallel import DistributedModelParallel
The provided code snippet includes necessary dependencies for implementing the `maybe_shard_model` function. Write a Python function `def maybe_shard_model( model, device: torch.device, )` to solve the following problem:
Set up and apply DistributedModelParallel to a model if running in a distributed environment. If in a distributed environment, constructs Topology, sharders, and ShardingPlan, then applies DistributedModelParallel. If not in a distributed environment, returns model directly.
Here is the function:
def maybe_shard_model(
  model,
  device: torch.device,
):
  """Wrap `model` in DistributedModelParallel when torch.distributed is initialized.

  Outside of an initialized distributed environment the model is returned
  unchanged.
  """
  if not dist.is_initialized():
    return model

  logging.info("***** Wrapping in DistributedModelParallel *****")
  logging.info(f"Model before wrapping: {model}")
  model = DistributedModelParallel(
    module=model,
    device=device,
  )
  logging.info(f"Model after wrapping: {model}")
  return model
22,179 | from typing import Callable
from tml.ml_logging.torch_logging import logging
import torch
import torch.distributed as dist
from torchrec.distributed.model_parallel import DistributedModelParallel
The provided code snippet includes necessary dependencies for implementing the `log_sharded_tensor_content` function. Write a Python function `def log_sharded_tensor_content(weight_name: str, table_name: str, weight_tensor) -> None` to solve the following problem:
Handy function to log the content of EBC embedding layer. Only works for single GPU machines. Args: weight_name: name of tensor, as defined in model table_name: name of the EBC table the weight is taken from weight_tensor: embedding weight tensor
Here is the function:
def log_sharded_tensor_content(weight_name: str, table_name: str, weight_tensor) -> None:
  """Handy function to log the content of EBC embedding layer.

  Only works for single GPU machines.

  Args:
    weight_name: name of tensor, as defined in model
    table_name: name of the EBC table the weight is taken from
    weight_tensor: embedding weight tensor
      (assumed to be a sharded tensor exposing .metadata()/.gather() -- confirm)
  """
  logging.info(f"{weight_name}, {table_name}", rank=-1)
  logging.info(f"{weight_tensor.metadata()}", rank=-1)
  # Gather every shard into one dense tensor on GPU 0 -- hence single-GPU only.
  output_tensor = torch.zeros(*weight_tensor.size(), device=torch.device("cuda:0"))
  weight_tensor.gather(out=output_tensor)
  logging.info(f"{output_tensor}", rank=-1)
22,180 | import yaml
import getpass
import os
import string
from typing import Tuple, Type, TypeVar
from tml.core.config import base_config
import fsspec
C = TypeVar("C", bound=base_config.BaseConfig)
def _read_file(f):
  """Open `f` with fsspec (local or remote path) and return its raw contents."""
  with fsspec.open(f) as handle:
    return handle.read()
The provided code snippet includes necessary dependencies for implementing the `setup_configuration` function. Write a Python function `def setup_configuration( config_type: Type[C], yaml_path: str, substitute_env_variable: bool = False, ) -> Tuple[C, str]` to solve the following problem:
Resolves a config at a yaml path. Args: config_type: Pydantic config class to load. yaml_path: yaml path of the config file. substitute_env_variable: If True substitute string in the format $VAR or ${VAR} by their environment variable value whenever possible. If an environment variable doesn't exist, the string is left unchanged. Returns: The pydantic config object.
Here is the function:
def setup_configuration(
  config_type: Type[C],
  yaml_path: str,
  substitute_env_variable: bool = False,
) -> Tuple[C, str]:
  """Resolves a config at a yaml path.

  Args:
    config_type: Pydantic config class to load.
    yaml_path: yaml path of the config file.
    substitute_env_variable: If True substitute string in the format $VAR or ${VAR} by their
      environment variable value whenever possible. If an environment variable doesn't exist,
      the string is left unchanged.

  Returns:
    The pydantic config object.
    NOTE(review): the annotation says Tuple[C, str] but only the config object
    is returned -- confirm intended return type with callers.
  """

  def _substitute(s):
    if substitute_env_variable:
      return string.Template(s).safe_substitute(os.environ, USER=getpass.getuser())
    return s

  assert config_type is not None, "can't use all_config without config_type"
  raw_contents = _read_file(yaml_path)
  if isinstance(raw_contents, bytes):
    # fsspec may return bytes; string.Template requires text.
    raw_contents = raw_contents.decode("utf-8")
  # Bug fix: environment substitution must run on the raw YAML *text* before
  # parsing. Previously `_substitute` was applied to the already-parsed object
  # (a dict), so substitute_env_variable=True raised a TypeError inside
  # string.Template.
  content = yaml.safe_load(_substitute(raw_contents))
  return config_type.parse_obj(content)
22,181 | import os
import subprocess
import sys
from typing import Optional
from tml.ml_logging.torch_logging import logging
from twitter.ml.tensorflow.experimental.distributed import utils
import torch
import torch.distributed.run
def is_distributed_worker():
  """Return True when the torchrun-style env vars WORLD_SIZE and RANK are both set."""
  return "WORLD_SIZE" in os.environ and "RANK" in os.environ
The provided code snippet includes necessary dependencies for implementing the `maybe_run_training` function. Write a Python function `def maybe_run_training( train_fn, module_name, nproc_per_node: Optional[int] = None, num_nodes: Optional[int] = None, set_python_path_in_subprocess: bool = False, is_chief: Optional[bool] = False, **training_kwargs, )` to solve the following problem:
Wrapper function for single node, multi-GPU Pytorch training. If the necessary distributed Pytorch environment variables (WORLD_SIZE, RANK) have been set, then this function executes `train_fn(**training_kwargs)`. Otherwise, this function calls torchrun and points at the calling module `module_name`. After this call, the necessary environment variables are set and training will commence. Args: train_fn: The function that is responsible for training module_name: The name of the module that this function was called from; used to indicate torchrun entrypoint. nproc_per_node: Number of workers per node; supported values. num_nodes: Number of nodes, otherwise inferred from environment. is_chief: If process is running on chief. set_python_path_in_subprocess: A bool denoting whether to set PYTHONPATH.
Here is the function:
def maybe_run_training(
  train_fn,
  module_name,
  nproc_per_node: Optional[int] = None,
  num_nodes: Optional[int] = None,
  set_python_path_in_subprocess: bool = False,
  is_chief: Optional[bool] = False,
  **training_kwargs,
):
  """Wrapper function for single node, multi-GPU Pytorch training.

  If the necessary distributed Pytorch environment variables
  (WORLD_SIZE, RANK) have been set, then this function executes
  `train_fn(**training_kwargs)`.

  Otherwise, this function calls torchrun and points at the calling module
  `module_name`. After this call, the necessary environment variables are set
  and training will commence.

  Args:
    train_fn: The function that is responsible for training
    module_name: The name of the module that this function was called from;
      used to indicate torchrun entrypoint.
    nproc_per_node: Number of workers per node; supported values.
    num_nodes: Number of nodes, otherwise inferred from environment.
    is_chief: If process is running on chief.
    set_python_path_in_subprocess: A bool denoting whether to set PYTHONPATH.
  """
  machines = utils.machine_from_env()
  if num_nodes is None:
    num_nodes = 1
    if machines.num_workers:
      num_nodes += machines.num_workers

  if is_distributed_worker():
    # world_size, rank, etc are set; assuming any other env vars are set (checks to come)
    # start the actual training!
    train_fn(**training_kwargs)
  else:
    if nproc_per_node is None:
      if torch.cuda.is_available():
        nproc_per_node = torch.cuda.device_count()
      else:
        nproc_per_node = machines.chief.num_accelerators

    # Rejoin all arguments to send back through torchrec
    # this is a temporary measure, will replace the os.system call
    # with torchrun API calls
    args = list(f"--{key}={val}" for key, val in training_kwargs.items())

    # Build the torchrun argument vector.
    cmd = [
      "--nnodes",
      str(num_nodes),
    ]
    if nproc_per_node:
      cmd.extend(["--nproc_per_node", str(nproc_per_node)])
    if num_nodes > 1:
      # Multi-node: rendezvous through c10d at the chief's address.
      cluster_resolver = utils.cluster_resolver()
      backend_address = cluster_resolver.cluster_spec().task_address("chief", 0)
      cmd.extend(
        [
          "--rdzv_backend",
          "c10d",
          "--rdzv_id",
          backend_address,
        ]
      )
      # Set localhost on chief because of https://github.com/pytorch/pytorch/issues/79388
      if is_chief:
        cmd.extend(["--rdzv_endpoint", "localhost:2222"])
      else:
        cmd.extend(["--rdzv_endpoint", backend_address])
    else:
      cmd.append("--standalone")

    cmd.extend(
      [
        str(module_name),
        *args,
      ]
    )
    logging.info(f"""Distributed running with cmd: '{" ".join(cmd)}'""")

    # Call torchrun on this module; will spawn new processes and re-run this
    # function, eventually calling "train_fn". The following line sets the PYTHONPATH to accommodate
    # bazel stubbing for the main binary.
    if set_python_path_in_subprocess:
      subprocess.run(["torchrun"] + cmd, env={**os.environ, "PYTHONPATH": ":".join(sys.path)})
    else:
      torch.distributed.run.main(cmd)
22,182 | import os
import time
from typing import Any, Dict, List, Optional
from tml.ml_logging.torch_logging import logging
from tml.common.filesystem import infer_fs, is_gcs_fs
import torchsnapshot
def _eval_done_path(checkpoint_path: str, eval_partition: str) -> str:
  # Marker file path: <eval_subdir>/<partition>_DONE signals a finished eval
  # pass for this checkpoint. (`_eval_subdir` is defined elsewhere in this file.)
  return os.path.join(_eval_subdir(checkpoint_path), f"{eval_partition}_DONE")
def mark_done_eval(checkpoint_path: str, eval_partition: str):
  """Touch the <partition>_DONE marker so other jobs see this eval as finished."""
  infer_fs(checkpoint_path).touch(_eval_done_path(checkpoint_path, eval_partition))
22,183 | import os
import time
from typing import Any, Dict, List, Optional
from tml.ml_logging.torch_logging import logging
from tml.common.filesystem import infer_fs, is_gcs_fs
import torchsnapshot
def is_done_eval(checkpoint_path: str, eval_partition: str):
  """Return whether the <partition>_DONE marker exists for this checkpoint."""
  # NOTE(review): `get_checkpoint(checkpoint_path)` is used as the object whose
  # `.exists` is called; mark_done_eval uses `infer_fs(...)` for the symmetric
  # write -- confirm these resolve to the same filesystem handle.
  return get_checkpoint(checkpoint_path).exists(_eval_done_path(checkpoint_path, eval_partition))
def step_from_checkpoint(checkpoint: str) -> int:
  """Extract the global step from a checkpoint path whose basename is the step number."""
  _, basename = os.path.split(checkpoint)
  return int(basename)
def checkpoints_iterator(save_dir: str, seconds_to_sleep: int = 30, timeout: int = 1800):
  """Simplified equivalent of tf.train.checkpoints_iterator.

  Yields each newly available checkpoint path in `save_dir`; stops (returns)
  once no new checkpoint appears within `timeout` seconds.

  Args:
    seconds_to_sleep: time between polling calls.
    timeout: how long to wait for a new checkpoint.
  """

  def _poll(last_checkpoint: Optional[str] = None):
    # Block until a checkpoint different from `last_checkpoint` shows up,
    # or None after `timeout` seconds.
    stop_time = time.time() + timeout
    while True:
      _checkpoint_path = get_checkpoint(save_dir, missing_ok=True)
      if not _checkpoint_path or _checkpoint_path == last_checkpoint:
        # Give up if another sleep interval would overshoot the deadline.
        if time.time() + seconds_to_sleep > stop_time:
          logging.info(
            f"Timed out waiting for next available checkpoint from {save_dir} for {timeout}s."
          )
          return None
        logging.info(f"Waiting for next available checkpoint from {save_dir}.")
        time.sleep(seconds_to_sleep)
      else:
        logging.info(f"Found latest checkpoint {_checkpoint_path}.")
        return _checkpoint_path

  checkpoint_path = None
  while True:
    new_checkpoint = _poll(checkpoint_path)
    if not new_checkpoint:
      return
    checkpoint_path = new_checkpoint
    yield checkpoint_path
def wait_for_evaluators(
save_dir: str,
partition_names: List[str],
global_step: int,
timeout: int,
) -> None:
logging.info("Waiting for all evaluators to finish.")
start_time = time.time()
for checkpoint in checkpoints_iterator(save_dir):
step = step_from_checkpoint(checkpoint)
logging.info(f"Considering checkpoint {checkpoint} for global step {global_step}.")
if step == global_step:
while partition_names:
if is_done_eval(checkpoint, partition_names[-1]):
logging.info(
f"Checkpoint {checkpoint} marked as finished eval for partition {partition_names[-1]} at step {step}, still waiting for {partition_names}."
)
partition_names.pop()
if time.time() - start_time >= timeout:
logging.warning(
f"Not all evaluators finished after waiting for {time.time() - start_time}"
)
return
time.sleep(10)
logging.info("All evaluators finished.")
return
if time.time() - start_time >= timeout:
logging.warning(f"Not all evaluators finished after waiting for {time.time() - start_time}")
return | null |
22,184 | import itertools
from typing import Callable, Dict, List, Optional, Union
from tml.ml_logging.torch_logging import logging
import torch
import torch.distributed as dist
from torchrec.distributed.model_parallel import DistributedModelParallel
The provided code snippet includes necessary dependencies for implementing the `weights_to_log` function. Write a Python function `def weights_to_log( model: torch.nn.Module, how_to_log: Optional[Union[Callable, Dict[str, Callable]]] = None, )` to solve the following problem:
Creates dict of reduced weights to log to give sense of training. Args: model: model to traverse. how_to_log: if a function, then applies this to every parameter, if a dict then only applies and logs specified parameters.
Here is the function:
def weights_to_log(
model: torch.nn.Module,
how_to_log: Optional[Union[Callable, Dict[str, Callable]]] = None,
):
"""Creates dict of reduced weights to log to give sense of training.
Args:
model: model to traverse.
how_to_log: if a function, then applies this to every parameter, if a dict
then only applies and logs specified parameters.
"""
if not how_to_log:
return
to_log = dict()
named_parameters = model.named_parameters()
logging.info(f"Using DMP: {isinstance(model, DistributedModelParallel)}")
if isinstance(model, DistributedModelParallel):
named_parameters = itertools.chain(
named_parameters, model._dmp_wrapped_module.named_parameters()
)
logging.info(
f"Using dmp parameters: {list(name for name, _ in model._dmp_wrapped_module.named_parameters())}"
)
for param_name, params in named_parameters:
if callable(how_to_log):
how = how_to_log
else:
how = how_to_log.get(param_name) # type: ignore[assignment]
if not how:
continue # type: ignore
to_log[f"model/{how.__name__}/{param_name}"] = how(params.detach()).cpu().numpy()
return to_log | Creates dict of reduced weights to log to give sense of training. Args: model: model to traverse. how_to_log: if a function, then applies this to every parameter, if a dict then only applies and logs specified parameters. |
22,185 | import itertools
from typing import Callable, Dict, List, Optional, Union
from tml.ml_logging.torch_logging import logging
import torch
import torch.distributed as dist
from torchrec.distributed.model_parallel import DistributedModelParallel
The provided code snippet includes necessary dependencies for implementing the `log_ebc_norms` function. Write a Python function `def log_ebc_norms( model_state_dict, ebc_keys: List[str], sample_size: int = 4_000_000, ) -> Dict[str, torch.Tensor]` to solve the following problem:
Logs the norms of the embedding tables as specified by ebc_keys. As of now, log average norm per rank. Args: model_state_dict: model.state_dict() ebc_keys: list of embedding keys from state_dict to log. Must contain full name, i.e. model.embeddings.ebc.embedding_bags.meta__user_id.weight sample_size: Limits number of rows per rank to compute average on to avoid OOM.
Here is the function:
def log_ebc_norms(
model_state_dict,
ebc_keys: List[str],
sample_size: int = 4_000_000,
) -> Dict[str, torch.Tensor]:
"""Logs the norms of the embedding tables as specified by ebc_keys.
As of now, log average norm per rank.
Args:
model_state_dict: model.state_dict()
ebc_keys: list of embedding keys from state_dict to log. Must contain full name,
i.e. model.embeddings.ebc.embedding_bags.meta__user_id.weight
sample_size: Limits number of rows per rank to compute average on to avoid OOM.
"""
norm_logs = dict()
for emb_key in ebc_keys:
norms = (torch.ones(1, dtype=torch.float32) * -1).to(torch.device(f"cuda:{dist.get_rank()}"))
if emb_key in model_state_dict:
emb_weight = model_state_dict[emb_key]
try:
emb_weight_tensor = emb_weight.local_tensor()
except AttributeError as e:
logging.info(e)
emb_weight_tensor = emb_weight
logging.info("Running Tensor.detach()")
emb_weight_tensor = emb_weight_tensor.detach()
sample_mask = torch.randperm(emb_weight_tensor.shape[0])[
: min(sample_size, emb_weight_tensor.shape[0])
]
# WARNING: .cpu() transfer executes malloc that may be the cause of memory leaks
# Change sample_size if the you observe frequent OOM errors or remove weight logging.
norms = emb_weight_tensor[sample_mask].cpu().norm(dim=1).to(torch.float32)
logging.info(f"Norm shape before reduction: {norms.shape}", rank=-1)
norms = norms.mean().to(torch.device(f"cuda:{dist.get_rank()}"))
all_norms = [
torch.zeros(1, dtype=norms.dtype).to(norms.device) for _ in range(dist.get_world_size())
]
dist.all_gather(all_norms, norms)
for idx, norm in enumerate(all_norms):
if norm != -1.0:
norm_logs[f"{emb_key}-norm-{idx}"] = norm
logging.info(f"Norm Logs are {norm_logs}")
return norm_logs | Logs the norms of the embedding tables as specified by ebc_keys. As of now, log average norm per rank. Args: model_state_dict: model.state_dict() ebc_keys: list of embedding keys from state_dict to log. Must contain full name, i.e. model.embeddings.ebc.embedding_bags.meta__user_id.weight sample_size: Limits number of rows per rank to compute average on to avoid OOM. |
22,186 | from fsspec.implementations.local import LocalFileSystem
import gcsfs
GCS_FS = gcsfs.GCSFileSystem(cache_timeout=-1)
LOCAL_FS = LocalFileSystem()
def infer_fs(path: str):
if path.startswith("gs://"):
return GCS_FS
elif path.startswith("hdfs://"):
# We can probably use pyarrow HDFS to support this.
raise NotImplementedError("HDFS not yet supported")
else:
return LOCAL_FS | null |
22,187 | from fsspec.implementations.local import LocalFileSystem
import gcsfs
LOCAL_FS = LocalFileSystem()
def is_local_fs(fs):
return fs == LOCAL_FS | null |
22,188 | from fsspec.implementations.local import LocalFileSystem
import gcsfs
GCS_FS = gcsfs.GCSFileSystem(cache_timeout=-1)
def is_gcs_fs(fs):
return fs == GCS_FS | null |
22,189 | import os
import torch
import torch.distributed as dist
def maybe_setup_tensorflow():
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.config.set_visible_devices([], "GPU") # disable tf gpu
def setup_and_get_device(tf_ok: bool = True) -> torch.device:
if tf_ok:
maybe_setup_tensorflow()
device = torch.device("cpu")
backend = "gloo"
if torch.cuda.is_available():
rank = os.environ["LOCAL_RANK"]
device = torch.device(f"cuda:{rank}")
backend = "nccl"
torch.cuda.set_device(device)
if not torch.distributed.is_initialized():
dist.init_process_group(backend)
return device | null |
22,190 | import logging as py_logging
import sys
from absl import logging as logging
The provided code snippet includes necessary dependencies for implementing the `setup_absl_logging` function. Write a Python function `def setup_absl_logging()` to solve the following problem:
Make sure that absl logging pushes to stdout rather than stderr.
Here is the function:
def setup_absl_logging():
"""Make sure that absl logging pushes to stdout rather than stderr."""
logging.get_absl_handler().python_handler.stream = sys.stdout
formatter = py_logging.Formatter(
fmt="[%(module)s.%(funcName)s:%(lineno)s - %(levelname)s] %(message)s"
)
logging.get_absl_handler().setFormatter(formatter)
logging.set_verbosity(logging.INFO) | Make sure that absl logging pushes to stdout rather than stderr. |
22,191 | import functools
from typing import Optional
from tml.ml_logging.absl_logging import logging as logging
from absl import logging as absl_logging
import torch.distributed as dist
The provided code snippet includes necessary dependencies for implementing the `rank_specific` function. Write a Python function `def rank_specific(logger)` to solve the following problem:
Ensures that we only override a given logger once.
Here is the function:
def rank_specific(logger):
"""Ensures that we only override a given logger once."""
if hasattr(logger, "_ALREADY_OVERWRITTEN_TO_BE_RANK_SPECIFIC"):
return logger
def _if_rank(logger_method, limit: Optional[int] = None):
if limit:
# If we are limiting redundant logs, wrap logging call with a cache
# to not execute if already cached.
def _wrap(_call):
@functools.lru_cache(limit)
def _logger_method(*args, **kwargs):
_call(*args, **kwargs)
return _logger_method
logger_method = _wrap(logger_method)
def _inner(msg, *args, rank: int = 0, **kwargs):
if not dist.is_initialized():
logger_method(msg, *args, **kwargs)
elif dist.get_rank() == rank:
logger_method(msg, *args, **kwargs)
elif rank < 0:
logger_method(f"Rank{dist.get_rank()}: {msg}", *args, **kwargs)
# Register this stack frame with absl logging so that it doesn't trample logging lines.
absl_logging.ABSLLogger.register_frame_to_skip(__file__, _inner.__name__)
return _inner
logger.fatal = _if_rank(logger.fatal)
logger.error = _if_rank(logger.error)
logger.warning = _if_rank(logger.warning, limit=1)
logger.info = _if_rank(logger.info)
logger.debug = _if_rank(logger.debug)
logger.exception = _if_rank(logger.exception)
logger._ALREADY_OVERWRITTEN_TO_BE_RANK_SPECIFIC = True | Ensures that we only override a given logger once. |
22,192 | from typing import List, Optional
from tml.common.filesystem import infer_fs
import fire
import pandas as pd
import pyarrow as pa
import pyarrow.dataset as pads
import pyarrow.parquet as pq
def _create_dataset(path: str):
fs = infer_fs(path)
files = fs.glob(path)
return pads.dataset(files, format="parquet", filesystem=fs) | null |
22,193 | import json
import os
from typing import List
def get_task_type():
if on_kf():
return os.environ["SPEC_TYPE"]
return os.environ["TASK_TYPE"]
def is_chief() -> bool:
return get_task_type() == "chief" | null |
22,194 | import json
import os
from typing import List
def get_task_type():
if on_kf():
return os.environ["SPEC_TYPE"]
return os.environ["TASK_TYPE"]
def is_reader() -> bool:
return get_task_type() == "datasetworker" | null |
22,195 | import json
import os
from typing import List
def get_task_type():
if on_kf():
return os.environ["SPEC_TYPE"]
return os.environ["TASK_TYPE"]
def is_dispatcher() -> bool:
return get_task_type() == "datasetdispatcher" | null |
22,196 | import json
import os
from typing import List
def has_readers():
if on_kf():
machines_config_env = json.loads(os.environ["MACHINES_CONFIG"])
return machines_config_env["dataset_worker"] is not None
return os.environ.get("HAS_READERS", "False") == "True"
def get_dds_dispatcher_address():
if not has_readers():
return None
if on_kf():
job_name = os.environ["JOB_NAME"]
dds_host = f"{job_name}-datasetdispatcher-0"
else:
dds_host = os.environ["SLURM_JOB_NODELIST_HET_GROUP_0"]
return f"{dds_host}:{get_reader_port()}"
def get_dds():
if not has_readers():
return None
dispatcher_address = get_dds_dispatcher_address()
if dispatcher_address:
return f"grpc://{dispatcher_address}"
else:
raise ValueError("Job does not have DDS.") | null |
22,197 | import json
import os
from typing import List
def on_kf():
return "SPEC_TYPE" in os.environ
def has_readers():
if on_kf():
machines_config_env = json.loads(os.environ["MACHINES_CONFIG"])
return machines_config_env["dataset_worker"] is not None
return os.environ.get("HAS_READERS", "False") == "True"
def get_task_index():
if on_kf():
pod_name = os.environ["MY_POD_NAME"]
return int(pod_name.split("-")[-1])
else:
raise NotImplementedError
def get_reader_port():
if on_kf():
return KF_DDS_PORT
return SLURM_DDS_PORT
def get_dds_worker_address():
if not has_readers():
return None
if on_kf():
job_name = os.environ["JOB_NAME"]
task_index = get_task_index()
return f"{job_name}-datasetworker-{task_index}:{get_reader_port()}"
else:
node = os.environ["SLURMD_NODENAME"]
return f"{node}:{get_reader_port()}" | null |
22,198 | import json
import os
from typing import List
FLIGHT_SERVER_PORT: int = 2222
def on_kf():
return "SPEC_TYPE" in os.environ
def get_num_readers():
if not has_readers():
return 0
if on_kf():
machines_config_env = json.loads(os.environ["MACHINES_CONFIG"])
return int(machines_config_env["num_dataset_workers"] or 0)
return len(os.environ["SLURM_JOB_NODELIST_HET_GROUP_1"].split(","))
def get_flight_server_addresses():
if on_kf():
job_name = os.environ["JOB_NAME"]
return [
f"grpc://{job_name}-datasetworker-{task_index}:{FLIGHT_SERVER_PORT}"
for task_index in range(get_num_readers())
]
else:
raise NotImplementedError | null |
22,199 | import json
import os
from typing import List
def get_dds_journaling_dir():
return os.environ.get("DATASET_JOURNALING_DIR", None) | null |
22,200 | import sys
import logging
def is_venv():
# See https://stackoverflow.com/questions/1871549/determine-if-python-is-running-inside-virtualenv
return sys.base_prefix != sys.prefix
def _main():
if is_venv():
logging.info("In venv %s", sys.prefix)
sys.exit(0)
else:
logging.error("Not in venv")
sys.exit(1) | null |
22,201 | from typing import Optional, Union, List, Tuple
import numpy as np
import matplotlib as mpl
from matplotlib.path import Path
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.patches import Polygon
def make_lines_glow(
ax: Optional[plt.Axes] = None,
n_glow_lines: int = 10,
diff_linewidth: float = 1.05,
alpha_line: float = 0.3,
lines: Union[Line2D, List[Line2D]] = None,
) -> None:
"""Add a glow effect to the lines in an axis object.
Each existing line is redrawn several times with increasing width and low alpha to create the glow effect.
"""
if not ax:
ax = plt.gca()
lines = ax.get_lines() if lines is None else lines
lines = [lines] if isinstance(lines, Line2D) else lines
alpha_value = alpha_line / n_glow_lines
for line in lines:
data = line.get_data(orig=False)
linewidth = line.get_linewidth()
try:
step_type = line.get_drawstyle().split('-')[1]
except:
step_type = None
for n in range(1, n_glow_lines + 1):
if step_type:
glow_line, = ax.step(*data)
else:
glow_line, = ax.plot(*data)
glow_line.update_from(line) # line properties are copied as seen in this solution: https://stackoverflow.com/a/54688412/3240855
glow_line.set_alpha(alpha_value)
glow_line.set_linewidth(linewidth + (diff_linewidth * n))
glow_line.is_glow_line = True # mark the glow lines, to disregard them in the underglow function.
def add_underglow(ax: Optional[plt.Axes] = None, alpha_underglow: float = 0.1) -> None:
"""Add an 'underglow' effect, i.e. faintly color the area below the line."""
if not ax:
ax = plt.gca()
# because ax.fill_between changes axis limits, save current xy-limits to restore them later:
xlims, ylims = ax.get_xlim(), ax.get_ylim()
lines = ax.get_lines()
for line in lines:
# don't add underglow for glow effect lines:
if hasattr(line, 'is_glow_line') and line.is_glow_line:
continue
# parameters to be used from original line:
x, y = line.get_data(orig=False)
color = line.get_c()
transform = line.get_transform()
try:
step_type = line.get_drawstyle().split('-')[1]
except:
step_type = None
ax.fill_between(x=x,
y1=y,
y2=[0] * len(y),
color=color,
step=step_type,
alpha=alpha_underglow,
transform=transform)
ax.set(xlim=xlims, ylim=ylims)
def add_gradient_fill(
ax: Optional[plt.Axes] = None,
alpha_gradientglow: Union[float, Tuple[float,float]] = 1.0,
gradient_start: str = 'min',
N_sampling_points: int = 50,
) -> None:
"""
Add a gradient fill under each line, faintly coloring the area below/above the line.
PARAMETERS:
- ax
The matplolib axes, defaults to the global figure
- alpha_gradientglow
If float, the gradient is from 0 to alpha_gradientglow
If tuple[float, float], the gradient is from alpha_gradientglow[0] to alpha_gradientglow[1]
- gradient_start
Sets the point where the gradient is minimal
For aesthetic reasons, one may want the gradient to either start at:
- 'min': The minimum of each curve (default): this fills below the curve
- 'max': The maximum of each curve: this fills above the curve
- 'bottom': The bottom of the figure: this fills below the curve
- 'top': The top of the figure: this fills below the curve
- 'zero': this fills both above and below the curve
- N_sampling_points
Number of sampling points. Higher may look better at the cost of performance
"""
choices = ['min','max','top','bottom','zero']
if not gradient_start in choices:
raise ValueError(f'key must be one of {choices}')
if type(alpha_gradientglow) == float:
alpha_gradientglow = (0., alpha_gradientglow)
if not (type(alpha_gradientglow) == tuple and type(alpha_gradientglow[0]) == type(alpha_gradientglow[0]) == float):
raise ValueError(f'alpha_gradientglow must be a float or a tuple of two floats but is {alpha_gradientglow}')
if not ax:
ax = plt.gca()
# because ax.imshow changes axis limits, save current xy-limits to restore them later:
xlims, ylims = ax.get_xlim(), ax.get_ylim()
for line in ax.get_lines():
# don't add gradient fill for glow effect lines:
if hasattr(line, 'is_glow_line') and line.is_glow_line:
continue
fill_color = line.get_color()
zorder = line.get_zorder()
alpha = line.get_alpha()
alpha = 1.0 if alpha is None else alpha
rgb = mcolors.colorConverter.to_rgb(fill_color)
z = np.empty((N_sampling_points, 1, 4), dtype=float)
z[:,:,:3] = rgb
# find the visual extend of the gradient
x, y = line.get_data(orig=False)
x, y = np.array(x), np.array(y) # enforce x,y as numpy arrays
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
Ay = {'min':ymin,'max':ymax,'top':ylims[1],'bottom':ylims[0],'zero':0}[gradient_start]
extent = [xmin, xmax, min(ymin,Ay), max(ymax,Ay)]
# alpha will be linearly interpolated on scaler(y)
# {"linear","symlog","logit",...} are currentlty treated the same
if ax.get_yscale() == 'log':
if gradient_start == 'zero' : raise ValueError("key cannot be 'zero' on log plots")
scaler = np.log
else:
scaler = lambda x: x
a, b = alpha_gradientglow
ya, yb = extent[2], extent[3]
moment = lambda y : (scaler(y)-scaler(ya)) / (scaler(yb)-scaler(ya))
ys = np.linspace(ya, yb, N_sampling_points)
if gradient_start in ('min', 'bottom'):
k = moment(ys)
elif gradient_start in ('top', 'max'):
k = 1 - moment(ys)
elif gradient_start in ('zero',):
abs_ys = np.abs(ys)
k = abs_ys / np.max(abs_ys)
alphas = k*b + (1-k)*a
z[:,:,-1] = alphas[:,None]
im = ax.imshow(z,
aspect='auto',
extent=extent,
alpha=alpha,
interpolation='bilinear',
origin='lower',
zorder=zorder)
# xy = np.column_stack([x, y])
# xy = np.vstack([[xmin, Ay], xy, [xmax, Ay], [xmin, Ay]])
# clip_path = Polygon(xy, facecolor='none', edgecolor='none', closed=True)
# ax.add_patch(clip_path)
# im.set_clip_path(clip_path)
path = line.get_path()
extras = Path([[xmax,Ay],[xmin, Ay]], np.full(2, Path.MOVETO))
extras.codes[:] = Path.LINETO
path = path.make_compound_path(path, extras)
im.set_clip_path(path, line._transform)
ax.set(xlim=xlims, ylim=ylims)
The provided code snippet includes necessary dependencies for implementing the `add_glow_effects` function. Write a Python function `def add_glow_effects(ax: Optional[plt.Axes] = None, gradient_fill: bool = False) -> None` to solve the following problem:
Add a glow effect to the lines in an axis object and an 'underglow' effect below the line.
Here is the function:
def add_glow_effects(ax: Optional[plt.Axes] = None, gradient_fill: bool = False) -> None:
"""Add a glow effect to the lines in an axis object and an 'underglow' effect below the line."""
make_lines_glow(ax=ax)
if gradient_fill:
add_gradient_fill(ax=ax)
else:
add_underglow(ax=ax) | Add a glow effect to the lines in an axis object and an 'underglow' effect below the line. |
22,202 | from typing import Optional, Union, List, Tuple
import numpy as np
import matplotlib as mpl
from matplotlib.path import Path
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.patches import Polygon
The provided code snippet includes necessary dependencies for implementing the `make_scatter_glow` function. Write a Python function `def make_scatter_glow( ax: Optional[plt.Axes] = None, n_glow_lines: int = 10, diff_dotwidth: float = 1.2, alpha: float = 0.3, ) -> None` to solve the following problem:
Add glow effect to dots in scatter plot. Each plot is redrawn 10 times with increasing width to create glow effect.
Here is the function:
def make_scatter_glow(
ax: Optional[plt.Axes] = None,
n_glow_lines: int = 10,
diff_dotwidth: float = 1.2,
alpha: float = 0.3,
) -> None:
"""Add glow effect to dots in scatter plot.
Each plot is redrawn 10 times with increasing width to create glow effect."""
if not ax:
ax = plt.gca()
scatterpoints = ax.collections[-1]
x, y = scatterpoints.get_offsets().data.T
dot_color = scatterpoints.get_array()
dot_size = scatterpoints.get_sizes()
alpha = alpha/n_glow_lines
for i in range(1, n_glow_lines):
plt.scatter(x, y, s=dot_size*(diff_dotwidth**i), c=dot_color, alpha=alpha) | Add glow effect to dots in scatter plot. Each plot is redrawn 10 times with increasing width to create glow effect. |
22,203 | from typing import Optional, Union, List, Tuple
import numpy as np
import matplotlib as mpl
from matplotlib.path import Path
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.patches import Polygon
The provided code snippet includes necessary dependencies for implementing the `add_bar_gradient` function. Write a Python function `def add_bar_gradient( bars: mpl.container.BarContainer, ax: Optional[plt.Axes] = None, horizontal: bool = False, ) -> None` to solve the following problem:
Replace each bar with a rectangle filled with a color gradient going transparent
Here is the function:
def add_bar_gradient(
bars: mpl.container.BarContainer,
ax: Optional[plt.Axes] = None,
horizontal: bool = False,
) -> None:
"""Replace each bar with a rectangle filled with a color gradient going transparent"""
if not ax:
ax = plt.gca()
X = [[0, 1],[0, 1]] if horizontal else [[1, 1],[0, 0]]
# freeze axis limits before calling imshow
ax.axis()
ax.autoscale(False)
for bar in bars:
# get properties of existing bar
x, y = bar.get_xy()
width, height = bar.get_width(), bar.get_height()
zorder = bar.zorder
color = bar.get_facecolor()
cmap = mcolors.LinearSegmentedColormap.from_list('gradient_cmap', [(color[0], color[1], color[2], 0), color])
ax.imshow(
X=X, # pseudo-image
extent=[x, x+width, y, y+height],
cmap=cmap,
zorder=zorder,
interpolation='bicubic',
aspect='auto', # to prevent mpl from auto-scaling axes equally
)
bar.remove() | Replace each bar with a rectangle filled with a color gradient going transparent |
22,204 | import re
from typing import List, Optional, Any
from langchain.text_splitter import RecursiveCharacterTextSplitter
import logging
def _split_text_with_regex_from_end(
text: str, separator: str, keep_separator: bool
) -> List[str]:
# Now that we have the separator, split the text
if separator:
if keep_separator:
# The parentheses in the pattern keep the delimiters in the result.
_splits = re.split(f"({separator})", text)
splits = ["".join(i) for i in zip(_splits[0::2], _splits[1::2])]
if len(_splits) % 2 == 1:
splits += _splits[-1:]
# splits = [_splits[0]] + splits
else:
splits = re.split(separator, text)
else:
splits = list(text)
return [s for s in splits if s != ""] | null |
22,205 | from langchain.docstore.document import Document
import re
def is_possible_title(
text: str,
title_max_word_length: int = 20,
non_alpha_threshold: float = 0.5,
) -> bool:
"""Checks to see if the text passes all of the checks for a valid title.
Parameters
----------
text
The input text to check
title_max_word_length
The maximum number of words a title can contain
non_alpha_threshold
The minimum number of alpha characters the text needs to be considered a title
"""
# 文本长度为0的话,肯定不是title
if len(text) == 0:
print("Not a title. Text is empty.")
return False
# 文本中有标点符号,就不是title
ENDS_IN_PUNCT_PATTERN = r"[^\w\s]\Z"
ENDS_IN_PUNCT_RE = re.compile(ENDS_IN_PUNCT_PATTERN)
if ENDS_IN_PUNCT_RE.search(text) is not None:
return False
# 文本长度不能超过设定值,默认20
# NOTE(robinson) - splitting on spaces here instead of word tokenizing because it
# is less expensive and actual tokenization doesn't add much value for the length check
if len(text) > title_max_word_length:
return False
# 文本中数字的占比不能太高,否则不是title
if under_non_alpha_ratio(text, threshold=non_alpha_threshold):
return False
# NOTE(robinson) - Prevent flagging salutations like "To My Dearest Friends," as titles
if text.endswith((",", ".", ",", "。")):
return False
if text.isnumeric():
print(f"Not a title. Text is all numeric:\n\n{text}") # type: ignore
return False
# 开头的字符内应该有数字,默认5个字符内
if len(text) < 5:
text_5 = text
else:
text_5 = text[:5]
alpha_in_text_5 = sum(list(map(lambda x: x.isnumeric(), list(text_5))))
if not alpha_in_text_5:
return False
return True
def zh_title_enhance(docs: Document) -> Document:
title = None
if len(docs) > 0:
for doc in docs:
if is_possible_title(doc.page_content):
doc.metadata['category'] = 'cn_Title'
title = doc.page_content
elif title:
doc.page_content = f"下文与({title})有关。{doc.page_content}"
return docs
else:
print("文件不存在") | null |
22,206 | from typing import TYPE_CHECKING
def get_ocr(use_cuda: bool = True) -> "RapidOCR":
try:
from rapidocr_paddle import RapidOCR
ocr = RapidOCR(det_use_cuda=use_cuda, cls_use_cuda=use_cuda, rec_use_cuda=use_cuda)
except ImportError:
from rapidocr_onnxruntime import RapidOCR
ocr = RapidOCR()
return ocr | null |
22,207 | from pathlib import PathSCORE_THRESHOLD,
import httpx
import contextlib
import json
import os
from io import BytesIO
from server.utils import set_httpx_config, api_address, get_httpx_client
from pprint import pprint
from langchain_core._api import deprecated
The provided code snippet includes necessary dependencies for implementing the `check_error_msg` function. Write a Python function `def check_error_msg(data: Union[str, dict, list], key: str = "errorMsg") -> str` to solve the following problem:
return error message if error occured when requests API
Here is the function:
def check_error_msg(data: Union[str, dict, list], key: str = "errorMsg") -> str:
'''
return error message if error occured when requests API
'''
if isinstance(data, dict):
if key in data:
return data[key]
if "code" in data and data["code"] != 200:
return data["msg"]
return "" | return error message if error occured when requests API |
22,208 | from pathlib import PathSCORE_THRESHOLD,
import httpx
import contextlib
import json
import os
from io import BytesIO
from server.utils import set_httpx_config, api_address, get_httpx_client
from pprint import pprint
from langchain_core._api import deprecated
The provided code snippet includes necessary dependencies for implementing the `check_success_msg` function. Write a Python function `def check_success_msg(data: Union[str, dict, list], key: str = "msg") -> str` to solve the following problem:
return error message if error occured when requests API
Here is the function:
def check_success_msg(data: Union[str, dict, list], key: str = "msg") -> str:
'''
return error message if error occured when requests API
'''
if (isinstance(data, dict)
and key in data
and "code" in data
and data["code"] == 200):
return data[key]
return "" | return error message if error occured when requests API |
22,209 | import streamlit as st
from webui_pages.utils import *
from st_aggrid import AgGrid, JsCode
from st_aggrid.grid_options_builder import GridOptionsBuilder
import pandas as pd
from server.knowledge_base.utils import get_file_path, LOADER_DICT
from server.knowledge_base.kb_service.base import get_kb_details, get_kb_file_details
from typing import Literal, Dict, Tuple
from configs import (kbs_config,
EMBEDDING_MODEL, DEFAULT_VS_TYPE,
CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE)
from server.utils import list_embed_models, list_online_embed_models
import os
import time
cell_renderer = JsCode("""function(params) {if(params.value==true){return '✓'}else{return '×'}}""")
def config_aggrid(
df: pd.DataFrame,
columns: Dict[Tuple[str, str], Dict] = {},
selection_mode: Literal["single", "multiple", "disabled"] = "single",
use_checkbox: bool = False,
) -> GridOptionsBuilder:
gb = GridOptionsBuilder.from_dataframe(df)
gb.configure_column("No", width=40)
for (col, header), kw in columns.items():
gb.configure_column(col, header, wrapHeaderText=True, **kw)
gb.configure_selection(
selection_mode=selection_mode,
use_checkbox=use_checkbox,
pre_selected_rows=st.session_state.get("selected_rows", [0]),
)
gb.configure_pagination(
enabled=True,
paginationAutoPageSize=False,
paginationPageSize=10
)
return gb
def file_exists(kb: str, selected_rows: List) -> Tuple[str, str]:
"""
check whether a doc file exists in local knowledge base folder.
return the file's name and path if it exists.
"""
if selected_rows:
file_name = selected_rows[0]["file_name"]
file_path = get_file_path(kb, file_name)
if os.path.isfile(file_path):
return file_name, file_path
return "", ""
LOADER_DICT = {"UnstructuredHTMLLoader": ['.html', '.htm'],
"MHTMLLoader": ['.mhtml'],
"UnstructuredMarkdownLoader": ['.md'],
"JSONLoader": [".json"],
"JSONLinesLoader": [".jsonl"],
"CSVLoader": [".csv"],
# "FilteredCSVLoader": [".csv"], 如果使用自定义分割csv
"RapidOCRPDFLoader": [".pdf"],
"RapidOCRDocLoader": ['.docx', '.doc'],
"RapidOCRPPTLoader": ['.ppt', '.pptx', ],
"RapidOCRLoader": ['.png', '.jpg', '.jpeg', '.bmp'],
"UnstructuredFileLoader": ['.eml', '.msg', '.rst',
'.rtf', '.txt', '.xml',
'.epub', '.odt','.tsv'],
"UnstructuredEmailLoader": ['.eml', '.msg'],
"UnstructuredEPubLoader": ['.epub'],
"UnstructuredExcelLoader": ['.xlsx', '.xls', '.xlsd'],
"NotebookLoader": ['.ipynb'],
"UnstructuredODTLoader": ['.odt'],
"PythonLoader": ['.py'],
"UnstructuredRSTLoader": ['.rst'],
"UnstructuredRTFLoader": ['.rtf'],
"SRTLoader": ['.srt'],
"TomlLoader": ['.toml'],
"UnstructuredTSVLoader": ['.tsv'],
"UnstructuredWordDocumentLoader": ['.docx', '.doc'],
"UnstructuredXMLLoader": ['.xml'],
"UnstructuredPowerPointLoader": ['.ppt', '.pptx'],
"EverNoteLoader": ['.enex'],
}
def get_kb_details() -> List[Dict]:
kbs_in_folder = list_kbs_from_folder()
kbs_in_db = KBService.list_kbs()
result = {}
for kb in kbs_in_folder:
result[kb] = {
"kb_name": kb,
"vs_type": "",
"kb_info": "",
"embed_model": "",
"file_count": 0,
"create_time": None,
"in_folder": True,
"in_db": False,
}
for kb in kbs_in_db:
kb_detail = get_kb_detail(kb)
if kb_detail:
kb_detail["in_db"] = True
if kb in result:
result[kb].update(kb_detail)
else:
kb_detail["in_folder"] = False
result[kb] = kb_detail
data = []
for i, v in enumerate(result.values()):
v['No'] = i + 1
data.append(v)
return data
def get_kb_file_details(kb_name: str) -> List[Dict]:
    """Merge per-file info on disk with DB records for one knowledge base.

    Returns [] when the KB service cannot be resolved. Entries carry
    ``in_folder`` / ``in_db`` flags and a 1-based ``No``.
    """
    kb = KBServiceFactory.get_service_by_name(kb_name)
    if kb is None:
        return []

    # Seed with files present in the KB's content folder.
    details = {
        doc: {
            "kb_name": kb_name,
            "file_name": doc,
            "file_ext": os.path.splitext(doc)[-1],
            "file_version": 0,
            "document_loader": "",
            "docs_count": 0,
            "text_splitter": "",
            "create_time": None,
            "in_folder": True,
            "in_db": False,
        }
        for doc in list_files_from_folder(kb_name)
    }

    # Case-insensitive match between folder names and DB names.
    lower_map = {name.lower(): name for name in details}
    for doc in kb.list_files():
        doc_detail = get_file_detail(kb_name, doc)
        if not doc_detail:
            continue
        doc_detail["in_db"] = True
        key = lower_map.get(doc.lower())
        if key is not None:
            details[key].update(doc_detail)
        else:
            doc_detail["in_folder"] = False
            details[doc] = doc_detail

    numbered = []
    for idx, item in enumerate(details.values(), start=1):
        item['No'] = idx
        numbered.append(item)
    return numbered
def knowledge_base_page(api: ApiRequest, is_lite: bool = None):
    """Render the knowledge-base management page.

    Lets the user create/select a knowledge base, upload documents, add or
    remove them from the vector store, rebuild the store, and edit individual
    document chunks in an editable grid.

    Args:
        api: client wrapper around the Chatchat HTTP API.
        is_lite: when truthy, only online embedding models are offered.
    """
    try:
        kb_list = {x["kb_name"]: x for x in get_kb_details()}
    except Exception as e:
        st.error(
            "获取知识库信息错误,请检查是否已按照 `README.md` 中 `4 知识库初始化与迁移` 步骤完成初始化或迁移,或是否为数据库连接错误。")
        st.stop()
    kb_names = list(kb_list.keys())

    # Restore the previously selected KB if it still exists.
    if "selected_kb_name" in st.session_state and st.session_state["selected_kb_name"] in kb_names:
        selected_kb_index = kb_names.index(st.session_state["selected_kb_name"])
    else:
        selected_kb_index = 0

    if "selected_kb_info" not in st.session_state:
        st.session_state["selected_kb_info"] = ""

    def format_selected_kb(kb_name: str) -> str:
        # Display the vector-store type and embedding model next to the name.
        if kb := kb_list.get(kb_name):
            return f"{kb_name} ({kb['vs_type']} @ {kb['embed_model']})"
        else:
            return kb_name

    selected_kb = st.selectbox(
        "请选择或新建知识库:",
        kb_names + ["新建知识库"],
        format_func=format_selected_kb,
        index=selected_kb_index
    )

    if selected_kb == "新建知识库":
        # --- create a new knowledge base ---
        with st.form("新建知识库"):
            kb_name = st.text_input(
                "新建知识库名称",
                placeholder="新知识库名称,不支持中文命名",
                key="kb_name",
            )
            kb_info = st.text_input(
                "知识库简介",
                placeholder="知识库简介,方便Agent查找",
                key="kb_info",
            )

            cols = st.columns(2)

            vs_types = list(kbs_config.keys())
            vs_type = cols[0].selectbox(
                "向量库类型",
                vs_types,
                index=vs_types.index(DEFAULT_VS_TYPE),
                key="vs_type",
            )

            # Lite deployments cannot run local embedding models.
            if is_lite:
                embed_models = list_online_embed_models()
            else:
                embed_models = list_embed_models() + list_online_embed_models()

            embed_model = cols[1].selectbox(
                "Embedding 模型",
                embed_models,
                index=embed_models.index(EMBEDDING_MODEL),
                key="embed_model",
            )

            submit_create_kb = st.form_submit_button(
                "新建",
                # disabled=not bool(kb_name),
                use_container_width=True,
            )

        if submit_create_kb:
            if not kb_name or not kb_name.strip():
                st.error(f"知识库名称不能为空!")
            elif kb_name in kb_list:
                st.error(f"名为 {kb_name} 的知识库已经存在!")
            else:
                ret = api.create_knowledge_base(
                    knowledge_base_name=kb_name,
                    vector_store_type=vs_type,
                    embed_model=embed_model,
                )
                st.toast(ret.get("msg", " "))
                st.session_state["selected_kb_name"] = kb_name
                st.session_state["selected_kb_info"] = kb_info
                st.rerun()

    elif selected_kb:
        kb = selected_kb
        st.session_state["selected_kb_info"] = kb_list[kb]['kb_info']
        # file upload widget
        files = st.file_uploader("上传知识文件:",
                                 [i for ls in LOADER_DICT.values() for i in ls],
                                 accept_multiple_files=True,
                                 )
        kb_info = st.text_area("请输入知识库介绍:", value=st.session_state["selected_kb_info"], max_chars=None,
                               key=None,
                               help=None, on_change=None, args=None, kwargs=None)

        # Persist KB description edits immediately.
        if kb_info != st.session_state["selected_kb_info"]:
            st.session_state["selected_kb_info"] = kb_info
            api.update_kb_info(kb, kb_info)

        # with st.sidebar:
        with st.expander(
                "文件处理配置",
                expanded=True,
        ):
            cols = st.columns(3)
            chunk_size = cols[0].number_input("单段文本最大长度:", 1, 1000, CHUNK_SIZE)
            chunk_overlap = cols[1].number_input("相邻文本重合长度:", 0, chunk_size, OVERLAP_SIZE)
            cols[2].write("")
            cols[2].write("")
            zh_title_enhance = cols[2].checkbox("开启中文标题加强", ZH_TITLE_ENHANCE)

        if st.button(
                "添加文件到知识库",
                # use_container_width=True,
                disabled=len(files) == 0,
        ):
            ret = api.upload_kb_docs(files,
                                     knowledge_base_name=kb,
                                     override=True,
                                     chunk_size=chunk_size,
                                     chunk_overlap=chunk_overlap,
                                     zh_title_enhance=zh_title_enhance)
            if msg := check_success_msg(ret):
                st.toast(msg, icon="✔")
            elif msg := check_error_msg(ret):
                st.toast(msg, icon="✖")

        st.divider()

        # knowledge base details grid
        # st.info("请选择文件,点击按钮进行操作。")
        doc_details = pd.DataFrame(get_kb_file_details(kb))
        selected_rows = []
        if not len(doc_details):
            st.info(f"知识库 `{kb}` 中暂无文件")
        else:
            st.write(f"知识库 `{kb}` 中已有文件:")
            st.info("知识库中包含源文件与向量库,请从下表中选择文件后操作")
            doc_details.drop(columns=["kb_name"], inplace=True)
            doc_details = doc_details[[
                "No", "file_name", "document_loader", "text_splitter", "docs_count", "in_folder", "in_db",
            ]]
            doc_details["in_folder"] = doc_details["in_folder"].replace(True, "✓").replace(False, "×")
            doc_details["in_db"] = doc_details["in_db"].replace(True, "✓").replace(False, "×")
            gb = config_aggrid(
                doc_details,
                {
                    ("No", "序号"): {},
                    ("file_name", "文档名称"): {},
                    # ("file_ext", "文档类型"): {},
                    # ("file_version", "文档版本"): {},
                    ("document_loader", "文档加载器"): {},
                    ("docs_count", "文档数量"): {},
                    ("text_splitter", "分词器"): {},
                    # ("create_time", "创建时间"): {},
                    ("in_folder", "源文件"): {"cellRenderer": cell_renderer},
                    ("in_db", "向量库"): {"cellRenderer": cell_renderer},
                },
                "multiple",
            )

            doc_grid = AgGrid(
                doc_details,
                gb.build(),
                columns_auto_size_mode="FIT_CONTENTS",
                theme="alpine",
                custom_css={
                    "#gridToolBar": {"display": "none"},
                },
                allow_unsafe_jscode=True,
                enable_enterprise_modules=False
            )

            selected_rows = doc_grid.get("selected_rows", [])

            cols = st.columns(4)
            file_name, file_path = file_exists(kb, selected_rows)
            if file_path:
                with open(file_path, "rb") as fp:
                    cols[0].download_button(
                        "下载选中文档",
                        fp,
                        file_name=file_name,
                        use_container_width=True, )
            else:
                cols[0].download_button(
                    "下载选中文档",
                    "",
                    disabled=True,
                    use_container_width=True, )

            st.write()
            # split the files and (re)load them into the vector store
            if cols[1].button(
                    "重新添加至向量库" if selected_rows and (
                        pd.DataFrame(selected_rows)["in_db"]).any() else "添加至向量库",
                    disabled=not file_exists(kb, selected_rows)[0],
                    use_container_width=True,
            ):
                file_names = [row["file_name"] for row in selected_rows]
                api.update_kb_docs(kb,
                                   file_names=file_names,
                                   chunk_size=chunk_size,
                                   chunk_overlap=chunk_overlap,
                                   zh_title_enhance=zh_title_enhance)
                st.rerun()

            # remove files from the vector store without deleting the files themselves
            if cols[2].button(
                    "从向量库删除",
                    disabled=not (selected_rows and selected_rows[0]["in_db"]),
                    use_container_width=True,
            ):
                file_names = [row["file_name"] for row in selected_rows]
                api.delete_kb_docs(kb, file_names=file_names)
                st.rerun()

            if cols[3].button(
                    "从知识库中删除",
                    type="primary",
                    use_container_width=True,
            ):
                file_names = [row["file_name"] for row in selected_rows]
                api.delete_kb_docs(kb, file_names=file_names, delete_content=True)
                st.rerun()

        st.divider()

        cols = st.columns(3)

        if cols[0].button(
                "依据源文件重建向量库",
                help="无需上传文件,通过其它方式将文档拷贝到对应知识库content目录下,点击本按钮即可重建知识库。",
                use_container_width=True,
                type="primary",
        ):
            with st.spinner("向量库重构中,请耐心等待,勿刷新或关闭页面。"):
                empty = st.empty()
                empty.progress(0.0, "")
                # recreate_vector_store streams progress dicts: {finished, total, msg}.
                for d in api.recreate_vector_store(kb,
                                                   chunk_size=chunk_size,
                                                   chunk_overlap=chunk_overlap,
                                                   zh_title_enhance=zh_title_enhance):
                    if msg := check_error_msg(d):
                        st.toast(msg)
                    else:
                        empty.progress(d["finished"] / d["total"], d["msg"])
                st.rerun()

        if cols[2].button(
                "删除知识库",
                use_container_width=True,
        ):
            ret = api.delete_knowledge_base(kb)
            st.toast(ret.get("msg", " "))
            time.sleep(1)
            st.rerun()

        with st.sidebar:
            # NOTE(review): keyword/top_k are rendered but never read below —
            # looks like an unfinished in-document search; confirm intent.
            keyword = st.text_input("查询关键字")
            top_k = st.slider("匹配条数", 1, 100, 3)

        st.write("文件内文档列表。双击进行修改,在删除列填入 Y 可删除对应行。")
        docs = []
        df = pd.DataFrame([], columns=["seq", "id", "content", "source"])
        if selected_rows:
            file_name = selected_rows[0]["file_name"]
            docs = api.search_kb_docs(knowledge_base_name=selected_kb, file_name=file_name)
            data = [
                {"seq": i + 1, "id": x["id"], "page_content": x["page_content"], "source": x["metadata"].get("source"),
                 "type": x["type"],
                 "metadata": json.dumps(x["metadata"], ensure_ascii=False),
                 "to_del": "",
                 } for i, x in enumerate(docs)]
            df = pd.DataFrame(data)

        gb = GridOptionsBuilder.from_dataframe(df)
        gb.configure_columns(["id", "source", "type", "metadata"], hide=True)
        gb.configure_column("seq", "No.", width=50)
        gb.configure_column("page_content", "内容", editable=True, autoHeight=True, wrapText=True, flex=1,
                            cellEditor="agLargeTextCellEditor", cellEditorPopup=True)
        gb.configure_column("to_del", "删除", editable=True, width=50, wrapHeaderText=True,
                            cellEditor="agCheckboxCellEditor", cellRender="agCheckboxCellRenderer")
        gb.configure_selection()
        edit_docs = AgGrid(df, gb.build())

        if st.button("保存更改"):
            # Diff edited rows against the originals and push only real changes.
            origin_docs = {
                x["id"]: {"page_content": x["page_content"], "type": x["type"], "metadata": x["metadata"]} for x in
                docs}
            changed_docs = []
            for index, row in edit_docs.data.iterrows():
                origin_doc = origin_docs[row["id"]]
                if row["page_content"] != origin_doc["page_content"]:
                    if row["to_del"] not in ["Y", "y", 1]:
                        changed_docs.append({
                            "page_content": row["page_content"],
                            "type": row["type"],
                            "metadata": json.loads(row["metadata"]),
                        })
            if changed_docs:
                if api.update_kb_docs(knowledge_base_name=selected_kb,
                                      file_names=[file_name],
                                      docs={file_name: changed_docs}):
                    st.toast("更新文档成功")
                else:
                    st.toast("更新文档失败")
import streamlit as st
from webui_pages.utils import *
def model_config_page(api: ApiRequest):
    """Placeholder for the model configuration page; not implemented yet."""
    pass
import streamlit as st
from webui_pages.utils import *
from streamlit_chatbox import *
from streamlit_modal import Modal
from datetime import datetime
import os
import re
import time
from configs import (TEMPERATURE, HISTORY_LEN, PROMPT_TEMPLATES, LLM_MODELS,
DEFAULT_KNOWLEDGE_BASE, DEFAULT_SEARCH_ENGINE, SUPPORT_AGENT_MODEL)
from server.knowledge_base.utils import LOADER_DICT
import uuid
from typing import List, Dict
# Module-level chat UI state holder; renders the assistant with the project icon.
chat_box = ChatBox(
    assistant_avatar=os.path.join(
        "img",
        "chatchat_icon_blue_square_v2.png"
    )
)
def get_messages_history(history_len: int, content_in_expander: bool = False) -> List[Dict]:
    """Return the chat history as ``{"role", "content"}`` dicts.

    content_in_expander controls whether text inside expander elements is kept;
    useful when exporting, unnecessary for the history passed to the LLM.
    """
    def to_plain(msg):
        # Keep only markdown/text elements, optionally dropping expander content.
        kept = [
            elem for elem in msg["elements"]
            if elem._output_method in ("markdown", "text")
            and (content_in_expander or not elem._in_expander)
        ]
        return {
            "role": msg["role"],
            "content": "\n\n".join(elem.content for elem in kept),
        }

    return chat_box.filter_history(history_len=history_len, filter=to_plain)
def upload_temp_docs(files, _api: ApiRequest) -> str:
    """Upload files to a temporary directory for file-based chat.

    Returns the id of the temporary vector store.
    """
    response = _api.upload_temp_docs(files)
    return response.get("data", {}).get("id")
def parse_command(text: str, modal: Modal) -> bool:
    '''
    检查用户是否输入了自定义命令,当前支持:
    /new {session_name}。如果未提供名称,默认为“会话X”
    /del {session_name}。如果未提供名称,在会话数量>1的情况下,删除当前会话。
    /clear {session_name}。如果未提供名称,默认清除当前会话
    /help。查看命令帮助
    返回值:输入的是命令返回True,否则返回False
    '''
    # NOTE: the docstring above is user-facing at runtime — dialogue_page
    # renders its "/..." lines in the help modal via parse_command.__doc__,
    # so do not translate or reword it without updating that usage.
    # Supported commands: /help, /new, /del, /clear. Returns True iff the
    # input was recognized as a command.
    if m := re.match(r"/([^\s]+)\s*(.*)", text):
        cmd, name = m.groups()
        name = name.strip()
        conv_names = chat_box.get_chat_names()
        if cmd == "help":
            modal.open()
        elif cmd == "new":
            # No name supplied: pick the first unused "会话{i}".
            if not name:
                i = 1
                while True:
                    name = f"会话{i}"
                    if name not in conv_names:
                        break
                    i += 1
            if name in st.session_state["conversation_ids"]:
                st.error(f"该会话名称 “{name}” 已存在")
                time.sleep(1)
            else:
                st.session_state["conversation_ids"][name] = uuid.uuid4().hex
                st.session_state["cur_conv_name"] = name
        elif cmd == "del":
            # Default to the current conversation when no name is supplied.
            name = name or st.session_state.get("cur_conv_name")
            if len(conv_names) == 1:
                st.error("这是最后一个会话,无法删除")
                time.sleep(1)
            elif not name or name not in st.session_state["conversation_ids"]:
                st.error(f"无效的会话名称:“{name}”")
                time.sleep(1)
            else:
                st.session_state["conversation_ids"].pop(name, None)
                chat_box.del_chat_name(name)
                st.session_state["cur_conv_name"] = ""
        elif cmd == "clear":
            chat_box.reset_history(name=name or None)
        return True
    return False
# Maps document-loader class names to the file extensions they handle.
# NOTE(review): this redefinition shadows the LOADER_DICT imported above from
# server.knowledge_base.utils — confirm the two are kept in sync or drop one.
LOADER_DICT = {"UnstructuredHTMLLoader": ['.html', '.htm'],
               "MHTMLLoader": ['.mhtml'],
               "UnstructuredMarkdownLoader": ['.md'],
               "JSONLoader": [".json"],
               "JSONLinesLoader": [".jsonl"],
               "CSVLoader": [".csv"],
               # "FilteredCSVLoader": [".csv"],  # enable if using a custom CSV splitter
               "RapidOCRPDFLoader": [".pdf"],
               "RapidOCRDocLoader": ['.docx', '.doc'],
               "RapidOCRPPTLoader": ['.ppt', '.pptx', ],
               "RapidOCRLoader": ['.png', '.jpg', '.jpeg', '.bmp'],
               "UnstructuredFileLoader": ['.eml', '.msg', '.rst',
                                          '.rtf', '.txt', '.xml',
                                          '.epub', '.odt','.tsv'],
               "UnstructuredEmailLoader": ['.eml', '.msg'],
               "UnstructuredEPubLoader": ['.epub'],
               "UnstructuredExcelLoader": ['.xlsx', '.xls', '.xlsd'],
               "NotebookLoader": ['.ipynb'],
               "UnstructuredODTLoader": ['.odt'],
               "PythonLoader": ['.py'],
               "UnstructuredRSTLoader": ['.rst'],
               "UnstructuredRTFLoader": ['.rtf'],
               "SRTLoader": ['.srt'],
               "TomlLoader": ['.toml'],
               "UnstructuredTSVLoader": ['.tsv'],
               "UnstructuredWordDocumentLoader": ['.docx', '.doc'],
               "UnstructuredXMLLoader": ['.xml'],
               "UnstructuredPowerPointLoader": ['.ppt', '.pptx'],
               "EverNoteLoader": ['.enex'],
               }
def dialogue_page(api: ApiRequest, is_lite: bool = False):
    """Render the main dialogue page.

    Supports five modes — plain LLM chat, knowledge-base QA, file chat,
    search-engine QA and custom agent QA — streaming answers into the chat box.

    Args:
        api: client wrapper around the Chatchat HTTP API.
        is_lite: lite deployment — skip local models and model switching.
    """
    st.session_state.setdefault("conversation_ids", {})
    st.session_state["conversation_ids"].setdefault(chat_box.cur_chat_name, uuid.uuid4().hex)
    st.session_state.setdefault("file_chat_id", None)
    default_model = api.get_default_llm_model()[0]

    if not chat_box.chat_inited:
        st.toast(
            f"欢迎使用 [`Langchain-Chatchat`](https://github.com/chatchat-space/Langchain-Chatchat) ! \n\n"
            f"当前运行的模型`{default_model}`, 您可以开始提问了."
        )
        chat_box.init_session()

    # Modal showing help for the custom slash commands (fed by parse_command.__doc__).
    modal = Modal("自定义命令", key="cmd_help", max_width="500")
    if modal.is_open():
        with modal.container():
            cmds = [x for x in parse_command.__doc__.split("\n") if x.strip().startswith("/")]
            st.write("\n\n".join(cmds))

    with st.sidebar:
        # multi-conversation selector
        conv_names = list(st.session_state["conversation_ids"].keys())
        index = 0
        if st.session_state.get("cur_conv_name") in conv_names:
            index = conv_names.index(st.session_state.get("cur_conv_name"))
        conversation_name = st.selectbox("当前会话:", conv_names, index=index)
        chat_box.use_chat_name(conversation_name)
        conversation_id = st.session_state["conversation_ids"][conversation_name]

        def on_mode_change():
            mode = st.session_state.dialogue_mode
            text = f"已切换到 {mode} 模式。"
            if mode == "知识库问答":
                cur_kb = st.session_state.get("selected_kb")
                if cur_kb:
                    text = f"{text} 当前知识库: `{cur_kb}`。"
            st.toast(text)

        dialogue_modes = ["LLM 对话",
                          "知识库问答",
                          "文件对话",
                          "搜索引擎问答",
                          "自定义Agent问答",
                          ]
        dialogue_mode = st.selectbox("请选择对话模式:",
                                     dialogue_modes,
                                     index=0,
                                     on_change=on_mode_change,
                                     key="dialogue_mode",
                                     )

        def on_llm_change():
            if llm_model:
                config = api.get_model_config(llm_model)
                if not config.get("online_api"):  # only local model_workers can switch models
                    st.session_state["prev_llm_model"] = llm_model
                st.session_state["cur_llm_model"] = st.session_state.llm_model

        def llm_model_format_func(x):
            if x in running_models:
                return f"{x} (Running)"
            return x

        # Build the selectable model list: running workers + startable ones.
        running_models = list(api.list_running_models())
        available_models = []
        config_models = api.list_config_models()
        if not is_lite:
            for k, v in config_models.get("local", {}).items():
                if (v.get("model_path_exists")
                        and k not in running_models):
                    available_models.append(k)
        for k, v in config_models.get("online", {}).items():
            if not v.get("provider") and k not in running_models and k in LLM_MODELS:
                available_models.append(k)

        llm_models = running_models + available_models
        cur_llm_model = st.session_state.get("cur_llm_model", default_model)
        if cur_llm_model in llm_models:
            index = llm_models.index(cur_llm_model)
        else:
            index = 0
        llm_model = st.selectbox("选择LLM模型:",
                                 llm_models,
                                 index,
                                 format_func=llm_model_format_func,
                                 on_change=on_llm_change,
                                 key="llm_model",
                                 )
        # Ask the controller to swap workers when a non-running local model is chosen.
        if (st.session_state.get("prev_llm_model") != llm_model
                and not is_lite
                and not llm_model in config_models.get("online", {})
                and not llm_model in config_models.get("langchain", {})
                and llm_model not in running_models):
            with st.spinner(f"正在加载模型: {llm_model},请勿进行操作或刷新页面"):
                prev_model = st.session_state.get("prev_llm_model")
                r = api.change_llm_model(prev_model, llm_model)
                if msg := check_error_msg(r):
                    st.error(msg)
                elif msg := check_success_msg(r):
                    st.success(msg)
                    st.session_state["prev_llm_model"] = llm_model

        # Map each dialogue mode to its prompt-template group.
        index_prompt = {
            "LLM 对话": "llm_chat",
            "自定义Agent问答": "agent_chat",
            "搜索引擎问答": "search_engine_chat",
            "知识库问答": "knowledge_base_chat",
            "文件对话": "knowledge_base_chat",
        }
        prompt_templates_kb_list = list(PROMPT_TEMPLATES[index_prompt[dialogue_mode]].keys())
        prompt_template_name = prompt_templates_kb_list[0]
        if "prompt_template_select" not in st.session_state:
            st.session_state.prompt_template_select = prompt_templates_kb_list[0]

        def prompt_change():
            text = f"已切换为 {prompt_template_name} 模板。"
            st.toast(text)

        prompt_template_select = st.selectbox(
            "请选择Prompt模板:",
            prompt_templates_kb_list,
            index=0,
            on_change=prompt_change,
            key="prompt_template_select",
        )
        prompt_template_name = st.session_state.prompt_template_select
        temperature = st.slider("Temperature:", 0.0, 2.0, TEMPERATURE, 0.05)
        history_len = st.number_input("历史对话轮数:", 0, 20, HISTORY_LEN)

        def on_kb_change():
            st.toast(f"已加载知识库: {st.session_state.selected_kb}")

        if dialogue_mode == "知识库问答":
            with st.expander("知识库配置", True):
                kb_list = api.list_knowledge_bases()
                index = 0
                if DEFAULT_KNOWLEDGE_BASE in kb_list:
                    index = kb_list.index(DEFAULT_KNOWLEDGE_BASE)
                selected_kb = st.selectbox(
                    "请选择知识库:",
                    kb_list,
                    index=index,
                    on_change=on_kb_change,
                    key="selected_kb",
                )
                kb_top_k = st.number_input("匹配知识条数:", 1, 20, VECTOR_SEARCH_TOP_K)

                ## BGE models can score above 1, hence the 0-2 range.
                score_threshold = st.slider("知识匹配分数阈值:", 0.0, 2.0, float(SCORE_THRESHOLD), 0.01)
        elif dialogue_mode == "文件对话":
            with st.expander("文件对话配置", True):
                files = st.file_uploader("上传知识文件:",
                                         [i for ls in LOADER_DICT.values() for i in ls],
                                         accept_multiple_files=True,
                                         )
                kb_top_k = st.number_input("匹配知识条数:", 1, 20, VECTOR_SEARCH_TOP_K)

                ## BGE models can score above 1, hence the 0-2 range.
                score_threshold = st.slider("知识匹配分数阈值:", 0.0, 2.0, float(SCORE_THRESHOLD), 0.01)
                if st.button("开始上传", disabled=len(files) == 0):
                    st.session_state["file_chat_id"] = upload_temp_docs(files, api)
        elif dialogue_mode == "搜索引擎问答":
            search_engine_list = api.list_search_engines()
            if DEFAULT_SEARCH_ENGINE in search_engine_list:
                index = search_engine_list.index(DEFAULT_SEARCH_ENGINE)
            else:
                index = search_engine_list.index("duckduckgo") if "duckduckgo" in search_engine_list else 0
            with st.expander("搜索引擎配置", True):
                search_engine = st.selectbox(
                    label="请选择搜索引擎",
                    options=search_engine_list,
                    index=index,
                )
                se_top_k = st.number_input("匹配搜索结果条数:", 1, 20, SEARCH_ENGINE_TOP_K)

    # Display chat messages from history on app rerun
    chat_box.output_messages()

    chat_input_placeholder = "请输入对话内容,换行请使用Shift+Enter。输入/help查看自定义命令 "

    def on_feedback(
            feedback,
            message_id: str = "",
            history_index: int = -1,
    ):
        # Push the thumbs-up/down score and optional reason to the server.
        reason = feedback["text"]
        score_int = chat_box.set_feedback(feedback=feedback, history_index=history_index)
        api.chat_feedback(message_id=message_id,
                          score=score_int,
                          reason=reason)
        st.session_state["need_rerun"] = True

    feedback_kwargs = {
        "feedback_type": "thumbs",
        "optional_text_label": "欢迎反馈您打分的理由",
    }

    if prompt := st.chat_input(chat_input_placeholder, key="prompt"):
        if parse_command(text=prompt, modal=modal):  # the user typed a custom command
            st.rerun()
        else:
            history = get_messages_history(history_len)
            chat_box.user_say(prompt)
            if dialogue_mode == "LLM 对话":
                chat_box.ai_say("正在思考...")
                text = ""
                message_id = ""
                r = api.chat_chat(prompt,
                                  history=history,
                                  conversation_id=conversation_id,
                                  model=llm_model,
                                  prompt_name=prompt_template_name,
                                  temperature=temperature)
                for t in r:
                    if error_msg := check_error_msg(t):  # check whether an error occurred
                        st.error(error_msg)
                        break
                    text += t.get("text", "")
                    chat_box.update_msg(text)
                    message_id = t.get("message_id", "")

                metadata = {
                    "message_id": message_id,
                }
                chat_box.update_msg(text, streaming=False, metadata=metadata)  # final text, remove cursor
                chat_box.show_feedback(**feedback_kwargs,
                                       key=message_id,
                                       on_submit=on_feedback,
                                       kwargs={"message_id": message_id, "history_index": len(chat_box.history) - 1})

            elif dialogue_mode == "自定义Agent问答":
                if not any(agent in llm_model for agent in SUPPORT_AGENT_MODEL):
                    chat_box.ai_say([
                        f"正在思考... \n\n <span style='color:red'>该模型并没有进行Agent对齐,请更换支持Agent的模型获得更好的体验!</span>\n\n\n",
                        Markdown("...", in_expander=True, title="思考过程", state="complete"),
                    ])
                else:
                    chat_box.ai_say([
                        f"正在思考...",
                        Markdown("...", in_expander=True, title="思考过程", state="complete"),
                    ])
                text = ""
                ans = ""
                for d in api.agent_chat(prompt,
                                        history=history,
                                        model=llm_model,
                                        prompt_name=prompt_template_name,
                                        temperature=temperature,
                                        ):
                    try:
                        d = json.loads(d)
                    except:
                        pass
                    if error_msg := check_error_msg(d):  # check whether an error occurred
                        st.error(error_msg)
                    # element 1 is the expander (thinking steps), element 0 the answer.
                    if chunk := d.get("answer"):
                        text += chunk
                        chat_box.update_msg(text, element_index=1)
                    if chunk := d.get("final_answer"):
                        ans += chunk
                        chat_box.update_msg(ans, element_index=0)
                    if chunk := d.get("tools"):
                        text += "\n\n".join(d.get("tools", []))
                        chat_box.update_msg(text, element_index=1)
                chat_box.update_msg(ans, element_index=0, streaming=False)
                chat_box.update_msg(text, element_index=1, streaming=False)
            elif dialogue_mode == "知识库问答":
                chat_box.ai_say([
                    f"正在查询知识库 `{selected_kb}` ...",
                    Markdown("...", in_expander=True, title="知识库匹配结果", state="complete"),
                ])
                text = ""
                for d in api.knowledge_base_chat(prompt,
                                                 knowledge_base_name=selected_kb,
                                                 top_k=kb_top_k,
                                                 score_threshold=score_threshold,
                                                 history=history,
                                                 model=llm_model,
                                                 prompt_name=prompt_template_name,
                                                 temperature=temperature):
                    if error_msg := check_error_msg(d):  # check whether an error occurred
                        st.error(error_msg)
                    elif chunk := d.get("answer"):
                        text += chunk
                        chat_box.update_msg(text, element_index=0)
                chat_box.update_msg(text, element_index=0, streaming=False)
                chat_box.update_msg("\n\n".join(d.get("docs", [])), element_index=1, streaming=False)
            elif dialogue_mode == "文件对话":
                if st.session_state["file_chat_id"] is None:
                    st.error("请先上传文件再进行对话")
                    st.stop()
                chat_box.ai_say([
                    f"正在查询文件 `{st.session_state['file_chat_id']}` ...",
                    Markdown("...", in_expander=True, title="文件匹配结果", state="complete"),
                ])
                text = ""
                for d in api.file_chat(prompt,
                                       knowledge_id=st.session_state["file_chat_id"],
                                       top_k=kb_top_k,
                                       score_threshold=score_threshold,
                                       history=history,
                                       model=llm_model,
                                       prompt_name=prompt_template_name,
                                       temperature=temperature):
                    if error_msg := check_error_msg(d):  # check whether an error occurred
                        st.error(error_msg)
                    elif chunk := d.get("answer"):
                        text += chunk
                        chat_box.update_msg(text, element_index=0)
                chat_box.update_msg(text, element_index=0, streaming=False)
                chat_box.update_msg("\n\n".join(d.get("docs", [])), element_index=1, streaming=False)
            elif dialogue_mode == "搜索引擎问答":
                chat_box.ai_say([
                    f"正在执行 `{search_engine}` 搜索...",
                    Markdown("...", in_expander=True, title="网络搜索结果", state="complete"),
                ])
                text = ""
                for d in api.search_engine_chat(prompt,
                                                search_engine_name=search_engine,
                                                top_k=se_top_k,
                                                history=history,
                                                model=llm_model,
                                                prompt_name=prompt_template_name,
                                                temperature=temperature,
                                                split_result=se_top_k > 1):
                    if error_msg := check_error_msg(d):  # check whether an error occurred
                        st.error(error_msg)
                    elif chunk := d.get("answer"):
                        text += chunk
                        chat_box.update_msg(text, element_index=0)
                chat_box.update_msg(text, element_index=0, streaming=False)
                chat_box.update_msg("\n\n".join(d.get("docs", [])), element_index=1, streaming=False)

    if st.session_state.get("need_rerun"):
        st.session_state["need_rerun"] = False
        st.rerun()

    now = datetime.now()
    with st.sidebar:

        cols = st.columns(2)
        export_btn = cols[0]
        if cols[1].button(
                "清空对话",
                use_container_width=True,
        ):
            chat_box.reset_history()
            st.rerun()

    export_btn.download_button(
        "导出记录",
        "".join(chat_box.export2md()),
        file_name=f"{now:%Y-%m-%d %H.%M}_对话记录.md",
        mime="text/markdown",
        use_container_width=True,
    )
def get_latest_tag():
    """Return the latest git tag of the form ``vX.Y.Z`` (numeric version order).

    Returns:
        str: the highest-versioned matching tag, e.g. ``"v0.2.10"``.

    Raises:
        RuntimeError: if the repository has no tag matching ``vX.Y.Z``.
        subprocess.CalledProcessError: if ``git tag`` fails.
    """
    output = subprocess.check_output(['git', 'tag'])
    tags = output.decode('utf-8').split('\n')[:-1]
    # Only consider tags matching the vX.Y.Z scheme; previously any other tag
    # (e.g. "release-1") caused an AttributeError inside the sort key, and an
    # empty tag list caused an IndexError.
    versioned = []
    for tag in tags:
        m = re.match(r'v(\d+)\.(\d+)\.(\d+)', tag)
        if m:
            versioned.append((tuple(map(int, m.groups())), tag))
    if not versioned:
        raise RuntimeError("no git tags matching vX.Y.Z were found")
    versioned.sort(key=lambda pair: pair[0])  # stable: ties keep git's order
    return versioned[-1][1]
def update_version_number(latest_tag, increment):
    """Compute the next semantic version from ``latest_tag``.

    Args:
        latest_tag: current version tag, e.g. ``"v1.2.3"``.
        increment: component to bump — ``'X'`` (major), ``'Y'`` (minor) or
            ``'Z'`` (patch).

    Returns:
        str: the bumped version, e.g. ``"v2.0.0"``.

    Raises:
        ValueError: if the tag is malformed or ``increment`` is invalid
            (previously a malformed tag raised a confusing AttributeError and
            an unknown increment silently returned the same version).
    """
    m = re.match(r'v(\d+)\.(\d+)\.(\d+)', latest_tag)
    if m is None:
        raise ValueError(f"malformed version tag: {latest_tag!r}")
    major, minor, patch = map(int, m.groups())
    if increment == 'X':
        major += 1
        minor, patch = 0, 0
    elif increment == 'Y':
        minor += 1
        patch = 0
    elif increment == 'Z':
        patch += 1
    else:
        raise ValueError(f"increment must be 'X', 'Y' or 'Z', got {increment!r}")
    return f"v{major}.{minor}.{patch}"
import asyncio
import multiprocessing as mp
import os
import subprocess
import sys
from multiprocessing import Process
from datetime import datetime
from pprint import pprint
from langchain_core._api import deprecated
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from configs import (
LOG_PATH,
log_verbose,
logger,
LLM_MODELS,
EMBEDDING_MODEL,
TEXT_SPLITTER_NAME,
FSCHAT_CONTROLLER,
FSCHAT_OPENAI_API,
FSCHAT_MODEL_WORKERS,
API_SERVER,
WEBUI_SERVER,
HTTPX_DEFAULT_TIMEOUT,
)
from server.utils import (fschat_controller_address, fschat_model_worker_address,
fschat_openai_api_address, get_httpx_client, get_model_worker_config,
MakeFastAPIOffline, FastAPI, llm_device, embedding_device)
from server.knowledge_base.migrate import create_tables
import argparse
from typing import Dict, List, Tuple

from configs import VERSION
def run_controller(log_level: str = "INFO", started_event: mp.Event = None):
    """Start the FastChat controller server (blocking).

    Also defines ``release_worker``, which asks a model worker to release its
    current model and optionally load a replacement.

    Args:
        log_level: uvicorn/fastchat log level.
        started_event: set once the app finishes startup (via _set_app_event).
    """
    import uvicorn
    import httpx
    from fastapi import Body
    import time
    import sys
    from server.utils import set_httpx_config
    set_httpx_config()

    app = create_controller_app(
        dispatch_method=FSCHAT_CONTROLLER.get("dispatch_method"),
        log_level=log_level,
    )
    _set_app_event(app, started_event)

    # add interface to release and load model worker
    # NOTE(review): no route decorator is visible here — upstream registers
    # this handler with @app.post("/release_worker"); confirm it wasn't lost.
    def release_worker(
            model_name: str = Body(..., description="要释放模型的名称", samples=["chatglm-6b"]),
            # worker_address: str = Body(None, description="要释放模型的地址,与名称二选一", samples=[FSCHAT_CONTROLLER_address()]),
            new_model_name: str = Body(None, description="释放后加载该模型"),
            keep_origin: bool = Body(False, description="不释放原模型,加载新模型")
    ) -> Dict:
        available_models = app._controller.list_models()
        if new_model_name in available_models:
            msg = f"要切换的LLM模型 {new_model_name} 已经存在"
            logger.info(msg)
            return {"code": 500, "msg": msg}

        if new_model_name:
            logger.info(f"开始切换LLM模型:从 {model_name} 到 {new_model_name}")
        else:
            logger.info(f"即将停止LLM模型: {model_name}")

        if model_name not in available_models:
            msg = f"the model {model_name} is not available"
            logger.error(msg)
            return {"code": 500, "msg": msg}

        worker_address = app._controller.get_worker_address(model_name)
        if not worker_address:
            msg = f"can not find model_worker address for {model_name}"
            logger.error(msg)
            return {"code": 500, "msg": msg}

        # Ask the worker itself to release/replace its model.
        with get_httpx_client() as client:
            r = client.post(worker_address + "/release",
                            json={"new_model_name": new_model_name, "keep_origin": keep_origin})
            if r.status_code != 200:
                msg = f"failed to release model: {model_name}"
                logger.error(msg)
                return {"code": 500, "msg": msg}

        if new_model_name:
            # Poll until the replacement worker registers or we run out of time.
            timer = HTTPX_DEFAULT_TIMEOUT  # wait for new model_worker register
            while timer > 0:
                models = app._controller.list_models()
                if new_model_name in models:
                    break
                time.sleep(1)
                timer -= 1
            if timer > 0:
                msg = f"sucess change model from {model_name} to {new_model_name}"
                logger.info(msg)
                return {"code": 200, "msg": msg}
            else:
                msg = f"failed change model from {model_name} to {new_model_name}"
                logger.error(msg)
                return {"code": 500, "msg": msg}
        else:
            msg = f"sucess to release model: {model_name}"
            logger.info(msg)
            return {"code": 200, "msg": msg}

    host = FSCHAT_CONTROLLER["host"]
    port = FSCHAT_CONTROLLER["port"]

    if log_level == "ERROR":
        # Restore std streams that fastchat's logger may have redirected.
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
    uvicorn.run(app, host=host, port=port, log_level=log_level.lower())
def run_model_worker(
        model_name: str = LLM_MODELS[0],
        controller_address: str = "",
        log_level: str = "INFO",
        q: mp.Queue = None,
        started_event: mp.Event = None,
):
    """Start a FastChat model worker for ``model_name`` (blocking).

    Args:
        model_name: key into the model-worker configuration.
        controller_address: controller to register with; defaults to the
            configured controller address.
        log_level: uvicorn log level.
        q: queue used to ask the parent process to start/replace/stop workers.
        started_event: set once the app finishes startup.
    """
    import uvicorn
    from fastapi import Body
    import sys
    from server.utils import set_httpx_config
    set_httpx_config()

    kwargs = get_model_worker_config(model_name)
    host = kwargs.pop("host")
    port = kwargs.pop("port")
    kwargs["model_names"] = [model_name]
    kwargs["controller_address"] = controller_address or fschat_controller_address()
    kwargs["worker_address"] = fschat_model_worker_address(model_name)
    model_path = kwargs.get("model_path", "")
    kwargs["model_path"] = model_path

    app = create_model_worker_app(log_level=log_level, **kwargs)
    _set_app_event(app, started_event)
    if log_level == "ERROR":
        # Restore std streams that fastchat's logger may have redirected.
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__

    # add interface to release and load model
    # NOTE(review): no route decorator is visible here — upstream registers
    # this handler with @app.post("/release"); confirm it wasn't lost.
    def release_model(
            new_model_name: str = Body(None, description="释放后加载该模型"),
            keep_origin: bool = Body(False, description="不释放原模型,加载新模型")
    ) -> Dict:
        # Messages on `q` are consumed by the parent: [current, action, new].
        if keep_origin:
            if new_model_name:
                q.put([model_name, "start", new_model_name])
        else:
            if new_model_name:
                q.put([model_name, "replace", new_model_name])
            else:
                q.put([model_name, "stop", None])
        return {"code": 200, "msg": "done"}

    uvicorn.run(app, host=host, port=port, log_level=log_level.lower())
def run_openai_api(log_level: str = "INFO", started_event: mp.Event = None):
    """Start the FastChat OpenAI-compatible API server (blocking)."""
    import sys

    import uvicorn

    from server.utils import set_httpx_config
    set_httpx_config()

    app = create_openai_api_app(fschat_controller_address(), log_level=log_level)
    _set_app_event(app, started_event)

    if log_level == "ERROR":
        # Restore std streams that fastchat's logger may have redirected.
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
    uvicorn.run(app, host=FSCHAT_OPENAI_API["host"], port=FSCHAT_OPENAI_API["port"])
def run_api_server(started_event: mp.Event = None, run_mode: str = None):
    """Start the Chatchat API server (blocking)."""
    import uvicorn

    from server.api import create_app
    from server.utils import set_httpx_config
    set_httpx_config()

    app = create_app(run_mode=run_mode)
    _set_app_event(app, started_event)

    uvicorn.run(app, host=API_SERVER["host"], port=API_SERVER["port"])
def run_webui(started_event: mp.Event = None, run_mode: str = None):
    """Launch the Streamlit web UI as a child process and wait for it to exit."""
    from server.utils import set_httpx_config
    set_httpx_config()

    cmd = [
        "streamlit", "run", "webui.py",
        "--server.address", WEBUI_SERVER["host"],
        "--server.port", str(WEBUI_SERVER["port"]),
        "--theme.base", "light",
        "--theme.primaryColor", "#165dff",
        "--theme.secondaryBackgroundColor", "#f5f5f5",
        "--theme.textColor", "#000000",
    ]
    if run_mode == "lite":
        # Everything after "--" is forwarded to webui.py itself.
        cmd += ["--", "lite"]

    proc = subprocess.Popen(cmd)
    started_event.set()
    proc.wait()
def parse_args() -> Tuple[argparse.Namespace, argparse.ArgumentParser]:
    """Parse command-line arguments for the startup script.

    Returns:
        A ``(args, parser)`` tuple: the parsed namespace and the parser itself
        (the parser is returned so callers can print help on bad combinations).
        Note: the previous annotation claimed ``argparse.ArgumentParser`` but a
        tuple has always been returned.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-a",
        "--all-webui",
        action="store_true",
        help="run fastchat's controller/openai_api/model_worker servers, run api.py and webui.py",
        dest="all_webui",
    )
    parser.add_argument(
        "--all-api",
        action="store_true",
        help="run fastchat's controller/openai_api/model_worker servers, run api.py",
        dest="all_api",
    )
    parser.add_argument(
        "--llm-api",
        action="store_true",
        help="run fastchat's controller/openai_api/model_worker servers",
        dest="llm_api",
    )
    parser.add_argument(
        "-o",
        "--openai-api",
        action="store_true",
        help="run fastchat's controller/openai_api servers",
        dest="openai_api",
    )
    parser.add_argument(
        "-m",
        "--model-worker",
        action="store_true",
        help="run fastchat's model_worker server with specified model name. "
             "specify --model-name if not using default LLM_MODELS",
        dest="model_worker",
    )
    parser.add_argument(
        "-n",
        "--model-name",
        type=str,
        nargs="+",
        default=LLM_MODELS,
        help="specify model name for model worker. "
             "add addition names with space seperated to start multiple model workers.",
        dest="model_name",
    )
    parser.add_argument(
        "-c",
        "--controller",
        type=str,
        help="specify controller address the worker is registered to. default is FSCHAT_CONTROLLER",
        dest="controller_address",
    )
    parser.add_argument(
        "--api",
        action="store_true",
        help="run api.py server",
        dest="api",
    )
    parser.add_argument(
        "-p",
        "--api-worker",
        action="store_true",
        help="run online model api such as zhipuai",
        dest="api_worker",
    )
    parser.add_argument(
        "-w",
        "--webui",
        action="store_true",
        help="run webui.py server",
        dest="webui",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        action="store_true",
        help="减少fastchat服务log信息",
        dest="quiet",
    )
    parser.add_argument(
        "-i",
        "--lite",
        action="store_true",
        help="以Lite模式运行:仅支持在线API的LLM对话、搜索引擎对话",
        dest="lite",
    )
    args = parser.parse_args()
    return args, parser
def dump_server_info(after_start=False, args=None):
    """Print a configuration/environment summary banner.

    When after_start is True, also prints the addresses of the servers that
    were requested via ``args``.
    """
    import platform

    import fastchat
    import langchain

    from server.utils import api_address, webui_address

    banner = "=" * 30 + "Langchain-Chatchat Configuration" + "=" * 30
    print("\n")
    print(banner)
    print(f"操作系统:{platform.platform()}.")
    print(f"python版本:{sys.version}")
    print(f"项目版本:{VERSION}")
    print(f"langchain版本:{langchain.__version__}. fastchat版本:{fastchat.__version__}")
    print("\n")

    # CLI-provided model names take precedence over the configured default.
    models = args.model_name if args and args.model_name else LLM_MODELS

    print(f"当前使用的分词器:{TEXT_SPLITTER_NAME}")
    print(f"当前启动的LLM模型:{models} @ {llm_device()}")
    for model in models:
        pprint(get_model_worker_config(model))
    print(f"当前Embbedings模型: {EMBEDDING_MODEL} @ {embedding_device()}")

    if after_start:
        print("\n")
        print(f"服务端运行信息:")
        if args.openai_api:
            print(f"    OpenAI API Server: {fschat_openai_api_address()}")
        if args.api:
            print(f"    Chatchat  API  Server: {api_address()}")
        if args.webui:
            print(f"    Chatchat WEBUI Server: {webui_address()}")
    print(banner)
    print("\n")
async def start_main_server():
    """Parse CLI flags and launch every requested sub-server as a child process.

    Startup order (each stage waits on a manager Event before the next one
    begins): controller -> openai_api -> model/api workers -> api server ->
    webui.  Afterwards this function blocks on a shared queue, servicing
    runtime commands of the form ``[model_name, cmd, new_model_name]`` to
    start, stop or replace model worker processes.  On exit (signal or
    error) every child process is killed.
    """
    import time
    import signal
    def handler(signalname):
        """
        Python 3.9 has `signal.strsignal(signalnum)` so this closure would not be needed.
        Also, 3.8 includes `signal.valid_signals()` that can be used to create a mapping for the same purpose.
        """
        def f(signal_received, frame):
            raise KeyboardInterrupt(f"{signalname} received")
        return f
    # This will be inherited by the child process if it is forked (not spawned)
    signal.signal(signal.SIGINT, handler("SIGINT"))
    signal.signal(signal.SIGTERM, handler("SIGTERM"))
    # Children start from a fresh interpreter.  NOTE(review): presumably
    # chosen so GPU/torch state is not shared via fork — confirm.
    mp.set_start_method("spawn")
    manager = mp.Manager()
    run_mode = None
    # Control channel: workers post start/stop/replace commands here.
    queue = manager.Queue()
    args, parser = parse_args()
    # Expand the composite flags into the individual server switches.
    if args.all_webui:
        args.openai_api = True
        args.model_worker = True
        args.api = True
        args.api_worker = True
        args.webui = True
    elif args.all_api:
        args.openai_api = True
        args.model_worker = True
        args.api = True
        args.api_worker = True
        args.webui = False
    elif args.llm_api:
        args.openai_api = True
        args.model_worker = True
        args.api_worker = True
        args.api = False
        args.webui = False
    if args.lite:
        # Lite mode: no local model workers, only online-API based ones.
        args.model_worker = False
        run_mode = "lite"
    dump_server_info(args=args)
    if len(sys.argv) > 1:
        logger.info(f"正在启动服务:")
        logger.info(f"如需查看 llm_api 日志,请前往 {LOG_PATH}")
    # Registry of child processes.  "online_api" and "model_worker" hold
    # per-model sub-dicts; every other key maps to a single Process.
    processes = {"online_api": {}, "model_worker": {}}
    def process_count():
        # Total scheduled processes; the two nested dicts are counted by
        # their contents, hence the "- 2".
        return len(processes) + len(processes["online_api"]) + len(processes["model_worker"]) - 2
    if args.quiet or not log_verbose:
        log_level = "ERROR"
    else:
        log_level = "INFO"
    controller_started = manager.Event()
    if args.openai_api:
        process = Process(
            target=run_controller,
            name=f"controller",
            kwargs=dict(log_level=log_level, started_event=controller_started),
            daemon=True,
        )
        processes["controller"] = process
        process = Process(
            target=run_openai_api,
            name=f"openai_api",
            daemon=True,
        )
        processes["openai_api"] = process
    model_worker_started = []
    # One local model worker process per offline (non-online-API) model.
    if args.model_worker:
        for model_name in args.model_name:
            config = get_model_worker_config(model_name)
            if not config.get("online_api"):
                e = manager.Event()
                model_worker_started.append(e)
                process = Process(
                    target=run_model_worker,
                    name=f"model_worker - {model_name}",
                    kwargs=dict(model_name=model_name,
                                controller_address=args.controller_address,
                                log_level=log_level,
                                q=queue,
                                started_event=e),
                    daemon=True,
                )
                processes["model_worker"][model_name] = process
    # Online-API workers need a worker_class and an FSCHAT_MODEL_WORKERS entry.
    if args.api_worker:
        for model_name in args.model_name:
            config = get_model_worker_config(model_name)
            if (config.get("online_api")
                    and config.get("worker_class")
                    and model_name in FSCHAT_MODEL_WORKERS):
                e = manager.Event()
                model_worker_started.append(e)
                process = Process(
                    target=run_model_worker,
                    name=f"api_worker - {model_name}",
                    kwargs=dict(model_name=model_name,
                                controller_address=args.controller_address,
                                log_level=log_level,
                                q=queue,
                                started_event=e),
                    daemon=True,
                )
                processes["online_api"][model_name] = process
    api_started = manager.Event()
    if args.api:
        process = Process(
            target=run_api_server,
            name=f"API Server",
            kwargs=dict(started_event=api_started, run_mode=run_mode),
            daemon=True,
        )
        processes["api"] = process
    webui_started = manager.Event()
    if args.webui:
        process = Process(
            target=run_webui,
            name=f"WEBUI Server",
            kwargs=dict(started_event=webui_started, run_mode=run_mode),
            daemon=True,
        )
        processes["webui"] = process
    if process_count() == 0:
        parser.print_help()
    else:
        try:
            # Make sure every task exits cleanly once SIGINT is received.
            if p := processes.get("controller"):
                p.start()
                p.name = f"{p.name} ({p.pid})"
                controller_started.wait()  # wait until the controller has fully started
            if p := processes.get("openai_api"):
                p.start()
                p.name = f"{p.name} ({p.pid})"
            for n, p in processes.get("model_worker", {}).items():
                p.start()
                p.name = f"{p.name} ({p.pid})"
            for n, p in processes.get("online_api", []).items():
                p.start()
                p.name = f"{p.name} ({p.pid})"
            # Block until every worker has signalled readiness.
            for e in model_worker_started:
                e.wait()
            if p := processes.get("api"):
                p.start()
                p.name = f"{p.name} ({p.pid})"
                api_started.wait()
            if p := processes.get("webui"):
                p.start()
                p.name = f"{p.name} ({p.pid})"
                webui_started.wait()
            dump_server_info(after_start=True, args=args)
            # Runtime control loop: react to commands posted on the queue.
            while True:
                cmd = queue.get()
                e = manager.Event()
                if isinstance(cmd, list):
                    model_name, cmd, new_model_name = cmd
                    if cmd == "start":  # launch a new model worker
                        logger.info(f"准备启动新模型进程:{new_model_name}")
                        process = Process(
                            target=run_model_worker,
                            name=f"model_worker - {new_model_name}",
                            kwargs=dict(model_name=new_model_name,
                                        controller_address=args.controller_address,
                                        log_level=log_level,
                                        q=queue,
                                        started_event=e),
                            daemon=True,
                        )
                        process.start()
                        process.name = f"{process.name} ({process.pid})"
                        processes["model_worker"][new_model_name] = process
                        e.wait()
                        logger.info(f"成功启动新模型进程:{new_model_name}")
                    elif cmd == "stop":
                        if process := processes["model_worker"].get(model_name):
                            time.sleep(1)
                            process.terminate()
                            process.join()
                            logger.info(f"停止模型进程:{model_name}")
                        else:
                            logger.error(f"未找到模型进程:{model_name}")
                    elif cmd == "replace":
                        # Stop the old worker, then start one for the new model.
                        if process := processes["model_worker"].pop(model_name, None):
                            logger.info(f"停止模型进程:{model_name}")
                            start_time = datetime.now()
                            time.sleep(1)
                            process.terminate()
                            process.join()
                            process = Process(
                                target=run_model_worker,
                                name=f"model_worker - {new_model_name}",
                                kwargs=dict(model_name=new_model_name,
                                            controller_address=args.controller_address,
                                            log_level=log_level,
                                            q=queue,
                                            started_event=e),
                                daemon=True,
                            )
                            process.start()
                            process.name = f"{process.name} ({process.pid})"
                            processes["model_worker"][new_model_name] = process
                            e.wait()
                            timing = datetime.now() - start_time
                            logger.info(f"成功启动新模型进程:{new_model_name}。用时:{timing}。")
                        else:
                            logger.error(f"未找到模型进程:{model_name}")
            # for process in processes.get("model_worker", {}).values():
            #     process.join()
            # for process in processes.get("online_api", {}).values():
            #     process.join()
            # for name, process in processes.items():
            #     if name not in ["model_worker", "online_api"]:
            #         if isinstance(p, dict):
            #             for work_process in p.values():
            #                 work_process.join()
            #         else:
            #             process.join()
        # NOTE(review): KeyboardInterrupt is a BaseException, not an Exception,
        # so the signal-raised interrupt skips this handler and goes straight
        # to ``finally``; the warning below only fires for genuine errors.
        except Exception as e:
            logger.error(e)
            logger.warning("Caught KeyboardInterrupt! Setting stop event...")
        finally:
            for p in processes.values():
                logger.warning("Sending SIGKILL to %s", p)
                # Queues and other inter-process communication primitives can break when
                # process is killed, but we don't care here
                if isinstance(p, dict):
                    for process in p.values():
                        process.kill()
                else:
                    p.kill()
            for p in processes.values():
                logger.info("Process status: %s", p)
22,215 | import sys
import os
import torch
from datetime import datetime
from configs import (
MODEL_PATH,
EMBEDDING_MODEL,
EMBEDDING_KEYWORD_FILE,
)
from safetensors.torch import save_model
from sentence_transformers import SentenceTransformer
from langchain_core._api import deprecated
def add_keyword_to_model(model_name=EMBEDDING_MODEL, keyword_file: str = "", output_model_path: str = None):
    """Extend a SentenceTransformer's vocabulary with domain keywords.

    Each non-blank line of *keyword_file* is registered with the tokenizer as
    a new token, and the corresponding rows of the word-embedding matrix are
    initialised from ``get_keyword_embedding`` so the new tokens start with
    meaningful vectors instead of random ones.

    Args:
        model_name: name/path of the base SentenceTransformer model.
        keyword_file: UTF-8 text file with one keyword per line.
        output_model_path: if given, the patched model is saved there.
    """
    # One keyword per line; skip blank lines so no empty token is registered
    # (an empty token would silently misalign the weight slice below).
    key_words = []
    with open(keyword_file, "r", encoding="utf-8") as f:
        for line in f:
            word = line.strip()
            if word:
                key_words.append(word)
    st_model = SentenceTransformer(model_name)
    key_words_len = len(key_words)
    word_embedding_model = st_model._first_module()
    bert_model = word_embedding_model.auto_model
    tokenizer = word_embedding_model.tokenizer
    # Keyword embeddings computed against the *original* vocabulary.
    key_words_embedding = get_keyword_embedding(bert_model, tokenizer, key_words)
    embedding_weight = bert_model.embeddings.word_embeddings.weight
    embedding_weight_len = len(embedding_weight)
    # NOTE(review): tokenizer.add_tokens() skips tokens already in the vocab;
    # if any keyword is a duplicate the slice assignment below misaligns —
    # confirm keyword files never contain existing vocabulary entries.
    tokenizer.add_tokens(key_words)
    bert_model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=32)
    embedding_weight = bert_model.embeddings.word_embeddings.weight
    # Overwrite the freshly created rows with the precomputed keyword vectors.
    with torch.no_grad():
        embedding_weight[embedding_weight_len:embedding_weight_len + key_words_len, :] = key_words_embedding
    if output_model_path:
        os.makedirs(output_model_path, exist_ok=True)
        word_embedding_model.save(output_model_path)
        # Re-dump the patched transformer weights over the saved checkpoint.
        safetensors_file = os.path.join(output_model_path, "model.safetensors")
        metadata = {'format': 'pt'}
        save_model(bert_model, safetensors_file, metadata)
        print("save model to {}".format(output_model_path))
def add_keyword_to_embedding_model(path: str = EMBEDDING_KEYWORD_FILE):
    """Build a keyword-augmented copy of the configured embedding model.

    The patched model is written next to the original one, under a
    timestamped ``<EMBEDDING_MODEL>_Merge_Keywords_<ts>`` directory.

    Args:
        path: keyword file, one keyword per line.
    """
    keyword_file = path  # (the previous single-argument os.path.join was a no-op)
    model_name = MODEL_PATH["embed_model"][EMBEDDING_MODEL]
    model_parent_directory = os.path.dirname(model_name)
    current_time = datetime.now().strftime('%Y%m%d_%H%M%S')
    output_model_name = "{}_Merge_Keywords_{}".format(EMBEDDING_MODEL, current_time)
    output_model_path = os.path.join(model_parent_directory, output_model_name)
    add_keyword_to_model(model_name, keyword_file, output_model_path)
22,216 | import sys
import os
import subprocess
import re
import logging
import argparse
# Directory where the launch/check shell snippets write and poll log files.
LOG_PATH = "./logs/"
# Shell snippet template: poll {0}/{1}.log every 5 seconds until it contains
# "Uvicorn running on", i.e. block until the launched service is up.
# Placeholders: {0}=log dir, {1}=log file stem, {2}=display name of service.
base_check_sh = """while [ `grep -c "Uvicorn running on" {0}/{1}.log` -eq '0' ];do
sleep 5s;
echo "wait {2} running"
done
echo '{2} running' """
def string_args(args, args_list):
    """Serialize selected attributes of *args* into a CLI argument string.

    Args:
        args: an ``argparse.Namespace`` (or any object exposing ``_get_kwargs()``).
        args_list: attribute names, in dash-separated form, to include.

    Returns:
        A string such as ``" --model-name foo  --port 8080 "`` suitable for
        interpolation into a shell command.  Falsy values are omitted; True
        booleans become bare flags; sequences are space-joined.
    """
    args_str = ""
    for key, value in args._get_kwargs():
        # _get_kwargs() yields underscore-separated names; convert to the
        # dash-separated form before matching against args_list.
        key = key.replace("_", "-")
        if key not in args_list:
            continue
        # fastchat's port/host options carry no prefix, so keep only the
        # final component (e.g. "worker-port" -> "port").
        key = key.split("-")[-1] if re.search("port|host", key) else key
        if not value:
            # False / None / empty values are simply omitted.
            pass
        elif isinstance(value, bool) and value is True:
            args_str += f" --{key} "
        elif isinstance(value, (list, tuple, set)):
            # str() each element so numeric sequences no longer crash join().
            value = " ".join(str(v) for v in value)
            args_str += f" --{key} {value} "
        else:
            args_str += f" --{key} {value} "
    return args_str
def launch_worker(item, args, worker_args=worker_args):
    """Start one fastchat model worker described by *item*.

    *item* has the form ``model-path@host@port``; host and port are written
    back onto *args* before the worker's argument string is built.
    """
    # Flatten the item into a log-file-safe stem (strip path separators,
    # then replace every character that is awkward in a file name).
    log_name = item.split("/")[-1].split("\\")[-1]
    for ch in ("-", "@", "."):
        log_name = log_name.replace(ch, "_")
    # Split model-path-address first, then feed the pieces to string_args.
    args.model_path, args.worker_host, args.worker_port = item.split("@")
    args.worker_address = f"http://{args.worker_host}:{args.worker_port}"
    print("*" * 80)
    print(f"如长时间未启动,请到{LOG_PATH}{log_name}.log下查看日志")
    worker_str_args = string_args(args, worker_args)
    print(worker_str_args)
    launch_cmd = base_launch_sh.format("model_worker", worker_str_args, LOG_PATH, f"worker_{log_name}")
    check_cmd = base_check_sh.format(LOG_PATH, f"worker_{log_name}", "model_worker")
    subprocess.run(launch_cmd, shell=True, check=True)
    subprocess.run(check_cmd, shell=True, check=True)
def launch_all(args,
               controller_args=controller_args,
               worker_args=worker_args,
               server_args=server_args
               ):
    """Launch the full fastchat stack: controller, worker(s), openai api server."""
    print(f"Launching llm service,logs are located in {LOG_PATH}...")
    print(f"开始启动LLM服务,请到{LOG_PATH}下监控各模块日志...")

    def _run(module, module_args):
        # Fire the launch script for *module*, then block until its log
        # reports that uvicorn is up.
        launch_cmd = base_launch_sh.format(module, module_args, LOG_PATH, module)
        check_cmd = base_check_sh.format(LOG_PATH, module, module)
        subprocess.run(launch_cmd, shell=True, check=True)
        subprocess.run(check_cmd, shell=True, check=True)

    _run("controller", string_args(args, controller_args))
    print(f"worker启动时间视设备不同而不同,约需3-10分钟,请耐心等待...")
    # A single model spec may be a plain string; otherwise iterate the list.
    if isinstance(args.model_path_address, str):
        launch_worker(args.model_path_address, args=args, worker_args=worker_args)
    else:
        for idx, item in enumerate(args.model_path_address):
            print(f"开始加载第{idx}个模型:{item}")
            launch_worker(item, args=args, worker_args=worker_args)
    _run("openai_api_server", string_args(args, server_args))
    print("Launching LLM service done!")
    print("LLM服务启动完毕。")
22,217 | import nltk
import sys
import os
from configs import VERSION
from configs.model_config import NLTK_DATA_PATH
from configs.server_config import OPEN_CROSS_DOMAIN
import argparse
import uvicorn
from fastapi import Body
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import RedirectResponse
from server.chat.chat import chat
from server.chat.search_engine_chat import search_engine_chat
from server.chat.completion import completion
from server.chat.feedback import chat_feedback
from server.embeddings_api import embed_texts_endpoint
from server.llm_api import (list_running_models, list_config_models,
change_llm_model, stop_llm_model,
get_model_config, list_search_engines)
from server.utils import (BaseResponse, ListResponse, FastAPI, MakeFastAPIOffline,
get_server_configs, get_prompt_template)
from typing import List, Literal
def run_api(host, port, **kwargs):
    """Serve the FastAPI app with uvicorn, enabling TLS when both the
    ``ssl_keyfile`` and ``ssl_certfile`` keyword arguments are provided."""
    ssl_keyfile = kwargs.get("ssl_keyfile")
    ssl_certfile = kwargs.get("ssl_certfile")
    if ssl_keyfile and ssl_certfile:
        uvicorn.run(
            app,
            host=host,
            port=port,
            ssl_keyfile=ssl_keyfile,
            ssl_certfile=ssl_certfile,
        )
    else:
        uvicorn.run(app, host=host, port=port)
22,218 |
def torch_gc():
    """Best-effort release of cached GPU/MPS memory held by torch.

    Never raises: any failure (including torch being absent) is swallowed,
    since memory cleanup is strictly opportunistic.
    """
    try:
        import torch
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
            return
        if not torch.backends.mps.is_available():
            return
        try:
            from torch.mps import empty_cache
            empty_cache()
        except Exception as e:
            msg = ("如果您使用的是 macOS 建议将 pytorch 版本升级至 2.0.0 或更高版本,"
                   "以支持及时清理 torch 产生的内存占用。")
            logger.error(f'{e.__class__.__name__}: {msg}',
                         exc_info=e if log_verbose else None)
    except Exception:
        # torch may be missing or partially functional; ignore and move on.
        ...
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.