id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
20,394 | import ml_collections
def get_b16_config():
    """Returns the ViT-B/16 configuration."""
    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (16, 16)})
    config.hidden_size = 768
    # Transformer encoder hyper-parameters for the "Base" variant.
    transformer = ml_collections.ConfigDict()
    transformer.mlp_dim = 3072
    transformer.num_heads = 12
    transformer.num_layers = 12
    transformer.attention_dropout_rate = 0.0
    transformer.dropout_rate = 0.1
    config.transformer = transformer
    config.classifier = 'token'
    config.representation_size = None
    return config
The provided code snippet includes necessary dependencies for implementing the `get_r50_b16_config` function. Write a Python function `def get_r50_b16_config()` to solve the following problem:
Returns the Resnet50 + ViT-B/16 configuration.
Here is the function:
def get_r50_b16_config():
    """Returns the Resnet50 + ViT-B/16 configuration."""
    # Start from plain ViT-B/16, then swap the patchified input for a
    # hybrid ResNet-50 feature grid.
    config = get_b16_config()
    del config.patches.size
    config.patches.grid = (14, 14)  # 14x14 grid of ResNet features
    config.resnet = ml_collections.ConfigDict()
    config.resnet.num_layers = (3, 4, 9)  # blocks per ResNet stage
    config.resnet.width_factor = 1
    return config | Returns the Resnet50 + ViT-B/16 configuration. |
20,395 | import ml_collections
def get_b16_config():
    """Returns the ViT-B/16 configuration."""
    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (16, 16)})  # patch size
    config.hidden_size = 768
    # "Base" transformer encoder settings.
    config.transformer = ml_collections.ConfigDict()
    config.transformer.mlp_dim = 3072
    config.transformer.num_heads = 12
    config.transformer.num_layers = 12
    config.transformer.attention_dropout_rate = 0.0
    config.transformer.dropout_rate = 0.1
    config.classifier = 'token'  # classify from the [CLS] token
    config.representation_size = None  # no pre-logits projection
    return config
The provided code snippet includes necessary dependencies for implementing the `get_b32_config` function. Write a Python function `def get_b32_config()` to solve the following problem:
Returns the ViT-B/32 configuration.
Here is the function:
def get_b32_config():
    """Returns the ViT-B/32 configuration."""
    # Identical to ViT-B/16 except for a coarser 32x32 patch size.
    config = get_b16_config()
    config.patches.size = (32, 32)
    return config | Returns the ViT-B/32 configuration. |
20,396 | import ml_collections
def get_b16_config():
    """Returns the ViT-B/16 configuration."""
    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (16, 16)})  # 16x16 patches
    config.hidden_size = 768
    # Encoder hyper-parameters (ViT "Base").
    config.transformer = ml_collections.ConfigDict()
    config.transformer.mlp_dim = 3072
    config.transformer.num_heads = 12
    config.transformer.num_layers = 12
    config.transformer.attention_dropout_rate = 0.0
    config.transformer.dropout_rate = 0.1
    config.classifier = 'token'
    config.representation_size = None
    return config
The provided code snippet includes necessary dependencies for implementing the `get_b8_config` function. Write a Python function `def get_b8_config()` to solve the following problem:
Returns the ViT-B/8 configuration.
Here is the function:
def get_b8_config():
    """Returns the ViT-B/8 configuration."""
    # Same as ViT-B/16 but with finer 8x8 patches.
    config = get_b16_config()
    config.patches.size = (8, 8)
    return config | Returns the ViT-B/32 configuration. |
20,397 | import ml_collections
def get_l16_config():
    """Returns the ViT-L/16 configuration."""
    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (16, 16)})
    config.hidden_size = 1024
    # Transformer encoder hyper-parameters for the "Large" variant.
    transformer = ml_collections.ConfigDict()
    transformer.mlp_dim = 4096
    transformer.num_heads = 16
    transformer.num_layers = 24
    transformer.attention_dropout_rate = 0.0
    transformer.dropout_rate = 0.1
    config.transformer = transformer
    config.classifier = 'token'
    config.representation_size = None
    return config
The provided code snippet includes necessary dependencies for implementing the `get_l32_config` function. Write a Python function `def get_l32_config()` to solve the following problem:
Returns the ViT-L/32 configuration.
Here is the function:
def get_l32_config():
    """Returns the ViT-L/32 configuration."""
    # Identical to ViT-L/16 except for a coarser 32x32 patch size.
    config = get_l16_config()
    config.patches.size = (32, 32)
    return config | Returns the ViT-L/32 configuration. |
20,398 | import ml_collections
The provided code snippet includes necessary dependencies for implementing the `get_h14_config` function. Write a Python function `def get_h14_config()` to solve the following problem:
Returns the ViT-H/14 configuration.
Here is the function:
def get_h14_config():
    """Returns the ViT-H/14 configuration."""
    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (14, 14)})  # 14x14 patches
    config.hidden_size = 1280
    # Encoder hyper-parameters (ViT "Huge").
    config.transformer = ml_collections.ConfigDict()
    config.transformer.mlp_dim = 5120
    config.transformer.num_heads = 16
    config.transformer.num_layers = 32
    config.transformer.attention_dropout_rate = 0.0
    config.transformer.dropout_rate = 0.1
    config.classifier = 'token'
    config.representation_size = None
    return config | Returns the ViT-L/16 configuration. |
20,399 | import numpy as np
from typing import List, Tuple, Dict
from sklearn.metrics import (
precision_recall_curve,
average_precision_score,
f1_score
)
def get_continuous_ids(probe_labels: List[int]) -> Dict[int, int]:
    """Map each probe label to a continuous index in [0, len(probe_labels)-1].

    Args:
        probe_labels: list of (possibly sparse) integer label ids.

    Returns:
        Dict mapping each original label id to its rank in sorted order.

    Note: the original called ``sorted(probe_labels)`` and discarded the
    result, so the mapping silently depended on input order; the sort is
    now actually applied, matching the apparent intent.
    """
    return {p_id: idx for idx, p_id in enumerate(sorted(probe_labels))}
20,400 | import numpy as np
from typing import List, Tuple, Dict
from sklearn.metrics import (
precision_recall_curve,
average_precision_score,
f1_score
)
The provided code snippet includes necessary dependencies for implementing the `multihot` function. Write a Python function `def multihot(x: List[List[int]], nb_classes: int) -> np.ndarray` to solve the following problem:
transform to multihot encoding Arguments: x: list of multi-class integer labels, in the range [0, nb_classes-1] nb_classes: number of classes for the multi-hot vector Returns: multihot: multihot vector of type int, (num_samples, nb_classes)
Here is the function:
def multihot(x: List[List[int]], nb_classes: int) -> np.ndarray:
    """transform to multihot encoding

    Arguments:
        x: list of multi-class integer labels, in the range
            [0, nb_classes-1]
        nb_classes: number of classes for the multi-hot vector

    Returns:
        multihot: multihot vector of type int, (num_samples, nb_classes)
    """
    num_samples = len(x)
    encoded = np.zeros((num_samples, nb_classes), dtype=np.int32)
    for idx, labs in enumerate(x):
        for lab in labs:
            encoded[idx, lab] = 1
    # np.int was removed in NumPy 1.24; the builtin int is the exact
    # replacement (np.int was an alias for it).
    return encoded.astype(int)
20,401 | import numpy as np
from typing import List, Tuple, Dict
from sklearn.metrics import (
precision_recall_curve,
average_precision_score,
f1_score
)
The provided code snippet includes necessary dependencies for implementing the `compute_map` function. Write a Python function `def compute_map( scores: np.ndarray, multihot_targets: np.ndarray ) -> Tuple[np.ndarray, np.ndarray, float, float]` to solve the following problem:
Compute the mean average precision across all class labels. Arguments: scores: matrix of per-class distances, of size num_samples x nb_classes multihot_targets: matrix of multi-hot target predictions, of size num_samples x nb_classes Returns: ap: list of average-precision scores, one for each of the nb_classes classes. ar: list of average-recall scores, one for each of the nb_classes classes. mAP: the mean average precision score over all average precisions for all nb_classes classes. mAR: the mean average recall score over all average precisions for all nb_classes classes.
Here is the function:
def compute_map(
    scores: np.ndarray, multihot_targets: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, float, float]:
    """Compute the mean average precision across all class labels.

    Arguments:
        scores: matrix of per-class distances,
            of size num_samples x nb_classes
        multihot_targets: matrix of multi-hot target predictions,
            of size num_samples x nb_classes

    Returns:
        ap: list of average-precision scores, one for each of
            the nb_classes classes.
        ar: list of average-recall scores, one for each of
            the nb_classes classes.
        mAP: the mean average precision score over all average
            precisions for all nb_classes classes.
        mAR: the mean average recall score over all average
            precisions for all nb_classes classes.
    """
    nb_classes = scores.shape[1]
    ap = np.zeros((nb_classes,), dtype=np.float32)
    ar = np.zeros((nb_classes,), dtype=np.float32)
    # Score each class independently (one-vs-rest columns).
    for c in range(nb_classes):
        y_true = multihot_targets[:, c]
        y_scores = scores[:, c]
        # Use interpolated average precision (a la PASCAL
        try:
            ap[c] = average_precision_score(y_true, y_scores)
        except ValueError:
            # e.g. a class with no positive samples; sentinel -1.
            ap[c] = -1
        # Also get the average of the recalls on the raw PR-curve
        try:
            _, rec, _ = precision_recall_curve(y_true, y_scores)
            ar[c] = rec.mean()
        except ValueError:
            ar[c] = -1
    # Sentinel -1 entries are included in the means by design here;
    # NOTE(review): confirm this is intended for classes with no positives.
    mAP = ap.mean()
    mAR = ar.mean()
    return ap, ar, mAP, mAR | Compute the mean average precision across all class labels. Arguments: scores: matrix of per-class distances, of size num_samples x nb_classes multihot_targets: matrix of multi-hot target predictions, of size num_samples x nb_classes Returns: ap: list of average-precision scores, one for each of the nb_classes classes. ar: list of average-recall scores, one for each of the nb_classes classes. mAP: the mean average precision score over all average precisions for all nb_classes classes. mAR: the mean average recall score over all average precisions for all nb_classes classes. |
20,402 | import numpy as np
from typing import List, Tuple, Dict
from sklearn.metrics import (
precision_recall_curve,
average_precision_score,
f1_score
)
def compute_f1(
    multihot_targets: np.ndarray, scores: np.ndarray, threshold: float = 0.5
) -> Tuple[float, float, float, np.ndarray]:
    """Compute micro-, samples-, macro- and per-class F1 at a fixed threshold.

    Arguments:
        multihot_targets: (num_samples, nb_classes) multi-hot ground truth.
        scores: (num_samples, nb_classes) per-class scores/probabilities.
        threshold: scores strictly above this are predicted positive.

    Returns:
        (micro_f1, samples_f1, macro_f1, per_class_f1) where per_class_f1
        is an array of shape (nb_classes,).
    """
    # Binarize scores into predicted labels. np.int was removed in
    # NumPy 1.24; the builtin int is the exact replacement.
    predict_labels = (scores > threshold).astype(int)
    # Collect all four sklearn averaging modes in one pass.
    f1 = {}
    for avg in ("micro", "samples", "macro", None):
        key = avg if avg is not None else "none"
        f1[key] = f1_score(
            y_true=multihot_targets,
            y_pred=predict_labels,
            average=avg
        )
    return f1["micro"], f1["samples"], f1["macro"], f1["none"]
def get_best_f1_scores(
    multihot_targets: np.ndarray, scores: np.ndarray, threshold_end: int
) -> Dict[str, float]:
    """Sweep thresholds and report F1 scores at the best macro-F1 threshold.

    Arguments:
        multihot_targets: (num_samples, nb_classes) multi-hot ground truth.
        scores: (num_samples, nb_classes) per-class scores/probabilities.
        threshold_end: lowest threshold of the sweep; thresholds run from
            here to 0.95 in steps of 0.05.

    Returns:
        Dict with "micro", "macro", "samples", "none" (per-class array) F1
        at the macro-F1-maximizing threshold, plus that "threshold".
    """
    # The original assigned end = 0.5 and end = 0.05 before overwriting
    # with threshold_end — dead code, removed.
    end = threshold_end
    thrs = np.linspace(
        end, 0.95, int(np.round((0.95 - end) / 0.05)) + 1, endpoint=True
    )
    f1_micros = []
    f1_macros = []
    f1_samples = []
    f1_none = []
    for thr in thrs:
        _micros, _samples, _macros, _none = compute_f1(multihot_targets, scores, thr)
        f1_micros.append(_micros)
        f1_samples.append(_samples)
        f1_macros.append(_macros)
        f1_none.append(_none)
    # Pick the threshold that maximizes macro-F1; report the other
    # metrics at that same threshold.
    b_thr = int(np.argmax(f1_macros))
    return {
        "micro": f1_micros[b_thr],
        "macro": f1_macros[b_thr],
        "samples": f1_samples[b_thr],
        "threshold": thrs[b_thr],
        "none": f1_none[b_thr],
    }
20,403 | import numpy as np
import torch
from sklearn.metrics import (
accuracy_score, average_precision_score, f1_score, roc_auc_score
)
def accuracy(y_probs, y_true):
    """Return (top-1 accuracy, error) given class scores and true labels."""
    # y_probs: (num_images, num_classes); predict the argmax class per row.
    predictions = np.argmax(y_probs, axis=1)
    acc = accuracy_score(y_true, predictions)
    return acc, 1.0 - acc
def top_n_accuracy(y_probs, truths, n=1):
    """Fraction of samples whose true class is among the top-n scores.

    y_probs: (num_images, num_classes) scores.
    truths: either a (num_images, num_classes) multi/one-hot array, or a
        list/array of ground-truth class indices.
    """
    # Indices of the n highest-scoring classes for every sample.
    top_n_idx = np.argsort(y_probs, axis=1)[:, -n:]
    if isinstance(truths, np.ndarray) and truths.shape == y_probs.shape:
        gt = np.argmax(truths, axis=1)  # one-hot -> class ids
    else:
        gt = truths  # already a list of GT class idx
    total = y_probs.shape[0]
    hits = sum(1 for row, t in zip(top_n_idx, gt) if t in row)
    return float(hits) / total
def compute_acc_auc(y_probs, y_true_ids):
    """Top-1 accuracy plus ROC-AUC (binary) or top-k accuracy (multiclass).

    y_probs: (num_images, num_classes) scores; y_true_ids: GT class indices.
    """
    # One-hot encode the ground truth for roc_auc_score.
    onehot_tgts = np.zeros_like(y_probs)
    for idx, t in enumerate(y_true_ids):
        onehot_tgts[idx, t] = 1.
    num_classes = y_probs.shape[1]
    if num_classes == 2:
        top1, _ = accuracy(y_probs, y_true_ids)
        # so precision can set all to 2
        try:
            auc = roc_auc_score(onehot_tgts, y_probs, average='macro')
        except ValueError as e:
            # e.g. only one class present in y_true; sentinel -1.
            print(f"value error encountered {e}, set auc sccore to -1.")
            auc = -1
        return {"top1": top1, "rocauc": auc}
    top1, _ = accuracy(y_probs, y_true_ids)
    k = min([5, num_classes])  # if number of labels < 5, use the total class
    top5 = top_n_accuracy(y_probs, y_true_ids, k)
    return {"top1": top1, f"top{k}": top5} | null |
20,404 | import numpy as np
import torch
from sklearn.metrics import (
accuracy_score, average_precision_score, f1_score, roc_auc_score
)
def topks_correct(preds, labels, ks):
    """Computes the number of top-k correct predictions for each k."""
    assert preds.size(0) == labels.size(0), \
        "Batch dim of predictions and labels must match"
    # Indices of the max(ks) highest-scoring classes for every sample.
    _, topk_idx = torch.topk(
        preds, max(ks), dim=1, largest=True, sorted=True
    )
    # (batch_size, max_k) -> (max_k, batch_size) so row i holds the
    # i-th best guess for every sample.
    topk_idx = topk_idx.t()
    # Broadcast labels across the max_k rows and compare elementwise.
    hits = topk_idx.eq(labels.view(1, -1).expand_as(topk_idx))
    # For each k, count hits within the first k guesses.
    return [hits[:k, :].reshape(-1).float().sum() for k in ks]
The provided code snippet includes necessary dependencies for implementing the `topk_errors` function. Write a Python function `def topk_errors(preds, labels, ks)` to solve the following problem:
Computes the top-k error for each k.
Here is the function:
def topk_errors(preds, labels, ks):
    """Computes the top-k error for each k."""
    # Negative labels mark samples to ignore; drop them before scoring.
    if int(labels.min()) < 0:  # has ignore
        keep_ids = np.where(labels.cpu() >= 0)[0]
        preds = preds[keep_ids, :]
        labels = labels[keep_ids]
    num_topks_correct = topks_correct(preds, labels, ks)
    # Error = 1 - accuracy for each requested k.
    return [(1.0 - x / preds.size(0)) for x in num_topks_correct] | Computes the top-k error for each k. |
20,405 | import numpy as np
import torch
from sklearn.metrics import (
accuracy_score, average_precision_score, f1_score, roc_auc_score
)
def topks_correct(preds, labels, ks):
    """Computes the number of top-k correct predictions for each k."""
    assert preds.size(0) == labels.size(
        0
    ), "Batch dim of predictions and labels must match"
    # Find the top max_k predictions for each sample
    _top_max_k_vals, top_max_k_inds = torch.topk(
        preds, max(ks), dim=1, largest=True, sorted=True
    )
    # (batch_size, max_k) -> (max_k, batch_size)
    top_max_k_inds = top_max_k_inds.t()
    # (batch_size, ) -> (max_k, batch_size)
    rep_max_k_labels = labels.view(1, -1).expand_as(top_max_k_inds)
    # (i, j) = 1 if top i-th prediction for the j-th sample is correct
    top_max_k_correct = top_max_k_inds.eq(rep_max_k_labels)
    # Compute the number of topk correct predictions for each k
    # (each entry is a 0-dim float tensor).
    topks_correct = [
        top_max_k_correct[:k, :].reshape(-1).float().sum() for k in ks
    ]
    return topks_correct
The provided code snippet includes necessary dependencies for implementing the `topk_accuracies` function. Write a Python function `def topk_accuracies(preds, labels, ks)` to solve the following problem:
Computes the top-k accuracy for each k.
Here is the function:
def topk_accuracies(preds, labels, ks):
    """Computes the top-k accuracy for each k."""
    num_topks_correct = topks_correct(preds, labels, ks)
    # Fraction of samples whose true label is within the top-k predictions.
    return [(x / preds.size(0)) for x in num_topks_correct] | Computes the top-k accuracy for each k. |
20,406 | from functools import partial
import torch
import torch.nn as nn
from .adapter_block import Pfeiffer_Block
from ..vit_backbones.vit_mae import VisionTransformer
from timm.models.layers import PatchEmbed
from ...utils import logging
def vit_base_patch16(adapter_cfg, **kwargs):
    """ViT-Base/16 with adapters, using MAE fine-tuning defaults."""
    # NOTE(review): ADPT_VisionTransformer is presumably defined elsewhere
    # in this module (adapter-wrapped VisionTransformer) — confirm.
    model = ADPT_VisionTransformer(
        adapter_cfg,
        drop_path_rate=0.1, global_pool=True, # using default settings for mae-finetune
        patch_size=16, embed_dim=768, depth=12, num_heads=12,
        mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model
def vit_large_patch16(adapter_cfg, **kwargs):
    """ViT-Large/16 with adapters, using MAE fine-tuning defaults."""
    model = ADPT_VisionTransformer(
        adapter_cfg,
        drop_path_rate=0.1, global_pool=True, # using default settings for mae-finetune
        patch_size=16, embed_dim=1024, depth=24, num_heads=16,
        mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model
def vit_huge_patch14(adapter_cfg, **kwargs):
    """ViT-Huge/14 with adapters, using MAE fine-tuning defaults."""
    model = ADPT_VisionTransformer(
        adapter_cfg,
        drop_path_rate=0.1, global_pool=True, # using default settings for mae-finetune
        patch_size=14, embed_dim=1280, depth=32, num_heads=16,
        mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model
def build_model(model_type, adapter_cfg):
    """Build an adapter ViT by substring match on model_type.

    Returns None (implicitly) when no variant substring matches.
    """
    if "vitb" in model_type:
        return vit_base_patch16(adapter_cfg)
    elif "vitl" in model_type:
        return vit_large_patch16(adapter_cfg)
    elif "vith" in model_type:
        return vit_huge_patch14(adapter_cfg) | null |
20,407 | import math
import torch
import torch.nn as nn
import torchvision as tv
from functools import partial, reduce
from operator import mul
from torch.nn import Conv2d, Dropout
from timm.models.vision_transformer import _cfg
from ..vit_backbones.vit_mae import VisionTransformer
from ...utils import logging
def vit_base_patch16(prompt_cfg, **kwargs):
def vit_large_patch16(prompt_cfg, **kwargs):
def vit_huge_patch14(prompt_cfg, **kwargs):
def build_model(model_type, prompt_cfg):
if "vitb" in model_type:
return vit_base_patch16(prompt_cfg)
elif "vitl" in model_type:
return vit_large_patch16(prompt_cfg)
elif "vith" in model_type:
return vit_huge_patch14(prompt_cfg) | null |
20,408 | from functools import partial
import torch
import torch.nn as nn
import timm.models.vision_transformer
def vit_base_patch16(**kwargs):
def vit_large_patch16(**kwargs):
def vit_huge_patch14(**kwargs):
def build_model(model_type):
if "vitb" in model_type:
return vit_base_patch16()
elif "vitl" in model_type:
return vit_large_patch16()
elif "vith" in model_type:
return vit_huge_patch14() | null |
20,409 | import copy
import logging
import math
from os.path import join as pjoin
from turtle import forward
import torch
import torch.nn as nn
import numpy as np
from torch.nn import Dropout, Softmax, Linear, Conv2d, LayerNorm
from torch.nn.modules.utils import _pair
from scipy import ndimage
from ...configs import vit_configs as configs
def swish(x):
    """Swish (SiLU) activation: x * sigmoid(x)."""
    return x * torch.sigmoid(x) | null |
20,410 | import copy
import logging
import math
from os.path import join as pjoin
from turtle import forward
import torch
import torch.nn as nn
import numpy as np
from torch.nn import Dropout, Softmax, Linear, Conv2d, LayerNorm
from torch.nn.modules.utils import _pair
from scipy import ndimage
from ...configs import vit_configs as configs
class StdConv2d(nn.Conv2d):
    """Conv2d with Weight Standardization: weights are normalized to zero
    mean and unit variance per output channel before the convolution.

    Fix: the original called ``F.conv2d`` but ``torch.nn.functional`` is
    never imported as ``F`` in this file, which raises a NameError at the
    first forward pass; use the in-scope ``nn.functional`` instead.
    """
    def forward(self, x):
        w = self.weight
        # Per-output-channel mean/variance over (in_ch, kH, kW).
        v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
        w = (w - m) / torch.sqrt(v + 1e-5)  # epsilon guards zero variance
        return nn.functional.conv2d(x, w, self.bias, self.stride, self.padding,
                                    self.dilation, self.groups)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
    """3x3 weight-standardized conv; padding 1 keeps spatial size at stride 1."""
    return StdConv2d(cin, cout, kernel_size=3, stride=stride,
                     padding=1, bias=bias, groups=groups) | null |
20,411 | import copy
import logging
import math
from os.path import join as pjoin
from turtle import forward
import torch
import torch.nn as nn
import numpy as np
from torch.nn import Dropout, Softmax, Linear, Conv2d, LayerNorm
from torch.nn.modules.utils import _pair
from scipy import ndimage
from ...configs import vit_configs as configs
class StdConv2d(nn.Conv2d):
    """Conv2d with Weight Standardization (zero mean, unit variance per
    output channel, computed at every forward pass).

    Fix: the original called ``F.conv2d`` but ``torch.nn.functional`` is
    never imported as ``F`` in this file (NameError on first use); call
    ``nn.functional.conv2d`` via the existing ``nn`` import instead.
    """
    def forward(self, x):
        w = self.weight
        # Per-output-channel statistics over (in_ch, kH, kW).
        v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
        w = (w - m) / torch.sqrt(v + 1e-5)  # epsilon guards zero variance
        return nn.functional.conv2d(x, w, self.bias, self.stride, self.padding,
                                    self.dilation, self.groups)
def conv1x1(cin, cout, stride=1, bias=False):
    """1x1 weight-standardized conv (pointwise channel projection)."""
    return StdConv2d(cin, cout, kernel_size=1, stride=stride,
                     padding=0, bias=bias) | null |
20,412 | import math
import torch
import torch.nn as nn
from functools import partial, reduce
from operator import mul
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.layers.helpers import to_2tuple
from timm.models.layers import PatchEmbed
class VisionTransformerMoCo(VisionTransformer):
    """timm VisionTransformer with MoCo v3 initialization: fixed 2D
    sin-cos position embeddings plus xavier-uniform weight init."""

    def __init__(self, stop_grad_conv1=False, **kwargs):
        # stop_grad_conv1: if True, freeze the patch-embedding projection
        # (MoCo v3 trick to stabilize training).
        super().__init__(**kwargs)
        # Use fixed 2D sin-cos position embedding
        self.build_2d_sincos_position_embedding()
        # weight initialization
        for name, m in self.named_modules():
            if isinstance(m, nn.Linear):
                if 'qkv' in name:
                    # treat the weights of Q, K, V separately
                    val = math.sqrt(6. / float(m.weight.shape[0] // 3 + m.weight.shape[1]))
                    nn.init.uniform_(m.weight, -val, val)
                else:
                    nn.init.xavier_uniform_(m.weight)
                nn.init.zeros_(m.bias)
        nn.init.normal_(self.cls_token, std=1e-6)
        if isinstance(self.patch_embed, PatchEmbed):
            # xavier_uniform initialization
            val = math.sqrt(6. / float(3 * reduce(mul, self.patch_embed.patch_size, 1) + self.embed_dim))
            nn.init.uniform_(self.patch_embed.proj.weight, -val, val)
            nn.init.zeros_(self.patch_embed.proj.bias)
            if stop_grad_conv1:
                self.patch_embed.proj.weight.requires_grad = False
                self.patch_embed.proj.bias.requires_grad = False

    def build_2d_sincos_position_embedding(self, temperature=10000.):
        """Replace pos_embed with a fixed (non-trainable) 2D sin-cos table."""
        h, w = self.patch_embed.grid_size
        grid_w = torch.arange(w, dtype=torch.float32)
        grid_h = torch.arange(h, dtype=torch.float32)
        grid_w, grid_h = torch.meshgrid(grid_w, grid_h)
        assert self.embed_dim % 4 == 0, 'Embed dimension must be divisible by 4 for 2D sin-cos position embedding'
        pos_dim = self.embed_dim // 4
        omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
        omega = 1. / (temperature**omega)
        out_w = torch.einsum('m,d->md', [grid_w.flatten(), omega])
        out_h = torch.einsum('m,d->md', [grid_h.flatten(), omega])
        pos_emb = torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], dim=1)[None, :, :]
        assert self.num_tokens == 1, 'Assuming one and only one token, [cls]'
        # Zero embedding for the [cls] token, prepended to the patch grid.
        pe_token = torch.zeros([1, 1, self.embed_dim], dtype=torch.float32)
        self.pos_embed = nn.Parameter(torch.cat([pe_token, pos_emb], dim=1))
        self.pos_embed.requires_grad = False
def vit_small(**kwargs):
    """ViT-Small/16 variant of VisionTransformerMoCo (MoCo v3 recipe)."""
    model = VisionTransformerMoCo(
        patch_size=16, embed_dim=384, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model
20,413 | import math
import torch
import torch.nn as nn
from functools import partial, reduce
from operator import mul
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.layers.helpers import to_2tuple
from timm.models.layers import PatchEmbed
class VisionTransformerMoCo(VisionTransformer):
    """timm VisionTransformer with MoCo v3 initialization: fixed 2D
    sin-cos position embeddings plus xavier-uniform weight init."""

    def __init__(self, stop_grad_conv1=False, **kwargs):
        # stop_grad_conv1: if True, freeze the patch-embedding projection.
        super().__init__(**kwargs)
        # Use fixed 2D sin-cos position embedding
        self.build_2d_sincos_position_embedding()
        # weight initialization
        for name, m in self.named_modules():
            if isinstance(m, nn.Linear):
                if 'qkv' in name:
                    # treat the weights of Q, K, V separately
                    val = math.sqrt(6. / float(m.weight.shape[0] // 3 + m.weight.shape[1]))
                    nn.init.uniform_(m.weight, -val, val)
                else:
                    nn.init.xavier_uniform_(m.weight)
                nn.init.zeros_(m.bias)
        nn.init.normal_(self.cls_token, std=1e-6)
        if isinstance(self.patch_embed, PatchEmbed):
            # xavier_uniform initialization
            val = math.sqrt(6. / float(3 * reduce(mul, self.patch_embed.patch_size, 1) + self.embed_dim))
            nn.init.uniform_(self.patch_embed.proj.weight, -val, val)
            nn.init.zeros_(self.patch_embed.proj.bias)
            if stop_grad_conv1:
                self.patch_embed.proj.weight.requires_grad = False
                self.patch_embed.proj.bias.requires_grad = False

    def build_2d_sincos_position_embedding(self, temperature=10000.):
        """Replace pos_embed with a fixed (non-trainable) 2D sin-cos table."""
        h, w = self.patch_embed.grid_size
        grid_w = torch.arange(w, dtype=torch.float32)
        grid_h = torch.arange(h, dtype=torch.float32)
        grid_w, grid_h = torch.meshgrid(grid_w, grid_h)
        assert self.embed_dim % 4 == 0, 'Embed dimension must be divisible by 4 for 2D sin-cos position embedding'
        pos_dim = self.embed_dim // 4
        omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
        omega = 1. / (temperature**omega)
        out_w = torch.einsum('m,d->md', [grid_w.flatten(), omega])
        out_h = torch.einsum('m,d->md', [grid_h.flatten(), omega])
        pos_emb = torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], dim=1)[None, :, :]
        assert self.num_tokens == 1, 'Assuming one and only one token, [cls]'
        # Zero embedding for the [cls] token, prepended to the patch grid.
        pe_token = torch.zeros([1, 1, self.embed_dim], dtype=torch.float32)
        self.pos_embed = nn.Parameter(torch.cat([pe_token, pos_emb], dim=1))
        self.pos_embed.requires_grad = False
class ConvStem(nn.Module):
    """
    ConvStem, from Early Convolutions Help Transformers See Better, Tete et al. https://arxiv.org/abs/2106.14881

    Drop-in replacement for timm's PatchEmbed: four stride-2 3x3 convs
    (16x downsampling overall) followed by a 1x1 projection to embed_dim.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):
        super().__init__()
        assert patch_size == 16, 'ConvStem only supports patch size of 16'
        assert embed_dim % 8 == 0, 'Embed dimension must be divisible by 8 for ConvStem'
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten
        # build stem, similar to the design in https://arxiv.org/abs/2106.14881
        stem = []
        input_dim, output_dim = 3, embed_dim // 8
        for l in range(4):
            # each stage halves spatial size and doubles channels
            stem.append(nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=2, padding=1, bias=False))
            stem.append(nn.BatchNorm2d(output_dim))
            stem.append(nn.ReLU(inplace=True))
            input_dim = output_dim
            output_dim *= 2
        stem.append(nn.Conv2d(input_dim, embed_dim, kernel_size=1))
        self.proj = nn.Sequential(*stem)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        B, C, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        x = self.norm(x)
        return x
def vit_conv_small(**kwargs):
    """ViT-Small with ConvStem patch embedding (depth 11 compensates for
    the stem's extra compute)."""
    # minus one ViT block
    model = VisionTransformerMoCo(
        patch_size=16, embed_dim=384, depth=11, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), embed_layer=ConvStem, **kwargs)
    model.default_cfg = _cfg()
    return model | null |
20,414 | import math
import torch
import torch.nn as nn
from functools import partial, reduce
from operator import mul
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.layers.helpers import to_2tuple
from timm.models.layers import PatchEmbed
class VisionTransformerMoCo(VisionTransformer):
    """timm VisionTransformer with MoCo v3 initialization: fixed 2D
    sin-cos position embeddings plus xavier-uniform weight init."""

    def __init__(self, stop_grad_conv1=False, **kwargs):
        # stop_grad_conv1: if True, freeze the patch-embedding projection.
        super().__init__(**kwargs)
        # Use fixed 2D sin-cos position embedding
        self.build_2d_sincos_position_embedding()
        # weight initialization
        for name, m in self.named_modules():
            if isinstance(m, nn.Linear):
                if 'qkv' in name:
                    # treat the weights of Q, K, V separately
                    val = math.sqrt(6. / float(m.weight.shape[0] // 3 + m.weight.shape[1]))
                    nn.init.uniform_(m.weight, -val, val)
                else:
                    nn.init.xavier_uniform_(m.weight)
                nn.init.zeros_(m.bias)
        nn.init.normal_(self.cls_token, std=1e-6)
        if isinstance(self.patch_embed, PatchEmbed):
            # xavier_uniform initialization
            val = math.sqrt(6. / float(3 * reduce(mul, self.patch_embed.patch_size, 1) + self.embed_dim))
            nn.init.uniform_(self.patch_embed.proj.weight, -val, val)
            nn.init.zeros_(self.patch_embed.proj.bias)
            if stop_grad_conv1:
                self.patch_embed.proj.weight.requires_grad = False
                self.patch_embed.proj.bias.requires_grad = False

    def build_2d_sincos_position_embedding(self, temperature=10000.):
        """Replace pos_embed with a fixed (non-trainable) 2D sin-cos table."""
        h, w = self.patch_embed.grid_size
        grid_w = torch.arange(w, dtype=torch.float32)
        grid_h = torch.arange(h, dtype=torch.float32)
        grid_w, grid_h = torch.meshgrid(grid_w, grid_h)
        assert self.embed_dim % 4 == 0, 'Embed dimension must be divisible by 4 for 2D sin-cos position embedding'
        pos_dim = self.embed_dim // 4
        omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
        omega = 1. / (temperature**omega)
        out_w = torch.einsum('m,d->md', [grid_w.flatten(), omega])
        out_h = torch.einsum('m,d->md', [grid_h.flatten(), omega])
        pos_emb = torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], dim=1)[None, :, :]
        assert self.num_tokens == 1, 'Assuming one and only one token, [cls]'
        # Zero embedding for the [cls] token, prepended to the patch grid.
        pe_token = torch.zeros([1, 1, self.embed_dim], dtype=torch.float32)
        self.pos_embed = nn.Parameter(torch.cat([pe_token, pos_emb], dim=1))
        self.pos_embed.requires_grad = False
class ConvStem(nn.Module):
    """
    ConvStem, from Early Convolutions Help Transformers See Better, Tete et al. https://arxiv.org/abs/2106.14881

    Drop-in replacement for timm's PatchEmbed: four stride-2 3x3 convs
    (16x downsampling overall) followed by a 1x1 projection to embed_dim.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):
        super().__init__()
        assert patch_size == 16, 'ConvStem only supports patch size of 16'
        assert embed_dim % 8 == 0, 'Embed dimension must be divisible by 8 for ConvStem'
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten
        # build stem, similar to the design in https://arxiv.org/abs/2106.14881
        stem = []
        input_dim, output_dim = 3, embed_dim // 8
        for l in range(4):
            # each stage halves spatial size and doubles channels
            stem.append(nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=2, padding=1, bias=False))
            stem.append(nn.BatchNorm2d(output_dim))
            stem.append(nn.ReLU(inplace=True))
            input_dim = output_dim
            output_dim *= 2
        stem.append(nn.Conv2d(input_dim, embed_dim, kernel_size=1))
        self.proj = nn.Sequential(*stem)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        B, C, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        x = self.norm(x)
        return x
def vit_conv_base(**kwargs):
    """ViT-Base with ConvStem patch embedding (depth 11 compensates for
    the stem's extra compute)."""
    # minus one ViT block
    model = VisionTransformerMoCo(
        patch_size=16, embed_dim=768, depth=11, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), embed_layer=ConvStem, **kwargs)
    model.default_cfg = _cfg()
    return model | null |
20,415 | import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from ...utils import logging
The provided code snippet includes necessary dependencies for implementing the `window_partition` function. Write a Python function `def window_partition(x, window_size)` to solve the following problem:
Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C)
Here is the function:
def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)

    Note: assumes H and W are divisible by window_size.
    """
    B, H, W, C = x.shape
    # Split H and W into (num_windows, window_size) blocks.
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    # Bring the two window-grid axes together, then flatten them into batch.
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows | Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) |
20,416 | import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from ...utils import logging
The provided code snippet includes necessary dependencies for implementing the `window_reverse` function. Write a Python function `def window_reverse(windows, window_size, H, W)` to solve the following problem:
Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C)
Here is the function:
def window_reverse(windows, window_size, H, W):
    """Inverse of ``window_partition``: reassemble windows into a feature map.

    Args:
        windows: tensor of shape (num_windows * B, window_size, window_size, C).
        window_size (int): side length of each window.
        H (int): height of the reassembled image.
        W (int): width of the reassembled image.

    Returns:
        Tensor of shape (B, H, W, C).
    """
    wins_per_image = (H // window_size) * (W // window_size)
    batch = int(windows.shape[0] / wins_per_image)
    grid = windows.view(
        batch, H // window_size, W // window_size, window_size, window_size, -1
    )
    # Interleave window rows/cols back into contiguous spatial axes.
    return grid.permute(0, 1, 3, 2, 4, 5).contiguous().view(batch, H, W, -1)
20,417 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
class ConvNeXt(nn.Module):
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], drop_path_rate=0.,
layer_scale_init_value=1e-6, head_init_scale=1.,
):
def _init_weights(self, m):
def forward_features(self, x):
def forward(self, x):
model_urls = {
"convnext_tiny_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
"convnext_small_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth",
"convnext_base_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth",
"convnext_large_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth",
"convnext_base_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth",
"convnext_large_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth",
"convnext_xlarge_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
}
def convnext_tiny(pretrained=False, **kwargs):
    """ConvNeXt-Tiny (depths 3/3/9/3, dims 96..768); optionally loads the
    ImageNet-1k EMA checkpoint."""
    model = ConvNeXt(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
    if not pretrained:
        return model
    checkpoint = torch.hub.load_state_dict_from_url(
        url=model_urls['convnext_tiny_1k'], map_location="cpu", check_hash=True)
    model.load_state_dict(checkpoint["model"])
    return model
20,418 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
class ConvNeXt(nn.Module):
    r""" ConvNeXt
    A PyTorch impl of : `A ConvNet for the 2020s` -
    https://arxiv.org/pdf/2201.03545.pdf
    Args:
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Number of classes for classification head. Default: 1000
        depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
        dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
        drop_path_rate (float): Stochastic depth rate. Default: 0.
        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
        head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
    """
    def __init__(self, in_chans=3, num_classes=1000,
                 depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], drop_path_rate=0.,
                 layer_scale_init_value=1e-6, head_init_scale=1.,
                 ):
        super().__init__()

        self.downsample_layers = nn.ModuleList()  # stem and 3 intermediate downsampling conv layers
        # Stem: 4x4 non-overlapping patchify conv, then channels-first LayerNorm.
        stem = nn.Sequential(
            nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
            LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
        )
        self.downsample_layers.append(stem)
        for i in range(3):
            # Each downsampler halves the resolution and widens the channels.
            downsample_layer = nn.Sequential(
                LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
                nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
            )
            self.downsample_layers.append(downsample_layer)

        self.stages = nn.ModuleList()  # 4 feature resolution stages, each consisting of multiple residual blocks
        # Per-block stochastic-depth rates grow linearly from 0 to drop_path_rate.
        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        cur = 0
        for i in range(4):
            stage = nn.Sequential(
                *[Block(dim=dims[i], drop_path=dp_rates[cur + j],
                        layer_scale_init_value=layer_scale_init_value) for j in range(depths[i])]
            )
            self.stages.append(stage)
            cur += depths[i]

        self.norm = nn.LayerNorm(dims[-1], eps=1e-6)  # final norm layer
        self.head = nn.Linear(dims[-1], num_classes)

        self.apply(self._init_weights)
        # Scale the classifier init after the generic weight init runs.
        self.head.weight.data.mul_(head_init_scale)
        self.head.bias.data.mul_(head_init_scale)

    def _init_weights(self, m):
        # Truncated-normal weights and zero bias for every conv/linear layer.
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            trunc_normal_(m.weight, std=.02)
            nn.init.constant_(m.bias, 0)

    def forward_features(self, x):
        # Alternate downsampling and residual stages, then pool spatially.
        for i in range(4):
            x = self.downsample_layers[i](x)
            x = self.stages[i](x)
        return self.norm(x.mean([-2, -1]))  # global average pooling, (N, C, H, W) -> (N, C)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x
model_urls = {
"convnext_tiny_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
"convnext_small_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth",
"convnext_base_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth",
"convnext_large_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth",
"convnext_base_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth",
"convnext_large_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth",
"convnext_xlarge_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
}
def convnext_small(pretrained=False, **kwargs):
    """ConvNeXt-Small (depths 3/3/27/3, dims 96..768); optionally loads the
    ImageNet-1k checkpoint."""
    model = ConvNeXt(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs)
    if not pretrained:
        return model
    state = torch.hub.load_state_dict_from_url(
        url=model_urls['convnext_small_1k'], map_location="cpu")
    model.load_state_dict(state["model"])
    return model
20,419 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
class ConvNeXt(nn.Module):
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], drop_path_rate=0.,
layer_scale_init_value=1e-6, head_init_scale=1.,
):
def _init_weights(self, m):
def forward_features(self, x):
def forward(self, x):
model_urls = {
"convnext_tiny_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
"convnext_small_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth",
"convnext_base_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth",
"convnext_large_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth",
"convnext_base_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth",
"convnext_large_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth",
"convnext_xlarge_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
}
def convnext_base(pretrained=False, in_22k=False, **kwargs):
    """ConvNeXt-Base (dims 128..1024); loads the ImageNet-22k or -1k
    checkpoint depending on ``in_22k``."""
    model = ConvNeXt(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
    if pretrained:
        key = 'convnext_base_22k' if in_22k else 'convnext_base_1k'
        state = torch.hub.load_state_dict_from_url(
            url=model_urls[key], map_location="cpu")
        model.load_state_dict(state["model"])
    return model
20,420 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
class ConvNeXt(nn.Module):
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], drop_path_rate=0.,
layer_scale_init_value=1e-6, head_init_scale=1.,
):
def _init_weights(self, m):
def forward_features(self, x):
def forward(self, x):
model_urls = {
"convnext_tiny_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
"convnext_small_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth",
"convnext_base_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth",
"convnext_large_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth",
"convnext_base_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth",
"convnext_large_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth",
"convnext_xlarge_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
}
def convnext_large(pretrained=False, in_22k=False, **kwargs):
    """ConvNeXt-Large (dims 192..1536); loads the ImageNet-22k or -1k
    checkpoint depending on ``in_22k``."""
    model = ConvNeXt(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
    if pretrained:
        key = 'convnext_large_22k' if in_22k else 'convnext_large_1k'
        state = torch.hub.load_state_dict_from_url(
            url=model_urls[key], map_location="cpu")
        model.load_state_dict(state["model"])
    return model
20,421 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
class ConvNeXt(nn.Module):
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], drop_path_rate=0.,
layer_scale_init_value=1e-6, head_init_scale=1.,
):
def _init_weights(self, m):
def forward_features(self, x):
def forward(self, x):
model_urls = {
"convnext_tiny_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
"convnext_small_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth",
"convnext_base_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth",
"convnext_large_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth",
"convnext_base_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth",
"convnext_large_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth",
"convnext_xlarge_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
}
def convnext_xlarge(pretrained=False, in_22k=False, **kwargs):
    """ConvNeXt-XLarge (dims 256..2048); only an ImageNet-22k checkpoint
    exists, so ``pretrained`` requires ``in_22k=True``."""
    model = ConvNeXt(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs)
    if pretrained:
        assert in_22k, "only ImageNet-22K pre-trained ConvNeXt-XL is available; please set in_22k=True"
        state = torch.hub.load_state_dict_from_url(
            url=model_urls['convnext_xlarge_22k'], map_location="cpu")
        model.load_state_dict(state["model"])
    return model
20,422 | import numpy as np
import torch
import os
from .vit_backbones.swin_transformer import SwinTransformer
from .vit_backbones.vit import VisionTransformer
from .vit_backbones.vit_moco import vit_base
from .vit_backbones.vit_mae import build_model as mae_vit_model
from .vit_prompt.vit import PromptedVisionTransformer
from .vit_prompt.swin_transformer import PromptedSwinTransformer
from .vit_prompt.vit_moco import vit_base as prompt_vit_base
from .vit_prompt.vit_mae import build_model as prompt_mae_vit_model
from .vit_adapter.vit_mae import build_model as adapter_mae_vit_model
from .vit_adapter.vit_moco import vit_base as adapter_vit_base
from .vit_adapter.vit import ADPT_VisionTransformer
MODEL_ZOO = {
"swint_imagenet": "swin_tiny_patch4_window7_224.pth",
"swint_imagenet_ssl": "moby_swin_t_300ep_pretrained.pth",
"swins_imagenet": "swin_small_patch4_window7_224.pth",
"swinb_imagenet_224": "swin_base_patch4_window7_224.pth",
"swinb_imagenet_384": "swin_base_patch4_window12_384.pth",
"swinb_imagenet22k_224": "swin_base_patch4_window7_224_22k.pth",
"swinb_imagenet22k_384": "swin_base_patch4_window12_384_22k.pth",
"swinl_imagenet22k_224": "swin_large_patch4_window7_224_22k.pth",
"sup_vitb8": "ViT-B_8.npz",
"sup_vitb16_224": "ViT-B_16-224.npz",
"sup_vitb16": "ViT-B_16.npz",
"sup_vitl16_224": "ViT-L_16-224.npz",
"sup_vitl16": "ViT-L_16.npz",
"sup_vitb8_imagenet21k": "imagenet21k_ViT-B_8.npz",
"sup_vitb32_imagenet21k": "imagenet21k_ViT-B_32.npz",
"sup_vitb16_imagenet21k": "imagenet21k_ViT-B_16.npz",
"sup_vitl16_imagenet21k": "imagenet21k_ViT-L_16.npz",
"sup_vitl32_imagenet21k": "imagenet21k_ViT-L_32.npz",
"sup_vith14_imagenet21k": "imagenet21k_ViT-H_14.npz",
"mae_vith14": "mae_pretrain_vit_huge.pth",
"mae_vitb16": "mae_pretrain_vit_base.pth",
"mae_vitl16": "mae_pretrain_vit_large.pth",
}
def build_mae_model(
    model_type, crop_size, prompt_cfg, model_root, adapter_cfg=None
):
    """Build an MAE-pretrained ViT (plain, prompted, or adapter variant),
    load its checkpoint from ``model_root``, and strip the classifier head.

    Returns:
        (model, feat_dim) where feat_dim is the backbone embedding size.
    """
    if prompt_cfg is not None:
        model = prompt_mae_vit_model(model_type, prompt_cfg)
    elif adapter_cfg is not None:
        model = adapter_mae_vit_model(model_type, adapter_cfg)
    else:
        model = mae_vit_model(model_type)
    out_dim = model.embed_dim

    ckpt_path = os.path.join(model_root, MODEL_ZOO[model_type])
    state_dict = torch.load(ckpt_path, map_location="cpu")['model']
    # Non-strict: prompt/adapter parameters are absent from the checkpoint.
    model.load_state_dict(state_dict, strict=False)
    # The backbone is used as a feature extractor, so drop the head.
    model.head = torch.nn.Identity()
    return model, out_dim
20,423 | import numpy as np
import torch
import os
from .vit_backbones.swin_transformer import SwinTransformer
from .vit_backbones.vit import VisionTransformer
from .vit_backbones.vit_moco import vit_base
from .vit_backbones.vit_mae import build_model as mae_vit_model
from .vit_prompt.vit import PromptedVisionTransformer
from .vit_prompt.swin_transformer import PromptedSwinTransformer
from .vit_prompt.vit_moco import vit_base as prompt_vit_base
from .vit_prompt.vit_mae import build_model as prompt_mae_vit_model
from .vit_adapter.vit_mae import build_model as adapter_mae_vit_model
from .vit_adapter.vit_moco import vit_base as adapter_vit_base
from .vit_adapter.vit import ADPT_VisionTransformer
def vit_base(**kwargs):
    """MoCo-v3 ViT-B/16 backbone factory."""
    model = VisionTransformerMoCo(
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    model.default_cfg = _cfg()
    return model
def vit_base(prompt_cfg, **kwargs):
    """Prompted MoCo-v3 ViT-B/16 backbone factory."""
    model = PromptedVisionTransformerMoCo(
        prompt_cfg,
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    model.default_cfg = _cfg()
    return model
def vit_base(adapter_cfg, **kwargs):
    """Adapter-equipped MoCo-v3 ViT-B/16 backbone factory."""
    model = ADPT_VisionTransformerMoCo(
        adapter_cfg,
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    model.default_cfg = _cfg()
    return model
def build_mocov3_model(
    model_type, crop_size, prompt_cfg, model_root, adapter_cfg=None
):
    """Build a MoCo-v3 ViT-B/16 backbone (plain, prompted, or adapter
    variant), load the linear-probe checkpoint from ``model_root``, strip
    the head, and return ``(model, 768)``.

    Only ``model_type == "mocov3_vitb"`` is supported.
    """
    if model_type != "mocov3_vitb":
        raise ValueError("Does not support other arch")
    if prompt_cfg is not None:
        model = prompt_vit_base(prompt_cfg)
    elif adapter_cfg is not None:
        model = adapter_vit_base(adapter_cfg)
    else:
        model = vit_base()
    out_dim = 768
    ckpt = os.path.join(model_root, "mocov3_linear-vit-b-300ep.pth.tar")
    checkpoint = torch.load(ckpt, map_location="cpu")
    state_dict = checkpoint['state_dict']
    # Strip the DistributedDataParallel "module." prefix from every key so
    # the checkpoint matches a single-process model's parameter names.
    for k in list(state_dict.keys()):
        if k.startswith('module.'):
            # remove prefix
            state_dict[k[len("module."):]] = state_dict[k]
            # delete renamed or unused k
            del state_dict[k]
    # Non-strict load: prompt/adapter parameters are absent from the checkpoint.
    model.load_state_dict(state_dict, strict=False)
    # Backbone is used as a feature extractor: drop the classifier head.
    model.head = torch.nn.Identity()
    return model, out_dim
20,424 | import numpy as np
import torch
import os
from .vit_backbones.swin_transformer import SwinTransformer
from .vit_backbones.vit import VisionTransformer
from .vit_backbones.vit_moco import vit_base
from .vit_backbones.vit_mae import build_model as mae_vit_model
from .vit_prompt.vit import PromptedVisionTransformer
from .vit_prompt.swin_transformer import PromptedSwinTransformer
from .vit_prompt.vit_moco import vit_base as prompt_vit_base
from .vit_prompt.vit_mae import build_model as prompt_mae_vit_model
from .vit_adapter.vit_mae import build_model as adapter_mae_vit_model
from .vit_adapter.vit_moco import vit_base as adapter_vit_base
from .vit_adapter.vit import ADPT_VisionTransformer
def _build_prompted_swin_model(model_type, crop_size, prompt_cfg, model_root):
    """Build a prompted Swin Transformer and load its pretrained checkpoint.

    Args:
        model_type: one of the "swin*" keys of ``MODEL_ZOO``.
        crop_size: input resolution for the 224-style variants (the *_384
            variants always use a fixed 384 input).
        prompt_cfg: prompt configuration forwarded to the model.
        model_root: directory containing the checkpoints named in MODEL_ZOO.

    Returns:
        (model, feat_dim) where feat_dim = embed_dim * 2**(num_layers - 1).

    Raises:
        ValueError: for an unknown ``model_type`` (the old code crashed later
            with NameError instead).
    """
    # Per-architecture hyper-parameters, mirroring the official Swin configs:
    # (embed_dim, depths, num_heads, window_size, drop_path_rate,
    #  fixed img_size or None to use crop_size).
    arch = {
        "swint_imagenet":        (96,  [2, 2, 6, 2],  [3, 6, 12, 24], 7,  0.2, None),
        "swint_imagenet_ssl":    (96,  [2, 2, 6, 2],  [3, 6, 12, 24], 7,  0.2, None),
        "swins_imagenet":        (96,  [2, 2, 18, 2], [3, 6, 12, 24], 7,  0.3, None),
        "swinb_imagenet_224":    (128, [2, 2, 18, 2], [4, 8, 16, 32], 7,  0.5, None),
        "swinb_imagenet_384":    (128, [2, 2, 18, 2], [4, 8, 16, 32], 12, 0.5, 384),
        "swinb_imagenet22k_224": (128, [2, 2, 18, 2], [4, 8, 16, 32], 7,  0.5, None),
        "swinb_imagenet22k_384": (128, [2, 2, 18, 2], [4, 8, 16, 32], 12, 0.5, 384),
        "swinl_imagenet22k_224": (192, [2, 2, 18, 2], [6, 12, 24, 48], 7, 0.5, None),
    }
    if model_type not in arch:
        raise ValueError(f"Unknown Swin model type: {model_type}")
    embed_dim, depths, num_heads, window_size, drop_path_rate, img_size = arch[model_type]
    num_layers = 4

    model = PromptedSwinTransformer(
        prompt_cfg,
        img_size=img_size if img_size is not None else crop_size,
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        drop_path_rate=drop_path_rate,
        num_classes=-1,  # negative value makes the head an identity
    )
    feat_dim = int(embed_dim * 2 ** (num_layers - 1))

    # load checkpoint
    model_w = os.path.join(model_root, MODEL_ZOO[model_type])
    checkpoint = torch.load(model_w, map_location='cpu')
    state_dict = checkpoint['model']

    if crop_size == 448:
        # Attention masks are resolution-dependent; drop them so they are
        # rebuilt for the larger input. (The old "keep" branch was a no-op
        # self-assignment and has been removed.)
        for k in list(state_dict.keys()):
            if "attn_mask" in k:
                del state_dict[k]

    # rename moco pre-trained keys for ssl checkpoints ("encoder." prefix)
    if model_type.endswith("ssl"):
        for k in list(state_dict.keys()):
            if k.startswith('encoder.'):
                state_dict[k[len("encoder."):]] = state_dict[k]
                del state_dict[k]

    # Non-strict: prompt parameters are absent from the checkpoint.
    model.load_state_dict(state_dict, strict=False)

    return model, feat_dim
def _build_swin_model(model_type, crop_size, model_root):
    """Build a plain Swin Transformer and load its pretrained checkpoint.

    Args:
        model_type: one of the "swin*" keys of ``MODEL_ZOO``.
        crop_size: input resolution for the 224-style variants (the *_384
            variants always use a fixed 384 input).
        model_root: directory containing the checkpoints named in MODEL_ZOO.

    Returns:
        (model, feat_dim) where feat_dim = embed_dim * 2**(num_layers - 1).

    Raises:
        ValueError: for an unknown ``model_type`` (the old code crashed later
            with NameError instead).
    """
    # Per-architecture hyper-parameters, mirroring the official Swin configs:
    # (embed_dim, depths, num_heads, window_size, drop_path_rate,
    #  fixed img_size or None to use crop_size).
    arch = {
        "swint_imagenet":        (96,  [2, 2, 6, 2],  [3, 6, 12, 24], 7,  0.2, None),
        "swint_imagenet_ssl":    (96,  [2, 2, 6, 2],  [3, 6, 12, 24], 7,  0.2, None),
        "swins_imagenet":        (96,  [2, 2, 18, 2], [3, 6, 12, 24], 7,  0.3, None),
        "swinb_imagenet_224":    (128, [2, 2, 18, 2], [4, 8, 16, 32], 7,  0.5, None),
        "swinb_imagenet_384":    (128, [2, 2, 18, 2], [4, 8, 16, 32], 12, 0.5, 384),
        "swinb_imagenet22k_224": (128, [2, 2, 18, 2], [4, 8, 16, 32], 7,  0.5, None),
        "swinb_imagenet22k_384": (128, [2, 2, 18, 2], [4, 8, 16, 32], 12, 0.5, 384),
        "swinl_imagenet22k_224": (192, [2, 2, 18, 2], [6, 12, 24, 48], 7, 0.5, None),
    }
    if model_type not in arch:
        raise ValueError(f"Unknown Swin model type: {model_type}")
    embed_dim, depths, num_heads, window_size, drop_path_rate, img_size = arch[model_type]
    num_layers = 4

    model = SwinTransformer(
        img_size=img_size if img_size is not None else crop_size,
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        drop_path_rate=drop_path_rate,
        num_classes=-1,  # setting to a negative value will make head as identity
    )
    feat_dim = int(embed_dim * 2 ** (num_layers - 1))

    # load checkpoint
    model_w = os.path.join(model_root, MODEL_ZOO[model_type])
    checkpoint = torch.load(model_w, map_location='cpu')
    state_dict = checkpoint['model']

    if crop_size == 448:
        # Attention masks are resolution-dependent; drop them so they are
        # rebuilt for the larger input. (The old "keep" branch was a no-op
        # self-assignment and has been removed.)
        for k in list(state_dict.keys()):
            if "attn_mask" in k:
                del state_dict[k]

    # rename moco pre-trained keys for ssl checkpoints ("encoder." prefix)
    if model_type.endswith("ssl"):
        for k in list(state_dict.keys()):
            if k.startswith('encoder.'):
                state_dict[k[len("encoder."):]] = state_dict[k]
                del state_dict[k]

    model.load_state_dict(state_dict, strict=False)

    return model, feat_dim
def build_swin_model(model_type, crop_size, prompt_cfg, model_root):
    """Dispatch to the prompted or plain Swin builder.

    Returns:
        (model, feat_dim) as produced by the chosen builder.
    """
    if prompt_cfg is None:
        return _build_swin_model(model_type, crop_size, model_root)
    return _build_prompted_swin_model(
        model_type, crop_size, prompt_cfg, model_root)
20,425 | import numpy as np
import torch
import os
from .vit_backbones.swin_transformer import SwinTransformer
from .vit_backbones.vit import VisionTransformer
from .vit_backbones.vit_moco import vit_base
from .vit_backbones.vit_mae import build_model as mae_vit_model
from .vit_prompt.vit import PromptedVisionTransformer
from .vit_prompt.swin_transformer import PromptedSwinTransformer
from .vit_prompt.vit_moco import vit_base as prompt_vit_base
from .vit_prompt.vit_mae import build_model as prompt_mae_vit_model
from .vit_adapter.vit_mae import build_model as adapter_mae_vit_model
from .vit_adapter.vit_moco import vit_base as adapter_vit_base
from .vit_adapter.vit import ADPT_VisionTransformer
MODEL_ZOO = {
"swint_imagenet": "swin_tiny_patch4_window7_224.pth",
"swint_imagenet_ssl": "moby_swin_t_300ep_pretrained.pth",
"swins_imagenet": "swin_small_patch4_window7_224.pth",
"swinb_imagenet_224": "swin_base_patch4_window7_224.pth",
"swinb_imagenet_384": "swin_base_patch4_window12_384.pth",
"swinb_imagenet22k_224": "swin_base_patch4_window7_224_22k.pth",
"swinb_imagenet22k_384": "swin_base_patch4_window12_384_22k.pth",
"swinl_imagenet22k_224": "swin_large_patch4_window7_224_22k.pth",
"sup_vitb8": "ViT-B_8.npz",
"sup_vitb16_224": "ViT-B_16-224.npz",
"sup_vitb16": "ViT-B_16.npz",
"sup_vitl16_224": "ViT-L_16-224.npz",
"sup_vitl16": "ViT-L_16.npz",
"sup_vitb8_imagenet21k": "imagenet21k_ViT-B_8.npz",
"sup_vitb32_imagenet21k": "imagenet21k_ViT-B_32.npz",
"sup_vitb16_imagenet21k": "imagenet21k_ViT-B_16.npz",
"sup_vitl16_imagenet21k": "imagenet21k_ViT-L_16.npz",
"sup_vitl32_imagenet21k": "imagenet21k_ViT-L_32.npz",
"sup_vith14_imagenet21k": "imagenet21k_ViT-H_14.npz",
"mae_vith14": "mae_pretrain_vit_huge.pth",
"mae_vitb16": "mae_pretrain_vit_base.pth",
"mae_vitl16": "mae_pretrain_vit_large.pth",
}
class VisionTransformer(nn.Module):
    """Supervised ViT backbone (jax-checkpoint flavor) with a linear head.

    ``num_classes <= 0`` replaces the head with ``nn.Identity`` so the model
    acts as a pure feature extractor.
    """

    def __init__(
        self, model_type,
        img_size=224, num_classes=21843, vis=False
    ):
        super(VisionTransformer, self).__init__()
        config = CONFIGS[model_type]
        self.num_classes = num_classes
        self.classifier = config.classifier

        self.transformer = Transformer(config, img_size, vis)
        self.head = Linear(config.hidden_size, num_classes) if num_classes > 0 else nn.Identity()

    def forward(self, x, vis=False):
        x, attn_weights = self.transformer(x)
        # Classify from the [CLS] token (position 0).
        logits = self.head(x[:, 0])

        if not vis:
            return logits
        return logits, attn_weights  # attn_weights: num_layers, B, num_head, num_patches, num_patches

    def forward_cls_layerwise(self, x):
        # Per-layer [CLS] embeddings (useful for probing / analysis).
        cls_embeds = self.transformer.forward_cls_layerwise(x)
        return cls_embeds

    def load_from(self, weights):
        """Load jax ``.npz`` pretrained weights, resizing the position
        embeddings by order-1 (linear) interpolation when the token grid
        differs from the checkpoint's."""
        with torch.no_grad():
            self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights["embedding/kernel"], conv=True))
            self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights["embedding/bias"]))
            self.transformer.embeddings.cls_token.copy_(np2th(weights["cls"]))
            self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights["Transformer/encoder_norm/scale"]))
            self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights["Transformer/encoder_norm/bias"]))

            posemb = np2th(weights["Transformer/posembed_input/pos_embedding"])
            posemb_new = self.transformer.embeddings.position_embeddings
            if posemb.size() == posemb_new.size():
                self.transformer.embeddings.position_embeddings.copy_(posemb)
            else:
                logger.info("load_pretrained: resized variant: %s to %s" % (posemb.size(), posemb_new.size()))
                ntok_new = posemb_new.size(1)

                if self.classifier == "token":
                    # Keep the [CLS] embedding aside; only the grid is resized.
                    posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
                    ntok_new -= 1
                else:
                    posemb_tok, posemb_grid = posemb[:, :0], posemb[0]

                gs_old = int(np.sqrt(len(posemb_grid)))
                gs_new = int(np.sqrt(ntok_new))
                print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))
                posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)

                zoom = (gs_new / gs_old, gs_new / gs_old, 1)
                posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)  # interpolate over the 2-D grid only
                posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)
                posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)
                self.transformer.embeddings.position_embeddings.copy_(np2th(posemb))

            # Each encoder block loads its own weights by block index.
            for bname, block in self.transformer.encoder.named_children():
                for uname, unit in block.named_children():
                    unit.load_from(weights, n_block=uname)

            # Hybrid (ResNet-stem) variants also restore the conv root.
            if self.transformer.embeddings.hybrid:
                self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(weights["conv_root/kernel"], conv=True))
                gn_weight = np2th(weights["gn_root/scale"]).view(-1)
                gn_bias = np2th(weights["gn_root/bias"]).view(-1)
                self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight)
                self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias)

                for bname, block in self.transformer.embeddings.hybrid_model.body.named_children():
                    for uname, unit in block.named_children():
                        unit.load_from(weights, n_block=bname, n_unit=uname)
class PromptedVisionTransformer(VisionTransformer):
    """VisionTransformer whose transformer carries learnable visual prompts."""

    def __init__(
        self, prompt_cfg, model_type,
        img_size=224, num_classes=21843, vis=False
    ):
        assert prompt_cfg.VIT_POOL_TYPE == "original"
        super(PromptedVisionTransformer, self).__init__(
            model_type, img_size, num_classes, vis)
        if prompt_cfg is None:
            raise ValueError("prompt_cfg cannot be None if using PromptedVisionTransformer")
        self.prompt_cfg = prompt_cfg
        vit_cfg = CONFIGS[model_type]
        # Swap the plain transformer built by the parent for a prompted one.
        self.transformer = PromptedTransformer(
            prompt_cfg, vit_cfg, img_size, vis)

    def forward(self, x, vis=False):
        features, attn_weights = self.transformer(x)
        cls_feat = features[:, 0]  # [CLS] token
        logits = self.head(cls_feat)
        if vis:
            return logits, attn_weights
        return logits
class ADPT_VisionTransformer(nn.Module):
    """Adapter-equipped ViT: same interface and weight loading as
    ``VisionTransformer`` but the encoder blocks contain adapters
    (configured via ``adapter_cfg``)."""

    def __init__(
        self, model_type,
        img_size=224, num_classes=21843, vis=False, adapter_cfg=None
    ):
        super(ADPT_VisionTransformer, self).__init__()
        config = CONFIGS[model_type]
        self.num_classes = num_classes
        self.classifier = config.classifier

        self.transformer = ADPT_Transformer(config, img_size, vis, adapter_cfg)
        self.head = Linear(config.hidden_size, num_classes) if num_classes > 0 else nn.Identity()

    def forward(self, x, vis=False):
        x, attn_weights = self.transformer(x)
        # Classify from the [CLS] token (position 0).
        logits = self.head(x[:, 0])

        if not vis:
            return logits
        return logits, attn_weights

    def load_from(self, weights):
        """Load jax ``.npz`` pretrained weights (adapter parameters stay at
        their fresh initialization), resizing position embeddings by order-1
        (linear) interpolation if the token grid differs."""
        with torch.no_grad():
            self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights["embedding/kernel"], conv=True))
            self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights["embedding/bias"]))
            self.transformer.embeddings.cls_token.copy_(np2th(weights["cls"]))
            self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights["Transformer/encoder_norm/scale"]))
            self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights["Transformer/encoder_norm/bias"]))

            posemb = np2th(weights["Transformer/posembed_input/pos_embedding"])
            posemb_new = self.transformer.embeddings.position_embeddings
            if posemb.size() == posemb_new.size():
                self.transformer.embeddings.position_embeddings.copy_(posemb)
            else:
                logger.info("load_pretrained: resized variant: %s to %s" % (posemb.size(), posemb_new.size()))
                ntok_new = posemb_new.size(1)

                if self.classifier == "token":
                    # Keep the [CLS] embedding aside; only the grid is resized.
                    posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
                    ntok_new -= 1
                else:
                    posemb_tok, posemb_grid = posemb[:, :0], posemb[0]

                gs_old = int(np.sqrt(len(posemb_grid)))
                gs_new = int(np.sqrt(ntok_new))
                print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))
                posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)

                zoom = (gs_new / gs_old, gs_new / gs_old, 1)
                posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)  # interpolate over the 2-D grid only
                posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)
                posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)
                self.transformer.embeddings.position_embeddings.copy_(np2th(posemb))

            # Each encoder block loads its own weights by block index.
            for bname, block in self.transformer.encoder.named_children():
                for uname, unit in block.named_children():
                    unit.load_from(weights, n_block=uname)

            # Hybrid (ResNet-stem) variants also restore the conv root.
            if self.transformer.embeddings.hybrid:
                self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(weights["conv_root/kernel"], conv=True))
                gn_weight = np2th(weights["gn_root/scale"]).view(-1)
                gn_bias = np2th(weights["gn_root/bias"]).view(-1)
                self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight)
                self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias)

                for bname, block in self.transformer.embeddings.hybrid_model.body.named_children():
                    for uname, unit in block.named_children():
                        unit.load_from(weights, n_block=bname, n_unit=uname)
def build_vit_sup_models(
    model_type, crop_size, prompt_cfg=None, model_root=None, adapter_cfg=None, load_pretrain=True, vis=False
):
    """Build a supervised ViT (plain / prompted / adapter) and optionally
    load its pretrained ``.npz`` weights from ``model_root``.

    Returns:
        (model, feature_dim) where feature_dim depends on model_type.
    """
    # image size is the size of actual image
    m2featdim = {
        "sup_vitb16_224": 768,
        "sup_vitb16": 768,
        "sup_vitl16_224": 1024,
        "sup_vitl16": 1024,
        "sup_vitb8_imagenet21k": 768,
        "sup_vitb16_imagenet21k": 768,
        "sup_vitb32_imagenet21k": 768,
        "sup_vitl16_imagenet21k": 1024,
        "sup_vitl32_imagenet21k": 1024,
        "sup_vith14_imagenet21k": 1280,
    }
    if prompt_cfg is not None:
        model = PromptedVisionTransformer(
            prompt_cfg, model_type, crop_size, num_classes=-1, vis=vis)
    elif adapter_cfg is not None:
        model = ADPT_VisionTransformer(
            model_type, crop_size, num_classes=-1, adapter_cfg=adapter_cfg)
    else:
        model = VisionTransformer(model_type, crop_size, num_classes=-1, vis=vis)

    if load_pretrain:
        weights = np.load(os.path.join(model_root, MODEL_ZOO[model_type]))
        model.load_from(weights)

    return model, m2featdim[model_type]
20,426 | import os
import json
import numpy as np
import time
import pandas as pd
from typing import List, Union
from PIL import Image, ImageFile
def save_or_append_df(out_path, df):
    """Pickle ``df`` to ``out_path``; if a pickle already exists there,
    append the new rows to it (the combined index is reset)."""
    if os.path.exists(out_path):
        earlier = pd.read_pickle(out_path)
        df = pd.concat([earlier, df], ignore_index=True)
    df.to_pickle(out_path)
    print(f"Saved output at {out_path}")
20,427 | import os
import json
import numpy as np
import time
import pandas as pd
from typing import List, Union
from PIL import Image, ImageFile
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that also serializes numpy scalars/arrays and bytes."""

    def default(self, obj):
        """Convert numpy / bytes objects to JSON-serializable equivalents.

        Raises:
            TypeError: for objects with no known conversion.
        """
        # Fixed: the original had a second, unreachable np.ndarray branch
        # after the np.floating check; it has been removed.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, bytes):
            return str(obj, encoding='utf-8')
        elif isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        else:
            raise TypeError(
                "Unserializable object {} of type {}".format(obj, type(obj))
            )
def write_json(data: Union[list, dict], outfile: str) -> None:
    """Serialize ``data`` to ``outfile`` as human-readable JSON, creating
    parent directories as needed (numpy/bytes handled via ``JSONEncoder``)."""
    parent = os.path.split(outfile)[0]
    if parent and not os.path.exists(parent):
        os.makedirs(parent)

    with open(outfile, 'w') as f:
        json.dump(data, f, cls=JSONEncoder, ensure_ascii=False, indent=2)
20,428 | import os
import json
import numpy as np
import time
import pandas as pd
from typing import List, Union
from PIL import Image, ImageFile
The provided code snippet includes necessary dependencies for implementing the `read_json` function. Write a Python function `def read_json(filename: str) -> Union[list, dict]` to solve the following problem:
read json files
Here is the function:
def read_json(filename: str) -> Union[list, dict]:
    """Read and parse a JSON file.

    Opens the file in binary mode and lets ``json`` auto-detect the
    utf-8/16/32 encoding.

    Note: the original passed ``encoding="utf-8"`` to ``json.load``; that
    keyword was removed in Python 3.9, where it raised ``TypeError``.
    """
    with open(filename, "rb") as fin:
        data = json.load(fin)
    return data
20,429 | import os
import json
import numpy as np
import time
import pandas as pd
from typing import List, Union
from PIL import Image, ImageFile
Image.MAX_IMAGE_PIXELS = None
The provided code snippet includes necessary dependencies for implementing the `pil_loader` function. Write a Python function `def pil_loader(path: str) -> Image.Image` to solve the following problem:
load an image from path, and suppress warning
Here is the function:
def pil_loader(path: str) -> Image.Image:
    """Load ``path`` as an RGB PIL image, tolerating truncated files."""
    # Allow decoding of truncated/corrupted images instead of raising.
    ImageFile.LOAD_TRUNCATED_IMAGES = True
    # Open via an explicit file handle to avoid a ResourceWarning
    # (https://github.com/python-pillow/Pillow/issues/835).
    with open(path, 'rb') as fh:
        image = Image.open(fh)
        return image.convert('RGB')
20,430 | import builtins
import decimal
import functools
import logging
import simplejson
import sys
import os
from termcolor import colored
from .distributed import is_master_process
from .file_io import PathManager
_FORMAT = "[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s"
def _cached_log_stream(filename):
    # Open the log file in append mode through PathManager so that
    # non-local (e.g. remote/URL) paths are handled uniformly.
    return PathManager.open(filename, "a")
class _ColorfulFormatter(logging.Formatter):
    # from detectron2
    """Log formatter that colorizes WARNING/ERROR prefixes via termcolor and
    abbreviates the configured root logger name in record names."""

    def __init__(self, *args, **kwargs):
        # "root_name" is required; "abbrev_name" optionally replaces it in
        # record names. Both get a trailing dot so only full name segments match.
        self._root_name = kwargs.pop("root_name") + "."
        self._abbrev_name = kwargs.pop("abbrev_name", "")
        if len(self._abbrev_name):
            self._abbrev_name = self._abbrev_name + "."
        super(_ColorfulFormatter, self).__init__(*args, **kwargs)

    def formatMessage(self, record: logging.LogRecord) -> str:
        # NOTE: mutates record.name in place before delegating to the base class.
        record.name = record.name.replace(self._root_name, self._abbrev_name)
        log = super(_ColorfulFormatter, self).formatMessage(record)
        # Prepend a colored tag for warnings and errors; other levels pass through.
        if record.levelno == logging.WARNING:
            prefix = colored("WARNING", "red", attrs=["blink"])
        elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
            prefix = colored("ERROR", "red", attrs=["blink", "underline"])
        else:
            return log
        return prefix + " " + log
import logging
PathManager = PathManagerBase()
PathManager.register_handler(HTTPURLHandler())
The provided code snippet includes necessary dependencies for implementing the `setup_single_logging` function. Write a Python function `def setup_single_logging(name, output="")` to solve the following problem:
Sets up the logging.
Here is the function:
def setup_single_logging(name, output=""):
    """Set up and return a configured logger for a single process.

    Args:
        name: logger name; falls back to this module's ``__name__`` if empty.
        output: optional destination; a ``.txt``/``.log`` path is used directly,
            any other non-empty string is treated as a directory and
            ``logs.txt`` is created inside it.

    Returns:
        A ``logging.Logger`` with a colored stdout handler, plus a plain-text
        file handler when ``output`` is given.
    """
    # Enable logging only for the master process
    # Clear the root logger to prevent any existing logging config
    # (e.g. set by another module) from messing with our setup
    logging.root.handlers = []
    # Configure logging
    logging.basicConfig(
        level=logging.INFO, format=_FORMAT, stream=sys.stdout
    )
    if len(name) == 0:
        name = __name__
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # Do not bubble records up to the (basicConfig'd) root logger.
    logger.propagate = False
    plain_formatter = logging.Formatter(
        "[%(asctime)s][%(levelname)s] %(name)s: %(lineno)4d: %(message)s",
        datefmt="%m/%d %H:%M:%S",
    )
    formatter = _ColorfulFormatter(
        colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s",
        datefmt="%m/%d %H:%M:%S",
        root_name=name,
        abbrev_name=str(name),
    )
    # Colored console handler.
    ch = logging.StreamHandler(stream=sys.stdout)
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    if len(output) > 0:
        # Plain-text file handler.
        if output.endswith(".txt") or output.endswith(".log"):
            filename = output
        else:
            filename = os.path.join(output, "logs.txt")
        PathManager.mkdirs(os.path.dirname(filename))
        fh = logging.StreamHandler(_cached_log_stream(filename))
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(plain_formatter)
        logger.addHandler(fh)
    return logger
20,431 | import builtins
import decimal
import functools
import logging
import simplejson
import sys
import os
from termcolor import colored
from .distributed import is_master_process
from .file_io import PathManager
def get_logger(name):
    """Retrieve (or lazily create) the stdlib logger registered under ``name``."""
    return logging.getLogger(name)
The provided code snippet includes necessary dependencies for implementing the `log_json_stats` function. Write a Python function `def log_json_stats(stats, sort_keys=True)` to solve the following problem:
Logs json stats.
Here is the function:
def log_json_stats(stats, sort_keys=True):
    """Log ``stats`` as a JSON string.

    Floats are first converted to fixed-precision ``Decimal`` (json's
    FLOAT_REPR hook has no effect on Python >= 3.6), so logged values have a
    stable length.

    Args:
        stats: dict of stats; expected to contain a "_type" key.
        sort_keys: whether to sort JSON keys. Bug fix: this argument was
            previously ignored (sorting was hardcoded to True).
    """
    # It seems that in Python >= 3.6 json.encoder.FLOAT_REPR has no effect
    # Use decimal+string as a workaround for having fixed length values in logs
    logger = get_logger(__name__)
    stats = {
        k: decimal.Decimal("{:.6f}".format(v)) if isinstance(v, float) else v
        for k, v in stats.items()
    }
    json_stats = simplejson.dumps(stats, sort_keys=sort_keys, use_decimal=True)
    if stats["_type"] == "test_epoch" or stats["_type"] == "train_epoch":
        logger.info("json_stats: {:s}".format(json_stats))
    else:
        logger.info("{:s}".format(json_stats))
20,432 | import datetime
import os
import glob
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from collections import defaultdict
from sklearn.metrics import confusion_matrix
import warnings
def remove_trailing(eval_dict):
    """Truncate every non-"top5" list in ``eval_dict`` to the shortest one.

    Keys containing "top5" are excluded both from the length computation
    and from the returned dict.
    """
    lengths = [len(vals) for key, vals in eval_dict.items() if "top5" not in key]
    cutoff = min(lengths)
    return {
        key: vals[:cutoff]
        for key, vals in eval_dict.items()
        if "top5" not in key
    }
20,433 | import datetime
import os
import glob
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from collections import defaultdict
from sklearn.metrics import confusion_matrix
import warnings
def get_nmi(job_path):
    """Parse clustering NMI metrics from a training log.

    Only the first job's output is parsed: scanning stops when a second
    "Rank of current process:" marker is encountered. Every "Clutering nmi"
    line contributes its nmi, adjusted-nmi and v-measure values.

    Returns:
        defaultdict(list) with keys "nmi", "a_nmi", "v_nmi".
    """
    with open(job_path) as fh:
        log_lines = fh.readlines()

    metrics = defaultdict(list)
    rank_markers = 0
    for line in log_lines:
        if "Rank of current process:" in line:
            rank_markers += 1
            if rank_markers == 2:
                break
        if "Clutering nmi" in line:
            metrics["nmi"].append(
                float(line.split("Clutering nmi: ")[-1].split(",")[0]))
            metrics["a_nmi"].append(
                float(line.split("adjusted nmi: ")[-1].split(",")[0]))
            metrics["v_nmi"].append(
                float(line.split("v: ")[-1].split(",")[0]))
    return metrics
20,434 | import datetime
import os
import glob
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from collections import defaultdict
from sklearn.metrics import confusion_matrix
import warnings
def get_mean_accuracy(job_path, data_name):
    """Compute per-class mean accuracy (%) for the val and test splits.

    Loads the logits saved next to ``job_path`` and averages per-class
    recall (confusion-matrix diagonal over row sums).

    Returns:
        (val_mean_acc, test_mean_acc), both in percent.
    """
    val_data = torch.load(
        job_path.replace("logs.txt", f"val_{data_name}_logits.pth"))
    # Bug fix: this previously re-loaded the *val* logits file, so the
    # returned "test" accuracy silently duplicated the val accuracy.
    # TODO(review): confirm the test logits file is named test_{data}_logits.pth.
    test_data = torch.load(
        job_path.replace("logs.txt", f"test_{data_name}_logits.pth"))
    v_matrix = confusion_matrix(
        val_data['targets'],
        np.argmax(val_data['joint_logits'], 1)
    )
    t_matrix = confusion_matrix(
        test_data['targets'],
        np.argmax(test_data['joint_logits'], 1)
    )
    return np.mean(v_matrix.diagonal() / v_matrix.sum(axis=1)) * 100, \
        np.mean(t_matrix.diagonal() / t_matrix.sum(axis=1)) * 100
20,435 | import datetime
import os
import glob
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from collections import defaultdict
from sklearn.metrics import confusion_matrix
import warnings
def get_training_data(job_path, model_type, job_root):
    """Parse one run's log into (train losses, eval metrics, run metadata).

    Only the first job's output is parsed: scanning stops when a second
    "Rank of current process:" marker appears.

    Returns:
        (train_loss, eval_dict, meta_dict, (None, None)) where the final
        tuple is a placeholder for last-epoch val/test top-1.
    """
    data_name, feat_type, lr, wd = get_meta(job_root, job_path, model_type)
    with open(job_path) as f:
        lines = f.readlines()
    # get training loss per epoch,
    # cls results for both val and test
    train_loss = []
    eval_dict = defaultdict(list)
    # best_epoch = -1
    num_jobs = 0
    total_params = -1
    gradiented_params = -1
    batch_size = None
    for line in lines: #, leave=False):
        # First BATCH_SIZE occurrence in the dumped config wins.
        if "{'BATCH_SIZE'" in line and batch_size is None:
            batch_size = int(line.split("'BATCH_SIZE': ")[-1].split(",")[0])
        if "Total Parameters: " in line:
            total_params = int(line.split("Total Parameters: ")[-1].split("\t")[0])
            gradiented_params = int(line.split("Gradient Parameters: ")[-1].split("\n")[0])
        if "Rank of current process:" in line:
            num_jobs += 1
            if num_jobs == 2:
                break
        if "average train loss:" in line:
            loss = float(line.split("average train loss: ")[-1])
            train_loss.append(loss)
        if " Classification results with " in line:
            update_eval(line, eval_dict, data_name)
    meta_dict = {
        "data": data_name,
        "feature": feat_type,
        # Scale the logged lr back by the effective batch size (base 256).
        "lr": float(lr) * 256 / int(batch_size),
        "wd": wd,
        "total_params": total_params,
        "tuned_params": gradiented_params,
        "tuned / total (%)": round(gradiented_params / total_params * 100, 4),
        "batch_size": batch_size,
    }
    v_top1, t_top1 = None, None
    return train_loss, eval_dict, meta_dict, (v_top1, t_top1)
def get_time(file):
    """Parse a training log for wall-clock duration and per-batch timings.

    Args:
        file: path to a log whose first and last lines start with a
            "[MM/DD HH:MM:SS]" timestamp.

    Returns:
        (total_duration, mean_eval_s_per_batch, mean_train_s_per_batch).
        The means are ``nan`` (with a numpy warning) when no matching lines
        are found.
    """
    with open(file) as f:
        lines = f.readlines()
    # Wall-clock time between the first and last log timestamps.
    start_time = lines[0].split("[")[1].split("]")[0]
    start_time = datetime.datetime.strptime(start_time, '%m/%d %H:%M:%S')
    end_time = lines[-1].split("[")[1].split("]")[0]
    end_time = datetime.datetime.strptime(end_time, '%m/%d %H:%M:%S')

    # Collect "s / batch" timings. (Fix: the file was previously re-read
    # here and a dead ``per_iter = None`` initializer was kept around.)
    per_batch = []
    per_batch_train = []
    for line in lines[::-1]:
        # e.g. "Test 6/6. loss: 6.097, 0.50 s / batch"
        if ". loss:" in line and "Test" in line:
            per_batch.append(float(line.split(" s / batch")[0].split(",")[-1]))
        if ". train loss:" in line:
            per_batch_train.append(float(line.split(" s / batch")[0].split(",")[-1]))
    return datetime.timedelta(
        seconds=(end_time - start_time).total_seconds()
    ), np.mean(per_batch), np.mean(per_batch_train)
def get_df(files, model_type, root, is_best=True, is_last=True, max_epoch=300):
    """Aggregate per-run log files into one result DataFrame.

    For each log: collects run metadata, best-epoch metrics (best epoch is
    chosen by highest val_top1), last-epoch metrics, and total wall time.
    Runs with incomplete logs are skipped with a console message.
    Returns None when nothing was collected.
    """
    pd_dict = defaultdict(list)
    for job_path in tqdm(files, desc=model_type):
        train_loss, eval_results, meta_dict, (v_top1, t_top1) = get_training_data(job_path, model_type, root)
        batch_size = meta_dict["batch_size"]
        if len(eval_results) == 0:
            print(f"job {job_path} not ready")
            continue
        if len(eval_results["val_top1"]) == 0:
            print(f"job {job_path} not ready")
            continue
        if "val_top1" not in eval_results or "test_top1" not in eval_results:
            print(f"inbalanced: {job_path}")
            continue
        for k, v in meta_dict.items():
            pd_dict[k].append(v)
        metric_b = "val_top1"
        best_epoch = np.argmax(eval_results[metric_b])
        if is_best:
            # Record every scalar metric (excluding top5) at the best epoch.
            for name, val in eval_results.items():
                if "top5" in name:
                    continue
                if len(val) == 0:
                    continue
                if not isinstance(val[0], list):
                    try:
                        pd_dict["b-" + name].append(val[best_epoch])
                    except:
                        pd_dict["b-" + name].append(-1)
                        # ongoing training process
                        print(name, best_epoch, val)
        # last epoch
        if is_last:
            if v_top1 is not None:
                pd_dict["l-val_top1"].append(v_top1)
                pd_dict["l-test_top1"].append(t_top1)
                val = eval_results["val_top1"]
            else:
                for name, val in eval_results.items():
                    if "top5" in name:
                        continue
                    if len(val) == 0:
                        continue
                    pd_dict["l-" + name].append(val[-1])
        pd_dict["best_epoch"].append(f"{best_epoch + 1} | {len(val)}")
        pd_dict["file"].append(job_path)
        total_time, _, _ = get_time(job_path)
        pd_dict["total_time"].append(total_time)
    result_df = None
    if len(pd_dict) > 0:
        result_df = pd.DataFrame(pd_dict)
        result_df = result_df.sort_values(['data', "feature", "lr", "wd"])
    return result_df
20,436 | import datetime
import os
import glob
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from collections import defaultdict
from sklearn.metrics import confusion_matrix
import warnings
def delete_ckpts(f):
    """Remove every ``*.pth`` checkpoint in the directory containing ``f``."""
    ckpt_dir = os.path.dirname(f)
    for ckpt in glob.glob(ckpt_dir + "/*.pth"):
        os.remove(ckpt)
        print(f"removed {ckpt}")
20,437 | import datetime
import os
import glob
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from collections import defaultdict
from sklearn.metrics import confusion_matrix
import warnings
def average_df(df, metric_names=["l-val_top1", "l-val_base_top1"], take_average=True):
    """Aggregate runs sharing (data, feature, type) into one summary row each.

    For every group: reports mean and std (or median when ``take_average`` is
    False) of each metric in ``metric_names`` as 2-decimal strings, the run
    count, and the hyper-parameters taken from the group's first row.
    """
    # for each data and features and train type, display the averaged results
    data_names = set(list(df["data"]))
    f_names = set(list(df["feature"]))
    t_names = set(list(df["type"]))
    # Hyper-parameter columns = everything that is not a key/metric/bookkeeping column.
    hp_names = [
        c for c in df.columns if c not in ["data", "feature", "type", "file", "best_epoch"] + metric_names]
    data_dict = defaultdict(list)
    for d_name in data_names:
        for f_name in f_names:
            for t_name in t_names:
                result = df[df.data == d_name]
                result = result[result.feature == f_name]
                result = result[result.type == t_name]
                # take average here
                if len(result) == 0:
                    continue
                data_dict["data"].append(d_name)
                data_dict["feature"].append(f_name)
                data_dict["type"].append(t_name)
                data_dict["total_runs"].append(len(result))
                for m in metric_names:
                    if take_average:
                        data_dict[m].append("{:.2f}".format(
                            np.mean([r for i, r in enumerate(result[m])]),
                        ))
                        data_dict[f"{m}-std"].append("{:.2f}".format(
                            np.std([r for i, r in enumerate(result[m])])
                        ))
                    else:
                        data_dict[m].append("{:.2f}".format(
                            np.median([r for i, r in enumerate(result[m])]),
                        ))
                for h_name in hp_names:
                    data_dict[h_name].append(result[h_name].iloc[0])
    df = pd.DataFrame(data_dict)
    df = df.sort_values(["data", "feature", "type"])
    return df
20,438 | import datetime
import os
import glob
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from collections import defaultdict
from sklearn.metrics import confusion_matrix
import warnings
def filter_df(df, sorted_cols, max_num):
    """Keep only the top ``max_num`` rows per (data, feature, type) group.

    Each group is sorted ascending by the columns of ``sorted_cols`` present
    in ``df`` and the last ``max_num`` rows are kept.
    """
    kept = []
    for d_name in set(df["data"]):
        for f_name in set(df["feature"]):
            for t_name in set(df["type"]):
                subset = df[
                    (df.data == d_name)
                    & (df.feature == f_name)
                    & (df.type == t_name)
                ]
                if len(subset) == 0:
                    continue
                keys = [c for c in sorted_cols if c in subset.columns]
                subset = subset.sort_values(keys, ignore_index=True)
                keep = min([max_num, len(subset)])
                kept.append(subset.iloc[-keep:])
    return pd.concat(kept)
def display_results(df, sorted_cols=["data", "feature", "type", "l-val_top1"], max_num=1):
    """Sort the result table (and keep top ``max_num`` rows per group) for display.

    ``sorted_cols[3:]`` are used as the per-group ranking columns when
    ``max_num`` is given; ``max_num=None`` skips the per-group filtering.
    """
    # NOTE: the column filter below is a no-op kept from the original.
    selected = [c for c in df.columns if c not in []]
    df = df[selected]
    if max_num is not None:
        df = filter_df(df, sorted_cols[3:], max_num)
    return df.sort_values(sorted_cols).reset_index(drop=True)
20,439 | import torch
import torch.distributed as dist
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run( local_rank, num_proc, func, init_method, shard_id, num_shards, backend, cfg, args, )` to solve the following problem:
Runs a function from a child process. Args: local_rank (int): rank of the current process on the current machine. num_proc (int): number of processes per machine. func (function): function to execute on each of the process. init_method (string): method to initialize the distributed training. TCP initialization: equiring a network address reachable from all processes followed by the port. Shared file-system initialization: makes use of a file system that is shared and visible from all machines. The URL should start with file:// and contain a path to a non-existent file on a shared file system. shard_id (int): the rank of the current machine. num_shards (int): number of overall machines for the distributed training job. backend (string): three distributed backends ('nccl', 'gloo', 'mpi') are supports, each with different capabilities. Details can be found here: https://pytorch.org/docs/stable/distributed.html cfg (CfgNode): configs. Details can be found in loco/config/defaults.py
Here is the function:
def run(
    local_rank,
    num_proc,
    func,
    init_method,
    shard_id,
    num_shards,
    backend,
    cfg,
    args,
):
    """
    Runs a function from a child process.
    Args:
        local_rank (int): rank of the current process on the current machine.
        num_proc (int): number of processes per machine.
        func (function): function to execute on each of the process.
        init_method (string): method to initialize the distributed training.
            TCP initialization: requiring a network address reachable from all
            processes followed by the port.
            Shared file-system initialization: makes use of a file system that
            is shared and visible from all machines. The URL should start with
            file:// and contain a path to a non-existent file on a shared file
            system.
        shard_id (int): the rank of the current machine.
        num_shards (int): number of overall machines for the distributed
            training job.
        backend (string): three distributed backends ('nccl', 'gloo', 'mpi') are
            supported, each with different capabilities. Details can be found
            here:
            https://pytorch.org/docs/stable/distributed.html
        cfg (CfgNode): configs. Details can be found in
            loco/config/defaults.py
    """
    # Initialize the process group.
    # shard_id = get_rank()
    world_size = num_proc * num_shards
    # Global rank of this process across all shards.
    rank = shard_id * num_proc + local_rank
    try:
        torch.distributed.init_process_group(
            backend=backend,
            init_method=init_method,
            world_size=world_size,
            rank=rank,
        )
    except Exception as e:
        raise e
    # Pin this process to its GPU before invoking the target function.
    torch.cuda.set_device(local_rank)
    func(cfg, args)
20,440 | import torch
import torch.distributed as dist
The provided code snippet includes necessary dependencies for implementing the `destroy_process_group` function. Write a Python function `def destroy_process_group()` to solve the following problem:
Destroys the default process group.
Here is the function:
def destroy_process_group():
    """Destroys the default (global) torch.distributed process group."""
    torch.distributed.destroy_process_group()
20,441 | import torch
import torch.distributed as dist
The provided code snippet includes necessary dependencies for implementing the `scaled_all_reduce` function. Write a Python function `def scaled_all_reduce(cfg, tensors)` to solve the following problem:
Performs the scaled all_reduce operation on the provided tensors. The input tensors are modified in-place. Currently supports only the sum reduction operator. The reduced values are scaled by the inverse size of the process group (equivalent to cfg.NUM_GPUS).
Here is the function:
def scaled_all_reduce(cfg, tensors):
    """Sum-all-reduce ``tensors`` in place, scaled by the inverse group size.

    Reductions are queued asynchronously, awaited, and each tensor is then
    multiplied by ``1 / (cfg.NUM_GPUS * cfg.NUM_SHARDS)``.
    """
    # Queue all reductions before waiting so they can overlap.
    pending = [
        torch.distributed.all_reduce(tensor, async_op=True)
        for tensor in tensors
    ]
    for handle in pending:
        handle.wait()
    # Scale in place by the inverse world size.
    for tensor in tensors:
        tensor.mul_(1.0 / cfg.NUM_GPUS / cfg.NUM_SHARDS)
    return tensors
20,442 | import torch
import torch.distributed as dist
def get_world_size() -> int:
    """Number of processes in the default group; 1 when dist is unavailable
    or not initialized."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
The provided code snippet includes necessary dependencies for implementing the `cat_all_gather` function. Write a Python function `def cat_all_gather(tensors)` to solve the following problem:
Performs the concatenated all_gather operation on the provided tensors.
Here is the function:
def cat_all_gather(tensors):
    """All-gather ``tensors`` from every rank and concatenate along dim 0."""
    world = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensors) for _ in range(world)]
    torch.distributed.all_gather(gathered, tensors, async_op=False)
    return torch.cat(gathered, dim=0)
20,443 | import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
def get_local_size():
    """Size of the per-machine process group (processes per machine);
    1 when dist is unavailable or not initialized."""
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
The provided code snippet includes necessary dependencies for implementing the `local_cat_all_gather` function. Write a Python function `def local_cat_all_gather(tensors)` to solve the following problem:
Performs the concatenated all_gather operation on the provided tensors.
Here is the function:
def local_cat_all_gather(tensors):
    """Performs the concatenated all_gather operation on the provided tensors,
    restricted to the per-machine (local) process group.
    """
    tensors_gather = [
        torch.ones_like(tensors)
        for _ in range(get_local_size())
    ]
    torch.distributed.all_gather(
        tensors_gather,
        tensors,
        async_op=False,
        group=_LOCAL_PROCESS_GROUP,
    )
    # Concatenate along the batch (first) dimension.
    output = torch.cat(tensors_gather, dim=0)
    return output
20,444 | import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
def get_rank() -> int:
    """Rank of this process in the default group; 0 when dist is unavailable
    or not initialized."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
The provided code snippet includes necessary dependencies for implementing the `get_local_rank` function. Write a Python function `def get_local_rank()` to solve the following problem:
Returns: The rank of the current process within the local (per-machine) process group.
Here is the function:
def get_local_rank():
    """Rank of this process within its machine-local group; 0 when dist is
    unavailable or not initialized."""
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    assert _LOCAL_PROCESS_GROUP is not None
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
20,445 | import torch
The provided code snippet includes necessary dependencies for implementing the `gpu_mem_usage` function. Write a Python function `def gpu_mem_usage()` to solve the following problem:
Computes the GPU memory usage for the current device (GB).
Here is the function:
def gpu_mem_usage():
    """Computes the GPU memory usage for the current device (GB)."""
    if not torch.cuda.is_available():
        return 0
    # Bytes per gigabyte (the original comment mislabeled this a megabyte).
    bytes_per_gb = 1024 * 1024 * 1024
    return torch.cuda.max_memory_allocated() / bytes_per_gb
20,446 | import glob
import numpy as np
import os
import torch
import warnings
import random
from time import sleep
from random import randint
import src.utils.logging as logging
from src.configs.config import get_cfg
from src.data import loader as data_loader
from src.engine.evaluator import Evaluator
from src.engine.trainer import Trainer
from src.models.build_model import build_model
from src.utils.file_io import PathManager
from launch import default_argument_parser, logging_train_setup
def find_best_lrwd(files, data_name):
    """Find the (lr, wd) pair with the best last-epoch val top-1 accuracy.

    Scans each eval_results.pth in ``files``; on an accuracy tie the smaller
    learning rate wins (for stability). Unreadable files are skipped with a
    console message. The lr/wd are parsed from the directory name
    (".../lr{lr}_wd{wd}/run...").

    Returns:
        (best_lr, best_wd); both None when no file could be read.
    """
    t_name = "val_" + data_name
    best_lr = None
    best_wd = None
    best_val_acc = -1
    for f in files:
        try:
            results_dict = torch.load(f, "cpu")
            # Use the metrics of the last recorded epoch.
            epoch = len(results_dict) - 1
            val_result = results_dict[f"epoch_{epoch}"]["classification"][t_name]["top1"]
            val_result = float(val_result)
        except Exception as e:
            print(f"Encounter issue: {e} for file {f}")
            continue
        if val_result == best_val_acc:
            frag_txt = f.split("/run")[0]
            cur_lr = float(frag_txt.split("/lr")[-1].split("_wd")[0])
            cur_wd = float(frag_txt.split("_wd")[-1])
            if best_lr is not None and cur_lr < best_lr:
                # get the smallest lr to break tie for stability
                best_lr = cur_lr
                best_wd = cur_wd
            # No-op on a tie (val_result == best_val_acc already).
            best_val_acc = val_result
        elif val_result > best_val_acc:
            best_val_acc = val_result
            frag_txt = f.split("/run")[0]
            best_lr = float(frag_txt.split("/lr")[-1].split("_wd")[0])
            best_wd = float(frag_txt.split("_wd")[-1])
    return best_lr, best_wd
def get_cfg():
    """
    Get a copy of the default config.
    """
    # _C is the module-level default config node; clone so callers may mutate.
    return _C.clone()
PathManager = PathManagerBase()
PathManager.register_handler(HTTPURLHandler())
The provided code snippet includes necessary dependencies for implementing the `setup` function. Write a Python function `def setup(args, lr, wd, final_runs, run_idx=None, seed=None)` to solve the following problem:
Create configs and perform basic setups.
Here is the function:
def setup(args, lr, wd, final_runs, run_idx=None, seed=None):
    """
    Create configs and perform basic setups.

    Tuning runs (final_runs=False) run once under ``OUTPUT_DIR + "_val"`` with
    the given lr/wd (lr rescaled by batch size / 256). Final runs look up the
    best lr/wd from the tuning outputs and run under
    ``OUTPUT_DIR + "_finalfinal"`` up to RUN_N_TIMES times.

    Raises:
        ValueError: when the requested run slot(s) already exist.

    Returns:
        A frozen CfgNode with OUTPUT_DIR pointing at this run's folder.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.SEED = seed
    # create the clsemb_path for this dataset, only support vitb-sup experiments
    if cfg.DATA.FEATURE == "sup_vitb16_imagenet21k":
        cfg.MODEL.PROMPT.CLSEMB_PATH = os.path.join(
            cfg.MODEL.PROMPT.CLSEMB_FOLDER, "{}.npy".format(cfg.DATA.NAME))
    if not final_runs:
        cfg.RUN_N_TIMES = 1
        cfg.MODEL.SAVE_CKPT = False
        cfg.OUTPUT_DIR = cfg.OUTPUT_DIR + "_val"
        lr = lr / 256 * cfg.DATA.BATCH_SIZE  # update lr based on the batchsize
        cfg.SOLVER.BASE_LR = lr
        cfg.SOLVER.WEIGHT_DECAY = wd
    else:
        cfg.RUN_N_TIMES = 5
        cfg.MODEL.SAVE_CKPT = False
        # find the best lr and best wd
        files = glob.glob(f"{cfg.OUTPUT_DIR}_val/{cfg.DATA.NAME}/{cfg.DATA.FEATURE}/*/run1/eval_results.pth")
        lr, wd = find_best_lrwd(files, cfg.DATA.NAME)
        cfg.OUTPUT_DIR = cfg.OUTPUT_DIR + "_finalfinal"
        cfg.SOLVER.BASE_LR = lr
        cfg.SOLVER.WEIGHT_DECAY = wd
    # setup output dir
    # output_dir / data_name / feature_name / lr_wd / run1
    output_dir = cfg.OUTPUT_DIR
    output_folder = os.path.join(
        cfg.DATA.NAME, cfg.DATA.FEATURE, f"lr{lr}_wd{wd}"
    )
    # train cfg.RUN_N_TIMES times
    if run_idx is None:
        # Claim the first free run slot.
        count = 1
        while count <= cfg.RUN_N_TIMES:
            output_path = os.path.join(output_dir, output_folder, f"run{count}")
            # pause for a random time, so concurrent process with same setting won't interfere with each other. # noqa
            sleep(randint(1, 5))
            if not PathManager.exists(output_path):
                PathManager.mkdirs(output_path)
                cfg.OUTPUT_DIR = output_path
                break
            else:
                count += 1
        if count > cfg.RUN_N_TIMES:
            raise ValueError(
                f"Already run {cfg.RUN_N_TIMES} times for {output_folder}, no need to run more")
    else:
        # Explicit run slot requested by the caller.
        output_path = os.path.join(output_dir, output_folder, f"run{run_idx}")
        if not PathManager.exists(output_path):
            PathManager.mkdirs(output_path)
            cfg.OUTPUT_DIR = output_path
        else:
            raise ValueError(
                f"Already run run-{run_idx} for {output_folder}, no need to run more")
    cfg.freeze()
    return cfg
20,447 | import glob
import numpy as np
import os
import torch
import warnings
import random
from time import sleep
from random import randint
import src.utils.logging as logging
from src.configs.config import get_cfg
from src.data import loader as data_loader
from src.engine.evaluator import Evaluator
from src.engine.trainer import Trainer
from src.models.build_model import build_model
from src.utils.file_io import PathManager
from launch import default_argument_parser, logging_train_setup
def get_loaders(cfg, logger, final_runs=False):
class Evaluator():
def __init__(
self,
) -> None:
def update_iteration(self, iteration: int) -> None:
def update_result(self, metric: str, value: Union[float, dict]) -> None:
def classify(self, probs, targets, test_data, multilabel=False):
def _eval_singlelabel(
self,
scores: np.ndarray,
targets: List[int],
eval_type: str
) -> None:
def _eval_multilabel(
self,
scores: np.ndarray,
targets: np.ndarray,
eval_type: str
) -> None:
def log_and_update(self, log_results, save_results, eval_type):
class Trainer():
def __init__(
self,
cfg: CfgNode,
model: nn.Module,
evaluator: Evaluator,
device: torch.device,
) -> None:
def forward_one_batch(self, inputs, targets, is_train):
def get_input(self, data):
def train_classifier(self, train_loader, val_loader, test_loader):
def save_prompt(self, epoch):
def eval_classifier(self, data_loader, prefix, save=False):
def build_model(cfg):
def logging_train_setup(args, cfg) -> None:
def train(cfg, args, final_runs):
    """Run one training/eval job: seed, loggers, data, model, trainer, save."""
    # clear up residual cache from previous runs
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    # main training / eval actions here
    # fix the seed for reproducibility
    if cfg.SEED is not None:
        torch.manual_seed(cfg.SEED)
        np.random.seed(cfg.SEED)
        # NOTE(review): random.seed uses a fixed 0, not cfg.SEED — confirm intended.
        random.seed(0)
    # setup training env including loggers
    logging_train_setup(args, cfg)
    logger = logging.get_logger("visual_prompt")
    train_loader, val_loader, test_loader = get_loaders(
        cfg, logger, final_runs)
    logger.info("Constructing models...")
    model, cur_device = build_model(cfg)
    # NOTE(review): "Evalutator" is a typo in the log message text only.
    logger.info("Setting up Evalutator...")
    evaluator = Evaluator()
    logger.info("Setting up Trainer...")
    trainer = Trainer(cfg, model, evaluator, cur_device)
    if train_loader:
        trainer.train_classifier(train_loader, val_loader, test_loader)
        # save the evaluation results
        torch.save(
            evaluator.results,
            os.path.join(cfg.OUTPUT_DIR, "eval_results.pth")
        )
    else:
        print("No train loader presented. Exit")
20,448 | import glob
import numpy as np
import os
import torch
import warnings
import random
from time import sleep
from random import randint
import src.utils.logging as logging
from src.configs.config import get_cfg
from src.data import loader as data_loader
from src.engine.evaluator import Evaluator
from src.engine.trainer import Trainer
from src.models.build_model import build_model
from src.utils.file_io import PathManager
from launch import default_argument_parser, logging_train_setup
def get_lrwd_range(args):
    """Return the (learning-rate range, weight-decay range) sweep for a
    training type.

    Args:
        args: parsed arguments carrying a ``train_type`` attribute.

    Returns:
        (lr_range, wd_range) lists of candidate values.

    Raises:
        ValueError: for an unrecognized ``args.train_type`` (previously this
            fell through and crashed with ``UnboundLocalError``).
    """
    if args.train_type == "finetune":
        lr_range = [0.001, 0.0001, 0.0005, 0.005]
        wd_range = [0.01, 0.001, 0.0001, 0.0]
    elif args.train_type == "finetune_resnet":
        lr_range = [
            0.0005, 0.00025,
            0.5, 0.25, 0.05, 0.025, 0.005, 0.0025,
        ]
        wd_range = [0.01, 0.001, 0.0001, 0.0]
    elif args.train_type == "linear":
        lr_range = [
            50.0, 25., 10.0,
            5.0, 2.5, 1.0,
            0.5, 0.25, 0.1, 0.05
        ]
        wd_range = [0.01, 0.001, 0.0001, 0.0]
    elif args.train_type == "linear_mae":
        lr_range = [
            50.0, 25., 10.0,
            5.0, 2.5, 1.0,
            0.5, 0.25, 0.1, 0.05,
            0.025, 0.005, 0.0025,
        ]
        wd_range = [0.01, 0.001, 0.0001, 0.0]
    elif args.train_type == "prompt":
        lr_range = [
            5.0, 2.5, 1.0,
            50.0, 25., 10.0,
            0.5, 0.25, 0.1, 0.05
        ]
        wd_range = [0.01, 0.001, 0.0001, 0.0]
    elif args.train_type == "prompt_largerlr":
        lr_range = [
            500, 1000, 250., 100.0,
        ]
        wd_range = [0.01, 0.001, 0.0001, 0.0]
    elif args.train_type == "prompt_resnet":
        lr_range = [
            0.05, 0.025, 0.01, 0.5, 0.25, 0.1,
            1.0, 2.5, 5.
        ]
        wd_range = [0.01, 0.001, 0.0001, 0.0]
    else:
        raise ValueError(f"unknown train_type: {args.train_type}")
    return lr_range, wd_range
20,449 | import argparse
import os
import sys
import pprint
import PIL
from collections import defaultdict
from tabulate import tabulate
from typing import Tuple
import torch
from src.utils.file_io import PathManager
from src.utils import logging
from src.utils.distributed import get_rank, get_world_size
The provided code snippet includes necessary dependencies for implementing the `default_argument_parser` function. Write a Python function `def default_argument_parser()` to solve the following problem:
create a simple parser to wrap around config file
Here is the function:
def default_argument_parser():
    """Build the command-line parser wrapping a config file.

    Exposes ``--config-file``, ``--train-type`` and a trailing ``opts``
    remainder used to override config options from the command line.
    """
    arg_parser = argparse.ArgumentParser(description="visual-prompt")
    arg_parser.add_argument(
        "--config-file", default="", metavar="FILE", help="path to config file")
    arg_parser.add_argument(
        "--train-type", default="", help="training types")
    arg_parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return arg_parser
20,450 | import os
import re
import subprocess
from time import sleep
from Mooc.Mooc_Config import *
RE_SPEED = re.compile(r'\d+MiB/(\d+)MiB\((\d+)%\).*?DL:(\d*?\.?\d*?)([KM])iB')
RE_AVESPEED = re.compile(r'\|\s*?([\S]*?)([KM])iB/s\|')
class DownloadFailed(Exception):
    """Raised when aria2 cannot download a file after all retry attempts."""
    pass
def clear_files(dirname, filename):
    """Remove a (possibly partial) download and its aria2 control file.

    Silently does nothing for paths that do not exist.
    """
    target = os.path.join(dirname, filename)
    for candidate in (target, target + '.aria2'):
        if os.path.exists(candidate):
            os.remove(candidate)
def aria2_download_file(url, filename, dirname='.'):
    """Download `url` to `dirname/filename` with aria2, retrying up to 3 times.

    For .mp4 files, aria2's stdout is parsed (RE_SPEED / RE_AVESPEED) to
    render a console progress bar.  Returns None on success; after three
    failed attempts the partial files are removed and DownloadFailed is
    raised.

    NOTE(review): relies on module-level `aira2_cmd`, `RE_SPEED`,
    `RE_AVESPEED` and `LENGTH` from Mooc_Config — verify their formats match
    the installed aria2 version's output.
    """
    cnt = 0
    while cnt < 3:
        p = None
        try:
            cmd = aira2_cmd.format(url=url, dirname=dirname, filename=filename)
            p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, universal_newlines=True, encoding='utf8')
            lines = ''
            # Poll aria2 until it exits, echoing progress for video files.
            while p.poll() is None:
                line = p.stdout.readline().strip()
                if filename.endswith('.mp4') and line:
                    lines += line
                    match = RE_SPEED.search(line)
                    if match:
                        size, percent, speed, unit = match.groups()
                        percent = float(percent)
                        speed = float(speed)
                        # Normalize KiB/s to MiB/s for display.
                        if unit == 'K':
                            speed /= 1024
                        per = min(int(LENGTH*percent/100), LENGTH)
                        print('\r |-['+per*'*'+(LENGTH-per)*'.'+'] {:.0f}% {:.2f}M/s'.format(percent,speed),end=' (ctrl+c中断)')
            if p.returncode != 0:
                # Failed attempt: count it, clean partial files on the first
                # failure only, then retry after a short pause.
                cnt += 1
                if cnt==1:
                    clear_files(dirname, filename)
                sleep(0.16)
            else:
                # Success: print the final bar with the average speed.
                if filename.endswith('.mp4'):
                    match = RE_AVESPEED.search(lines)
                    if match:
                        ave_speed, unit = match.groups()
                        ave_speed = float(ave_speed)
                        if unit == 'K':
                            ave_speed /= 1024
                        print('\r |-['+LENGTH*'*'+'] {:.0f}% {:.2f}M/s'.format(100,ave_speed),end=' (完成) \n')
                return
        finally:
            if p:
                p.kill()  # make sure the child process is terminated
    clear_files(dirname, filename)
    raise DownloadFailed("download failed")
20,451 | from time import sleep
from functools import wraps
from socket import timeout, setdefaulttimeout
from urllib import request, parse
from urllib.error import ContentTooShortError, URLError, HTTPError
from Mooc.Mooc_Config import *
class RequestFailed(Exception):
    """Raised when an HTTP request keeps failing after all retries."""
    pass
def request_decorate(count=3):
    """Decorator factory: retry the wrapped call on transient network errors.

    The wrapped function is attempted up to `count` times (at least once),
    sleeping briefly between attempts.  A socket timeout aborts retrying
    immediately.  Exhausted retries raise RequestFailed.
    """
    def decorate(func):
        @wraps(func)
        def wrap_func(*args, **kwargs):
            last = count - 1
            for attempt in range(max(count, 1)):
                try:
                    return func(*args, **kwargs)
                except (ContentTooShortError, URLError, HTTPError, ConnectionResetError):
                    if attempt >= last:
                        break
                    sleep(0.32)
                except timeout:
                    # Timeouts are treated as fatal: do not retry.
                    break
            raise RequestFailed("request failed")
        return wrap_func
    return decorate
20,452 | from time import sleep
from functools import wraps
from socket import timeout, setdefaulttimeout
from urllib import request, parse
from urllib.error import ContentTooShortError, URLError, HTTPError
from Mooc.Mooc_Config import *
request.install_opener(opener)
The provided code snippet includes necessary dependencies for implementing the `request_get` function. Write a Python function `def request_get(url, decoding='utf8')` to solve the following problem:
get请求
Here is the function:
def request_get(url, decoding='utf8'):
    '''HTTP GET `url` and return the body decoded with `decoding`.

    The response is now closed in a ``finally`` block: the original leaked
    the connection whenever ``read()`` or ``decode()`` raised.
    Network errors propagate from ``urllib``.
    '''
    req = request.Request(url=url)
    response = request.urlopen(req, timeout=TIMEOUT)
    try:
        return response.read().decode(decoding)
    finally:
        response.close()
20,453 | from time import sleep
from functools import wraps
from socket import timeout, setdefaulttimeout
from urllib import request, parse
from urllib.error import ContentTooShortError, URLError, HTTPError
from Mooc.Mooc_Config import *
request.install_opener(opener)
The provided code snippet includes necessary dependencies for implementing the `request_post` function. Write a Python function `def request_post(url, data, decoding='utf8')` to solve the following problem:
post请求
Here is the function:
def request_post(url, data, decoding='utf8'):
    '''HTTP POST the mapping `data` (form-encoded) to `url`; return decoded body.

    The response is now closed in a ``finally`` block: the original leaked
    the connection whenever ``read()`` or ``decode()`` raised.
    '''
    payload = parse.urlencode(data).encode('utf8')
    req = request.Request(url=url, data=payload, method='POST')
    response = request.urlopen(req, timeout=TIMEOUT)
    try:
        return response.read().decode(decoding)
    finally:
        response.close()
20,454 | from time import sleep
from functools import wraps
from socket import timeout, setdefaulttimeout
from urllib import request, parse
from urllib.error import ContentTooShortError, URLError, HTTPError
from Mooc.Mooc_Config import *
request.install_opener(opener)
The provided code snippet includes necessary dependencies for implementing the `request_head` function. Write a Python function `def request_head(url)` to solve the following problem:
head请求
Here is the function:
def request_head(url):
    '''Fetch `url` and return its response headers as a dict.

    NOTE(review): despite the name this performs a plain GET (no
    ``method='HEAD'``), so the body is transferred by the server.
    The response is now closed in a ``finally`` block: the original leaked
    the connection if ``getheaders()`` raised.
    '''
    req = request.Request(url=url)
    response = request.urlopen(req, timeout=TIMEOUT)
    try:
        return dict(response.getheaders())
    finally:
        response.close()
20,455 | from time import sleep
from functools import wraps
from socket import timeout, setdefaulttimeout
from urllib import request, parse
from urllib.error import ContentTooShortError, URLError, HTTPError
from Mooc.Mooc_Config import *
request.install_opener(opener)
The provided code snippet includes necessary dependencies for implementing the `request_check` function. Write a Python function `def request_check(url)` to solve the following problem:
检查url是否可以访问
Here is the function:
def request_check(url):
    '''Probe that `url` is reachable.

    Opens the URL with a short timeout (TIMEOUT//10) and immediately closes
    the connection; any failure propagates as an exception, success returns
    None.
    '''
    probe = request.Request(url=url)
    response = request.urlopen(probe, timeout=TIMEOUT//10)
    response.close()
20,456 | import os
import re
from Mooc.Mooc_Config import *
from Mooc.Mooc_Request import *
from Mooc.Mooc_Download import *
from Mooc.Icourse163.Icourse163_Mooc import *
from Mooc.Icourses.Icourse_Cuoc import *
from Mooc.Icourses.Icourse_Mooc import *
def inquire():
    """Ask the user whether to continue; return True iff they answer 'y'.

    Keeps prompting until a 'y'/'n' answer is given; Ctrl-C / EOF just
    prints a newline and re-prompts.
    """
    answer = None
    while answer not in ('y', 'n'):
        try:
            answer = input("是否继续[y/n]: ")
        except (KeyboardInterrupt, EOFError):
            print()
    return answer == 'y'
def match_mooc(url):
    """Return an instance of the first Mooc class whose URL pattern matches.

    Returns None when no registered pattern matches `url`.
    """
    for name in courses_mooc:
        if courses_re.get(name).match(url):
            return courses_mooc.get(name)()
    return None
def mooc_interface():
    """Interactive console main loop of the Mooc downloader.

    Repeatedly: clear the screen, print the banner, read a course URL
    ('q' quits), match it to a downloader class, and drive the
    prepare/download cycle with retry prompts on network/download errors.

    NOTE(review): Windows-only — uses ``os.system("cls")``,
    ``os.system("pause")`` and ``os.startfile``.
    """
    try:
        while True:
            # Banner.
            os.system("cls")
            print("\t"+"="*91)
            print('\t|\t\t 慕课下载器(免费版v3.4.2) \tQQ群: {:^27s} |'.format(__QQgroup__))
            print("\t|\t\t icourse163.org, icourses.cn \t邮箱: {:^27s} |".format(__email__))
            print("\t"+"="*91)
            print("\t{:^90}".format("Github: https://github.com/PyJun/Mooc_Downloader"))
            print("\t{:^90}".format("博客: https://blog.csdn.net/qq_16166591/article/details/85249743"))
            print("\t{:^90}".format("下载路径: "+PATH))
            # Read a course URL; Ctrl-C just re-prompts.
            urlstr = None
            while not urlstr:
                try:
                    urlstr = input('\n输入一个视频课程网址(q退出): ')
                except KeyboardInterrupt:
                    print()
            if urlstr == 'q':
                break
            # Resolve the URL to a downloader; reject unknown URLs.
            mooc = match_mooc(urlstr)
            if not mooc:
                input("视频课程链接不合法,请回车继续...")
                continue
            if not mooc.set_mode():
                continue
            print("正在连接资源......")
            try:
                mooc.prepare(urlstr)
            except RequestFailed:
                print("网路请求异常!")
                input("请按回车键返回主界面...")
                continue
            # Download loop with retry-on-failure prompts.
            while True:
                try:
                    isdownload = mooc.download()
                    if isdownload:
                        print('"{}" 下载完毕!'.format(mooc.title))
                        print("下载路径: {}".format(mooc.rootDir))
                        os.startfile(mooc.rootDir)
                    else:
                        print('"{}" 还未开课!'.format(mooc.title))
                    input("请按回车键返回主界面...")
                    break
                except (RequestFailed, DownloadFailed) as err:
                    if isinstance(err, RequestFailed):
                        print("网路请求异常!")
                    else:
                        print("文件下载异常!")
                    if inquire():
                        continue
                    else:
                        break
                except KeyboardInterrupt:
                    print()
                    if inquire():
                        continue
                    else:
                        break
                except:
                    # Unknown failure: give up on this session entirely.
                    print("程序异常退出,希望反馈作者!")
                    return
    except KeyboardInterrupt:
        input("程序退出...")
    finally:
        # if (input("\n小哥哥,小姐姐,打个赏再走呗 …(⊙_⊙)… [y/n]: ") != 'n'):
        #     os.startfile(alipay_path)
        os.system("pause")
20,457 | import sys
import os
print(root_dir)
from sanic import Sanic
from sanic.response import json
from qanything_kernel.dependent_server.rerank_for_local_serve.rerank_server_backend import LocalRerankBackend
app = Sanic("rerank_server")
class LocalRerankBackend:
    """Passage reranker backed by a local Triton inference server.

    Over-long passages are split into overlapping token windows so that
    every [query, passage] pair fits ``max_length``; each window is scored
    remotely and a passage keeps the max score over its windows.

    NOTE(review): depends on module-level names imported elsewhere
    (AutoTokenizer, grpcclient, np, time, deepcopy, List) and on the
    LOCAL_RERANK_* configuration constants.
    """
    def __init__(self):
        tokenizer_path = 'qanything_kernel/dependent_server/rerank_for_local_serve/reranker_model_yd_1225'
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
        # Tokens shared between consecutive windows of a long passage.
        self.overlap_tokens = 80
        self.spe_id = self.tokenizer.sep_token_id
        self.batch_size = LOCAL_RERANK_BATCH
        self.max_length = LOCAL_RERANK_MAX_LENGTH
        self.model_name = LOCAL_RERANK_MODEL_NAME
        # Create the Triton client instance.
        self.triton_client = grpcclient.InferenceServerClient(url=LOCAL_RERANK_SERVICE_URL)

    def inference(self, serialized_inputs):
        """Run one padded batch through Triton; return sigmoid scores as a flat list."""
        # Prepare the input tensors.
        inputs = []
        for input_name, data in serialized_inputs.items():
            infer_input = grpcclient.InferInput(input_name, data.shape, grpcclient.np_to_triton_dtype(data.dtype))
            infer_input.set_data_from_numpy(data)
            inputs.append(infer_input)
        # Request only the "logits" output.
        outputs = []
        output_name = "logits"
        outputs.append(grpcclient.InferRequestedOutput(output_name))
        # Send the inference request.
        start_time = time.time()
        response = self.triton_client.infer(self.model_name, inputs, outputs=outputs)
        print('local rerank infer time: {} s'.format(time.time() - start_time), flush=True)
        # Fetch the response data.
        result_data = response.as_numpy(output_name)
        print('rerank res:', result_data, flush=True)
        # Map logits to (0, 1) relevance scores.
        sigmoid_scores = 1 / (1 + np.exp(-result_data))
        return sigmoid_scores.reshape(-1).tolist()

    def merge_inputs(self, chunk1_raw, chunk2):
        """Concatenate tokenized query (chunk1_raw) and passage (chunk2), appending [SEP]."""
        chunk1 = deepcopy(chunk1_raw)
        chunk1['input_ids'].extend(chunk2['input_ids'])
        chunk1['input_ids'].append(self.spe_id)
        chunk1['attention_mask'].extend(chunk2['attention_mask'])
        chunk1['attention_mask'].append(chunk2['attention_mask'][0])
        if 'token_type_ids' in chunk1:
            # Passage tokens (plus trailing [SEP]) belong to segment 1.
            token_type_ids = [1 for _ in range(len(chunk2['token_type_ids']) + 1)]
            chunk1['token_type_ids'].extend(token_type_ids)
        return chunk1

    def tokenize_preproc(self,
                         query: str,
                         passages: List[str],
                         ):
        """Tokenize query/passages into [query, passage] windows.

        Returns (windows, passage-index per window); long passages become
        several overlapping windows mapped back to the same index.
        """
        query_inputs = self.tokenizer.encode_plus(query, truncation=False, padding=False)
        # Room left for the passage after the query and one [SEP].
        max_passage_inputs_length = self.max_length - len(query_inputs['input_ids']) - 1
        assert max_passage_inputs_length > 10
        overlap_tokens = min(self.overlap_tokens, max_passage_inputs_length * 2 // 7)
        # Build [query, passage] pairs.
        merge_inputs = []
        merge_inputs_idxs = []
        for pid, passage in enumerate(passages):
            passage_inputs = self.tokenizer.encode_plus(passage, truncation=False, padding=False,
                                                        add_special_tokens=False)
            passage_inputs_length = len(passage_inputs['input_ids'])
            if passage_inputs_length <= max_passage_inputs_length:
                qp_merge_inputs = self.merge_inputs(query_inputs, passage_inputs)
                merge_inputs.append(qp_merge_inputs)
                merge_inputs_idxs.append(pid)
            else:
                # Slide a window with `overlap_tokens` of overlap across the passage.
                start_id = 0
                while start_id < passage_inputs_length:
                    end_id = start_id + max_passage_inputs_length
                    sub_passage_inputs = {k: v[start_id:end_id] for k, v in passage_inputs.items()}
                    start_id = end_id - overlap_tokens if end_id < passage_inputs_length else end_id
                    qp_merge_inputs = self.merge_inputs(query_inputs, sub_passage_inputs)
                    merge_inputs.append(qp_merge_inputs)
                    merge_inputs_idxs.append(pid)
        return merge_inputs, merge_inputs_idxs

    def predict(self,
                query: str,
                passages: List[str],
                ):
        """Score every passage against `query`; return one score per passage."""
        tot_batches, merge_inputs_idxs_sort = self.tokenize_preproc(query, passages)
        tot_scores = []
        for k in range(0, len(tot_batches), self.batch_size):
            batch = self.tokenizer.pad(
                tot_batches[k:k + self.batch_size],
                padding=True,
                max_length=None,
                pad_to_multiple_of=None,
                return_tensors="np"
            )
            scores = self.inference(batch)
            tot_scores.extend(scores)
        # A passage's score is the max over all of its windows.
        merge_tot_scores = [0 for _ in range(len(passages))]
        for pid, score in zip(merge_inputs_idxs_sort, tot_scores):
            merge_tot_scores[pid] = max(merge_tot_scores[pid], score)
        print("merge_tot_scores:", merge_tot_scores, flush=True)
        return merge_tot_scores
async def rerank(request):
    """HTTP handler: score `passages` against `query` via the app's rerank backend."""
    payload = request.json
    query = payload.get("query")
    passages = payload.get("passages")
    backend: LocalRerankBackend = request.app.ctx.local_rerank_backend
    print("local rerank query:", query, flush=True)
    print("local rerank passages number:", len(passages), flush=True)
    print("local rerank passages:", passages, flush=True)
    scores = backend.predict(query, passages)
    return json(scores)
20,458 | import sys
import os
from sanic import Sanic
from sanic.response import json
from qanything_kernel.dependent_server.rerank_for_local_serve.rerank_server_backend import LocalRerankBackend
class LocalRerankBackend:
    """Passage reranker backed by a local Triton inference server.

    Over-long passages are split into overlapping token windows so that
    every [query, passage] pair fits ``max_length``; each window is scored
    remotely and a passage keeps the max score over its windows.

    NOTE(review): depends on module-level names imported elsewhere
    (AutoTokenizer, grpcclient, np, time, deepcopy, List) and on the
    LOCAL_RERANK_* configuration constants.
    """
    def __init__(self):
        tokenizer_path = 'qanything_kernel/dependent_server/rerank_for_local_serve/reranker_model_yd_1225'
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
        # Tokens shared between consecutive windows of a long passage.
        self.overlap_tokens = 80
        self.spe_id = self.tokenizer.sep_token_id
        self.batch_size = LOCAL_RERANK_BATCH
        self.max_length = LOCAL_RERANK_MAX_LENGTH
        self.model_name = LOCAL_RERANK_MODEL_NAME
        # Create the Triton client instance.
        self.triton_client = grpcclient.InferenceServerClient(url=LOCAL_RERANK_SERVICE_URL)

    def inference(self, serialized_inputs):
        """Run one padded batch through Triton; return sigmoid scores as a flat list."""
        # Prepare the input tensors.
        inputs = []
        for input_name, data in serialized_inputs.items():
            infer_input = grpcclient.InferInput(input_name, data.shape, grpcclient.np_to_triton_dtype(data.dtype))
            infer_input.set_data_from_numpy(data)
            inputs.append(infer_input)
        # Request only the "logits" output.
        outputs = []
        output_name = "logits"
        outputs.append(grpcclient.InferRequestedOutput(output_name))
        # Send the inference request.
        start_time = time.time()
        response = self.triton_client.infer(self.model_name, inputs, outputs=outputs)
        print('local rerank infer time: {} s'.format(time.time() - start_time), flush=True)
        # Fetch the response data.
        result_data = response.as_numpy(output_name)
        print('rerank res:', result_data, flush=True)
        # Map logits to (0, 1) relevance scores.
        sigmoid_scores = 1 / (1 + np.exp(-result_data))
        return sigmoid_scores.reshape(-1).tolist()

    def merge_inputs(self, chunk1_raw, chunk2):
        """Concatenate tokenized query (chunk1_raw) and passage (chunk2), appending [SEP]."""
        chunk1 = deepcopy(chunk1_raw)
        chunk1['input_ids'].extend(chunk2['input_ids'])
        chunk1['input_ids'].append(self.spe_id)
        chunk1['attention_mask'].extend(chunk2['attention_mask'])
        chunk1['attention_mask'].append(chunk2['attention_mask'][0])
        if 'token_type_ids' in chunk1:
            # Passage tokens (plus trailing [SEP]) belong to segment 1.
            token_type_ids = [1 for _ in range(len(chunk2['token_type_ids']) + 1)]
            chunk1['token_type_ids'].extend(token_type_ids)
        return chunk1

    def tokenize_preproc(self,
                         query: str,
                         passages: List[str],
                         ):
        """Tokenize query/passages into [query, passage] windows.

        Returns (windows, passage-index per window); long passages become
        several overlapping windows mapped back to the same index.
        """
        query_inputs = self.tokenizer.encode_plus(query, truncation=False, padding=False)
        # Room left for the passage after the query and one [SEP].
        max_passage_inputs_length = self.max_length - len(query_inputs['input_ids']) - 1
        assert max_passage_inputs_length > 10
        overlap_tokens = min(self.overlap_tokens, max_passage_inputs_length * 2 // 7)
        # Build [query, passage] pairs.
        merge_inputs = []
        merge_inputs_idxs = []
        for pid, passage in enumerate(passages):
            passage_inputs = self.tokenizer.encode_plus(passage, truncation=False, padding=False,
                                                        add_special_tokens=False)
            passage_inputs_length = len(passage_inputs['input_ids'])
            if passage_inputs_length <= max_passage_inputs_length:
                qp_merge_inputs = self.merge_inputs(query_inputs, passage_inputs)
                merge_inputs.append(qp_merge_inputs)
                merge_inputs_idxs.append(pid)
            else:
                # Slide a window with `overlap_tokens` of overlap across the passage.
                start_id = 0
                while start_id < passage_inputs_length:
                    end_id = start_id + max_passage_inputs_length
                    sub_passage_inputs = {k: v[start_id:end_id] for k, v in passage_inputs.items()}
                    start_id = end_id - overlap_tokens if end_id < passage_inputs_length else end_id
                    qp_merge_inputs = self.merge_inputs(query_inputs, sub_passage_inputs)
                    merge_inputs.append(qp_merge_inputs)
                    merge_inputs_idxs.append(pid)
        return merge_inputs, merge_inputs_idxs

    def predict(self,
                query: str,
                passages: List[str],
                ):
        """Score every passage against `query`; return one score per passage."""
        tot_batches, merge_inputs_idxs_sort = self.tokenize_preproc(query, passages)
        tot_scores = []
        for k in range(0, len(tot_batches), self.batch_size):
            batch = self.tokenizer.pad(
                tot_batches[k:k + self.batch_size],
                padding=True,
                max_length=None,
                pad_to_multiple_of=None,
                return_tensors="np"
            )
            scores = self.inference(batch)
            tot_scores.extend(scores)
        # A passage's score is the max over all of its windows.
        merge_tot_scores = [0 for _ in range(len(passages))]
        for pid, score in zip(merge_inputs_idxs_sort, tot_scores):
            merge_tot_scores[pid] = max(merge_tot_scores[pid], score)
        print("merge_tot_scores:", merge_tot_scores, flush=True)
        return merge_tot_scores
async def init_local_doc_qa(app, loop):
    """Sanic startup listener: attach a fresh LocalRerankBackend to the app context."""
    backend = LocalRerankBackend()
    app.ctx.local_rerank_backend = backend
20,459 |
async def ocr_request(request):
    """Run OCR on a base64-encoded raw image buffer posted as JSON.

    Expects JSON keys: ``img64`` (base64 of raw pixel bytes), ``height``,
    ``width``, ``channels``.  Returns ``{'results': ...}`` on success or a
    400 error when no image was supplied.
    """
    payload = request.json  # renamed from `input`, which shadowed the builtin
    img_file = payload['img64']
    # Validate BEFORE decoding: the original only checked after
    # b64decode/reshape, so an empty upload crashed instead of returning 400.
    if not img_file:
        return response.json({'error': 'No file was uploaded.'}, status=400)
    height = payload['height']
    width = payload['width']
    channels = payload['channels']
    binary_data = base64.b64decode(img_file)
    img_array = np.frombuffer(binary_data, dtype=np.uint8).reshape((height, width, channels))
    logger.info("shape: {}".format(img_array.shape))
    # Run recognition with the module-level PaddleOCR engine.
    res = ocr_engine.ocr(img_array)
    logger.info("ocr result: {}".format(res))
    return response.json({'results': res})
20,460 | import enum
import logging
import time
from datetime import datetime
from enum import Enum
def log_timestamp() -> str:
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS.ffffff'."""
    now = datetime.fromtimestamp(time.time())
    return now.strftime("%Y-%m-%d %H:%M:%S.%f")
20,461 | import argparse
import asyncio
import json
import logging
import multiprocessing as mp
import os
import psutil
import queue
import string
import signal
import sys
import traceback
import time
import threading
import sanic
from sanic import Sanic, Request
from sanic.response import ResponseStream
from collections import OrderedDict
from datetime import datetime
from transformers import AutoTokenizer
from tritonclient.utils import InferenceServerException
from typing import List, Tuple, Dict, Optional, Any
from urllib.parse import unquote
sys.path.append("./")
from modeling_qwen import QwenTritonModel
from utils import log_timestamp, CODES
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
def signal_handler(signum, frame) -> None:
    """Forward SIGINT to every child process, then exit with status 0.

    Registered for SIGINT and SIGTERM at module level.  The original
    computed a human-readable signal name into ``signal_`` but never used
    it; that dead code is removed.
    """
    for child in mp.active_children():
        os.kill(child.pid, signal.SIGINT)
    sys.exit(0)
20,462 | import argparse
import asyncio
import json
import logging
import multiprocessing as mp
import os
import psutil
import queue
import string
import signal
import sys
import traceback
import time
import threading
import sanic
from sanic import Sanic, Request
from sanic.response import ResponseStream
from collections import OrderedDict
from datetime import datetime
from transformers import AutoTokenizer
from tritonclient.utils import InferenceServerException
from typing import List, Tuple, Dict, Optional, Any
from urllib.parse import unquote
from modeling_qwen import QwenTritonModel
from utils import log_timestamp, CODES
def is_process_running(pid: int) -> bool:
    """Return True when a process with `pid` exists and is accessible."""
    try:
        psutil.Process(pid)
        return True
    except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
        return False
20,463 | import argparse
import asyncio
import json
import logging
import multiprocessing as mp
import os
import psutil
import queue
import string
import signal
import sys
import traceback
import time
import threading
import sanic
from sanic import Sanic, Request
from sanic.response import ResponseStream
from collections import OrderedDict
from datetime import datetime
from transformers import AutoTokenizer
from tritonclient.utils import InferenceServerException
from typing import List, Tuple, Dict, Optional, Any
from urllib.parse import unquote
from modeling_qwen import QwenTritonModel
from utils import log_timestamp, CODES
model = QwenTritonModel(model_url=args.model_url)
async def check_input(request: Request):
    """Respond with the total token count of prompt + history as plain text.

    Normalizes the incoming params in place (JSON-decodes a URL-encoded
    `hist_messages` string, optionally URL-decodes `prompt`) and then asks
    the module-level `model` how many tokens the assembled query would use.
    """
    params = await request.json()
    assert (isinstance(params, dict) or isinstance(params,
                                                   OrderedDict)), "params were expected as dict or OrderedDict, but got {}.".format(
        type(params))
    # A URL-encoded JSON string for the history is decoded into a dict.
    if type(params.get('hist_messages', {})) == str:
        unquote_messages = unquote(params.get('hist_messages', {}))
        params['hist_messages'] = json.loads(unquote_messages)
    prompt = params.get('prompt', "")
    if params.get('url_encode', False):
        params['prompt'] = unquote(prompt)
    else:
        params['prompt'] = prompt
    message = params.get("prompt", "")
    hist_messages = params.get("hist_messages", OrderedDict())
    max_new_tokens = params.get("max_new_tokens", model.max_new_tokens)
    ## Return the total token count of prompt + hist_messages.
    tokens_number = model.check_query_tokens(message, max_new_tokens, hist_messages=hist_messages, prefix=None,
                                             response="")
    return sanic.response.text(str(tokens_number))
20,464 | import argparse
import asyncio
import json
import logging
import multiprocessing as mp
import os
import psutil
import queue
import string
import signal
import sys
import traceback
import time
import threading
import sanic
from sanic import Sanic, Request
from sanic.response import ResponseStream
from collections import OrderedDict
from datetime import datetime
from transformers import AutoTokenizer
from tritonclient.utils import InferenceServerException
from typing import List, Tuple, Dict, Optional, Any
from urllib.parse import unquote
from modeling_qwen import QwenTritonModel
from utils import log_timestamp, CODES
global_counter = 0
model_semaphore = None
args = parser.parse_args()
def generator_llm(params: OrderedDict) -> str:
    """Yield SSE-formatted chunks ('data: {json}\\n\\n') for one LLM request.

    Spawns a thread that streams token ids from TritonServer into a queue,
    incrementally decodes them (buffering incomplete UTF-8 sequences), and
    yields JSON payloads.  Non-list queue items are converted to error
    payloads via the CODES enum.
    """
    # Write an error code/description into the response payload.
    def insert_error(resp_data: dict, error_enum) -> None:
        resp_data["text"] = error_enum.desc
        resp_data["error_code"] = error_enum.code

    # Serialize one payload as a server-sent-events data frame.
    def get_response(resp_data: Dict[str, Any]) -> str:
        return "data: " + json.dumps(resp_data) + "\n\n"

    # Normalize the request params in place and extract decoding arguments.
    def parse_params(params: OrderedDict) -> Tuple:
        assert (isinstance(params, dict) or isinstance(params,
                                                       OrderedDict)), "params were expected as dict or OrderedDict, but got {}.".format(
            type(params))
        if type(params.get('hist_messages', {})) == str:
            unquote_messages = unquote(params.get('hist_messages', {}))
            params['hist_messages'] = json.loads(unquote_messages)
        prompt = params.get('prompt', "")
        if params.get('url_encode', False):
            params['prompt'] = unquote(prompt)
        else:
            params['prompt'] = prompt
        request_id = str(params.get("request_id", "-1"))
        # Clamp max_new_tokens to the model's limit.
        max_new_tokens = int(params.get("max_new_tokens", model.max_new_tokens))
        max_new_tokens = min(max_new_tokens, model.max_new_tokens)
        temperature = float(params.get("temperature", 0.6))
        repetition_penalty = float(params.get("repetition_penalty", 1.2))
        top_p = float(params.get("top_p", 1.0))
        top_k = int(params.get("top_k", 4))
        random_seed_ = int(params.get("random_seed", -1))
        if random_seed_ == -1:
            random_seed_ = 231221
        if request_id == "-1":
            request_id = random_seed_
        params["request_id"] = request_id
        params["random_seed"] = random_seed_
        infer_decode_args = {
            "max_new_tokens": max_new_tokens,
            "top_k": top_k,
            "top_p": top_p,
            "temperature": temperature,
            "repetition_penalty": repetition_penalty,
            "random_seed_": random_seed_,
            "request_id": request_id
        }
        return tuple([infer_decode_args, params])

    ## Parse parameters.
    infer_decode_args, params = parse_params(params)
    request_id = infer_decode_args.get("request_id")
    check_in = int(params.get("check_in", 1))
    chunk_out = bool(params.get("chunk_out", True))
    ## Build the prompt from message + multi-round history.
    message = params.get("prompt", "")
    hist_messages = params.get("hist_messages", OrderedDict())
    max_new_tokens = infer_decode_args.get("max_new_tokens", model.max_new_tokens)
    query_prompt_tuple = model.get_multiround_template(message, max_new_tokens, hist_messages=hist_messages,
                                                       prefix=None, response="")
    messages = query_prompt_tuple[1]
    query = query_prompt_tuple[0]
    input_len = len(query)
    ## Query TritonServer in a worker thread; results stream via a queue.
    response_data = {
        "random_seed": infer_decode_args.get("random_seed", -1),
        "request_id": request_id,
        "version": WORKER_VERSION,
    }
    result_queue = queue.Queue()
    proc = threading.Thread(target=model.chat_stream, args=(query, result_queue), kwargs=infer_decode_args)
    proc.start()
    proc_pid = threading.get_native_id()
    request_id = "{}_{}".format(request_id, proc_pid)
    # State for incremental decoding of partially-received token ids.
    bytes_ids = []
    decode_len = 0
    bytes_len = 0
    punc = string.punctuation
    try:
        while True:
            res = result_queue.get()
            # None is the end-of-stream sentinel from the worker thread.
            if res is None:
                break
            if isinstance(res, List):
                if chunk_out:
                    # Accumulate only the not-yet-decoded suffix of ids.
                    bytes_ids += res[input_len + decode_len + bytes_len:]
                    decoding = tokenizer.decode(bytes_ids, skip_special_tokens=True)
                    # Replacement char means a multi-byte sequence is split;
                    # keep buffering until it decodes cleanly.
                    if isinstance(decoding, bytes) or (isinstance(decoding, str) and '�' in decoding):
                        bytes_len = len(bytes_ids)
                        continue
                    else:
                        decode_len += len(bytes_ids)
                        bytes_len = 0
                        bytes_ids = []
                else:
                    # Non-chunked mode: re-decode the whole output each time.
                    output_len = len(res[input_len:])
                    decoding = tokenizer.decode(res[input_len:], skip_special_tokens=True)
                decoding = model.process_response(decoding)
                response_data["text"] = decoding
                response_data["error_code"] = CODES.SUCCESS.code
                try:
                    response_instance = get_response(response_data)
                except:
                    insert_error(response_data, CODES.JSON_FORMAT_ERROR)
                    response_instance = get_response(response_data)
                yield response_instance
            elif isinstance(res, InferenceServerException):
                insert_error(response_data, CODES.TRITON_INFERENCE_ERROR)
                yield get_response(response_data)
            elif isinstance(res, Tuple):
                insert_error(response_data, CODES.TRITON_CALLBACK_ERROR)
                yield get_response(response_data)
            else:
                insert_error(response_data, CODES.UNKNOWN_ERROR)
                yield get_response(response_data)
        try:
            proc.join()
        except Exception as e:
            traceback.print_exc()
    except Exception as e:
        # Map known exception types to error codes; always emit one last frame.
        if isinstance(e, RuntimeError):
            exception_enum = CODES.RUNTIME_ERROR
        elif isinstance(e, TypeError):
            exception_enum = CODES.TYPE_ERROR
        else:
            exception_enum = CODES.UNKNOWN_ERROR
        insert_error(response_data, exception_enum)
        yield get_response(response_data)
async def release_model_semaphore():
    """Release one model-concurrency slot and decrement the in-flight counter."""
    global model_semaphore, global_counter
    global_counter -= 1
    model_semaphore.release()
async def generate_stream(request: Request):
    """Streaming endpoint: send generator_llm's SSE chunks to the client.

    Lazily creates a module-wide semaphore bounding concurrent generations
    (args.limit_model_concurrency) and acquires one slot per request; the
    slot is released inside the stream callback after the last chunk.
    """
    global model_semaphore, global_counter
    global_counter += 1
    params = request.json
    print("params:", params)
    # Lazy init so the limit is read after argument parsing.
    if model_semaphore is None:
        limit_model_concurrency = args.limit_model_concurrency
        model_semaphore = asyncio.Semaphore(limit_model_concurrency)
    await model_semaphore.acquire()

    async def generate_answer(response):
        # Pump the (synchronous) generator into the async response,
        # yielding to the event loop between chunks.
        for chunk in generator_llm(params):
            await response.write(chunk)
            await asyncio.sleep(0.001)
        await release_model_semaphore()
        await response.eof()
    # await asyncio.sleep(0.001)
    response = ResponseStream(generate_answer, content_type='text/event-stream')
    # response.headers['Cache-Control'] = 'no-cache'
    return response
20,465 | import argparse
import asyncio
import json
import logging
import multiprocessing as mp
import os
import psutil
import queue
import string
import signal
import sys
import traceback
import time
import threading
import sanic
from sanic import Sanic, Request
from sanic.response import ResponseStream
from collections import OrderedDict
from datetime import datetime
from transformers import AutoTokenizer
from tritonclient.utils import InferenceServerException
from typing import List, Tuple, Dict, Optional, Any
from urllib.parse import unquote
from modeling_qwen import QwenTritonModel
from utils import log_timestamp, CODES
status = WorkerStatus(args.limit_model_concurrency)
async def api_get_status(request: Request):
    """Return the worker's current status (from the module-level WorkerStatus) as JSON."""
    return sanic.response.json(status.get_status())
20,466 | import argparse
import asyncio
import json
import logging
import multiprocessing as mp
import os
import psutil
import queue
import string
import signal
import sys
import traceback
import time
import threading
import sanic
from sanic import Sanic, Request
from sanic.response import ResponseStream
from collections import OrderedDict
from datetime import datetime
from transformers import AutoTokenizer
from tritonclient.utils import InferenceServerException
from typing import List, Tuple, Dict, Optional, Any
from urllib.parse import unquote
from modeling_qwen import QwenTritonModel
from utils import log_timestamp, CODES
async def api_health_check(request: Request):
    """Liveness probe: always respond with a 200 and a fixed marker string."""
    return sanic.response.text("HTTP/1.1 200 OK")
20,467 | from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
from dataclasses import dataclass
import logging as logger
@dataclass
class Template:
    """Chat prompt template: prefix/prompt/separator pieces plus stop words.

    The ``@dataclass`` decorator is restored: ``dataclass`` is imported at
    the top of the file and ``register_template`` constructs
    ``Template(prefix=..., prompt=..., ...)`` with keyword arguments, which
    fails without a generated ``__init__``.
    """
    prefix: List[Union[str, Dict[str, str]]]
    prompt: List[Union[str, Dict[str, str]]]
    sep: List[Union[str, Dict[str, str]]]
    stop_words: List[str]
    use_history: bool

    def encode_oneturn(
        self,
        tokenizer: "PreTrainedTokenizer",
        query: str,
        resp: str,
        history: Optional[List[Tuple[str, str]]] = None,
        prefix: Optional[str] = None
    ) -> Tuple[List[int], List[int]]:
        r"""
        Returns a single pair of token ids representing prompt and response respectively.
        """
        prefix, history = self._format(query, resp, history, prefix)
        encoded_pairs = self._encode(tokenizer, prefix, history)
        # Concatenate all earlier turns plus the final query as the prompt.
        prompt_ids = []
        for query_ids, resp_ids in encoded_pairs[:-1]:
            prompt_ids = prompt_ids + query_ids + resp_ids
        prompt_ids = prompt_ids + encoded_pairs[-1][0]
        return prompt_ids, encoded_pairs[-1][1]

    def encode_multiturn(
        self,
        tokenizer: "PreTrainedTokenizer",
        query: str,
        resp: str,
        history: Optional[List[Tuple[str, str]]] = None,
        prefix: Optional[str] = None
    ) -> List[Tuple[List[int], List[int]]]:
        r"""
        Returns multiple pairs of token ids representing prompts and responses respectively.
        """
        prefix, history = self._format(query, resp, history, prefix)
        encoded_pairs = self._encode(tokenizer, prefix, history)
        return encoded_pairs

    def _format(
        self,
        query: str,
        resp: str,
        history: Optional[List[Tuple[str, str]]] = None,
        prefix: Optional[str] = None
    ) -> Tuple[List[Union[str, Dict[str, str]]], List[Tuple[str, str]]]:
        r"""
        Aligns inputs to a special format.
        """
        prefix = [prefix] if prefix else self.prefix  # use prefix if provided
        history = history if (history and self.use_history) else []
        history = history + [(query, resp)]
        return prefix, history

    def _get_special_ids(
        self,
        tokenizer: "PreTrainedTokenizer"
    ) -> Tuple[List[int], List[int]]:
        """Return ([bos?], [eos]) id lists; BOS is optional, EOS mandatory."""
        if tokenizer.bos_token_id:
            bos_ids = [tokenizer.bos_token_id]
        else:
            bos_ids = []  # bos token is optional
        if tokenizer.eos_token_id:
            eos_ids = [tokenizer.eos_token_id]
        else:
            raise ValueError("EOS token is required.")
        return bos_ids, eos_ids

    def _encode(
        self,
        tokenizer: "PreTrainedTokenizer",
        prefix: List[Union[str, Dict[str, str]]],
        history: List[Tuple[str, str]]
    ) -> List[Tuple[List[int], List[int]]]:
        r"""
        Encodes formatted inputs to pairs of token ids.
        """
        bos_ids, eos_ids = self._get_special_ids(tokenizer)
        sep_ids = self._convert_inputs_to_ids(tokenizer, context=self.sep)
        encoded_pairs = []
        for turn_idx, (query, resp) in enumerate(history):
            # Only the very first turn carries the prefix; later turns just
            # get the separator.
            if turn_idx != 0:
                prefix_ids = sep_ids
            elif prefix:
                prefix_ids = self._convert_inputs_to_ids(tokenizer, context=prefix) + eos_ids + sep_ids
            else:
                prefix_ids = []
            query_ids = self._convert_inputs_to_ids(tokenizer, context=self.prompt, query=query)
            resp_ids = self._convert_inputs_to_ids(tokenizer, context=[resp])
            encoded_pairs.append((bos_ids + prefix_ids + query_ids, resp_ids + eos_ids))
        return encoded_pairs

    def _convert_inputs_to_ids(
        self,
        tokenizer: "PreTrainedTokenizer",
        context: List[Union[str, Dict[str, str]]],
        query: Optional[str] = ""
    ) -> List[int]:
        r"""
        Converts context to token ids.

        Strings have "{{query}}" substituted once then are tokenized;
        dict elements name a single special token.
        """
        if hasattr(tokenizer, "tokenizer"):  # for tiktoken tokenizer (Qwen)
            kwargs = dict(allowed_special="all")
        else:
            kwargs = dict(add_special_tokens=False)
        token_ids = []
        for elem in context:
            if isinstance(elem, str):
                elem = elem.replace("{{query}}", query, 1)
                token_ids = token_ids + tokenizer.encode(elem, **kwargs)
            elif isinstance(elem, dict):
                token_ids = token_ids + [tokenizer.convert_tokens_to_ids(elem.get("token"))]
            else:
                raise NotImplementedError
        return token_ids
templates: Dict[str, Template] = {}
def register_template(
    name: str,
    prefix: List[Union[str, Dict[str, str]]],
    prompt: List[Union[str, Dict[str, str]]],
    sep: List[Union[str, Dict[str, str]]],
    stop_words: List[str],
    use_history: bool
) -> None:
    """Build a Template from the given pieces and register it under `name`."""
    templates[name] = Template(
        prefix=prefix,
        prompt=prompt,
        sep=sep,
        stop_words=stop_words,
        use_history=use_history
    )
20,468 | from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
from dataclasses import dataclass
import logging as logger
@dataclass
class Template:
    """A chat prompt template describing how a conversation is serialized.

    Fields:
        prefix: pieces emitted once at the start of a conversation.
        prompt: pieces emitted for every user turn; the literal ``{{query}}``
            placeholder inside a string piece is substituted with the query.
        sep: separator pieces emitted between turns.
        stop_words: strings that should terminate generation.
        use_history: whether earlier (query, response) turns are kept.

    Each piece is either a plain string (tokenized normally) or a dict of the
    form ``{"token": "<special>"}`` mapped via ``convert_tokens_to_ids``.

    Fix: the class only had bare field annotations and no ``__init__``, yet it
    is instantiated with keyword arguments (see ``register_template``); the
    ``@dataclass`` decorator generates the required constructor.
    """

    prefix: List[Union[str, Dict[str, str]]]
    prompt: List[Union[str, Dict[str, str]]]
    sep: List[Union[str, Dict[str, str]]]
    stop_words: List[str]
    use_history: bool

    def encode_oneturn(
        self,
        tokenizer: "PreTrainedTokenizer",
        query: str,
        resp: str,
        history: Optional[List[Tuple[str, str]]] = None,
        prefix: Optional[str] = None
    ) -> Tuple[List[int], List[int]]:
        r"""
        Returns a single pair of token ids representing prompt and response respectively.

        All turns except the last are flattened into the prompt ids; the last
        turn's response ids are returned separately as the target.
        """
        prefix, history = self._format(query, resp, history, prefix)
        encoded_pairs = self._encode(tokenizer, prefix, history)
        prompt_ids = []
        for query_ids, resp_ids in encoded_pairs[:-1]:
            prompt_ids = prompt_ids + query_ids + resp_ids
        prompt_ids = prompt_ids + encoded_pairs[-1][0]
        return prompt_ids, encoded_pairs[-1][1]

    def encode_multiturn(
        self,
        tokenizer: "PreTrainedTokenizer",
        query: str,
        resp: str,
        history: Optional[List[Tuple[str, str]]] = None,
        prefix: Optional[str] = None
    ) -> List[Tuple[List[int], List[int]]]:
        r"""
        Returns multiple pairs of token ids representing prompts and responses respectively.
        """
        prefix, history = self._format(query, resp, history, prefix)
        encoded_pairs = self._encode(tokenizer, prefix, history)
        return encoded_pairs

    def _format(
        self,
        query: str,
        resp: str,
        history: Optional[List[Tuple[str, str]]] = None,
        prefix: Optional[str] = None
    ) -> Tuple[List[Union[str, Dict[str, str]]], List[Tuple[str, str]]]:
        r"""
        Aligns inputs to a special format.

        The current (query, resp) pair is appended as the final turn; history
        is dropped entirely when ``use_history`` is False.
        """
        prefix = [prefix] if prefix else self.prefix  # use prefix if provided
        history = history if (history and self.use_history) else []
        history = history + [(query, resp)]
        return prefix, history

    def _get_special_ids(
        self,
        tokenizer: "PreTrainedTokenizer"
    ) -> Tuple[List[int], List[int]]:
        """Return ([bos_id], [eos_id]); bos is optional, eos is mandatory."""
        if tokenizer.bos_token_id:
            bos_ids = [tokenizer.bos_token_id]
        else:
            bos_ids = []  # bos token is optional
        if tokenizer.eos_token_id:
            eos_ids = [tokenizer.eos_token_id]
        else:
            raise ValueError("EOS token is required.")
        return bos_ids, eos_ids

    def _encode(
        self,
        tokenizer: "PreTrainedTokenizer",
        prefix: List[Union[str, Dict[str, str]]],
        history: List[Tuple[str, str]]
    ) -> List[Tuple[List[int], List[int]]]:
        r"""
        Encodes formatted inputs to pairs of token ids.

        Turn 0 carries the conversation prefix (followed by eos + sep); later
        turns carry only the separator.
        """
        bos_ids, eos_ids = self._get_special_ids(tokenizer)
        sep_ids = self._convert_inputs_to_ids(tokenizer, context=self.sep)
        encoded_pairs = []
        for turn_idx, (query, resp) in enumerate(history):
            if turn_idx != 0:
                prefix_ids = sep_ids
            elif prefix:
                prefix_ids = self._convert_inputs_to_ids(tokenizer, context=prefix) + eos_ids + sep_ids
            else:
                prefix_ids = []
            query_ids = self._convert_inputs_to_ids(tokenizer, context=self.prompt, query=query)
            resp_ids = self._convert_inputs_to_ids(tokenizer, context=[resp])
            encoded_pairs.append((bos_ids + prefix_ids + query_ids, resp_ids + eos_ids))
        return encoded_pairs

    def _convert_inputs_to_ids(
        self,
        tokenizer: "PreTrainedTokenizer",
        context: List[Union[str, Dict[str, str]]],
        query: Optional[str] = ""
    ) -> List[int]:
        r"""
        Converts context to token ids.

        String pieces have their first ``{{query}}`` placeholder substituted
        and are tokenized; dict pieces are treated as single special tokens.
        """
        if hasattr(tokenizer, "tokenizer"):  # for tiktoken tokenizer (Qwen)
            kwargs = dict(allowed_special="all")
        else:
            kwargs = dict(add_special_tokens=False)
        token_ids = []
        for elem in context:
            if isinstance(elem, str):
                elem = elem.replace("{{query}}", query, 1)
                token_ids = token_ids + tokenizer.encode(elem, **kwargs)
            elif isinstance(elem, dict):
                token_ids = token_ids + [tokenizer.convert_tokens_to_ids(elem.get("token"))]
            else:
                raise NotImplementedError
        return token_ids
templates: Dict[str, Template] = {}


def get_template_and_fix_tokenizer(
    name: str,
    tokenizer: "PreTrainedTokenizer"
) -> Template:
    """Look up a registered template and patch the tokenizer's special tokens.

    Ensures the tokenizer has an eos token (taken from the template's stop
    words, else ``<|endoftext|>``), a pad token, and registers the template's
    stop words as additional special tokens. The tokenizer is mutated in place.
    """
    template = templates.get(name, None)
    assert template is not None, "Template {} does not exist.".format(name)
    if tokenizer.eos_token_id is None:  # inplace method
        new_eos = template.stop_words[0] if len(template.stop_words) else "<|endoftext|>"
        tokenizer.eos_token = new_eos
        logger.info("Add eos token: {}".format(tokenizer.eos_token))
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token = tokenizer.eos_token
        logger.info("Add pad token: {}".format(tokenizer.pad_token))
    tokenizer.add_special_tokens(dict(additional_special_tokens=template.stop_words))
    return template
import base64
import logging
import os
import unicodedata
from typing import Collection, Dict, List, Set, Tuple, Union
import tiktoken
from transformers import PreTrainedTokenizer, AddedToken
def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
with open(tiktoken_bpe_file, "rb") as f:
contents = f.read()
return {
base64.b64decode(token): int(rank)
for token, rank in (line.split() for line in contents.splitlines() if line)
} | null |
from qanything_kernel.configs.model_config import VECTOR_SEARCH_TOP_K, CHUNK_SIZE, VECTOR_SEARCH_SCORE_THRESHOLD, \
PROMPT_TEMPLATE, STREAMING
from typing import List
from qanything_kernel.connector.embedding.embedding_for_online import YouDaoEmbeddings
from qanything_kernel.connector.embedding.embedding_for_local import YouDaoLocalEmbeddings
import time
from qanything_kernel.connector.llm import OpenAILLM, ZiyueLLM
from langchain.schema import Document
from qanything_kernel.connector.database.mysql.mysql_client import KnowledgeBaseManager
from qanything_kernel.connector.database.milvus.milvus_client import MilvusClient
from qanything_kernel.utils.custom_log import debug_logger, qa_logger
from .local_file import LocalFile
from qanything_kernel.utils.general_utils import get_time
import requests
import traceback
import logging
def _embeddings_hash(self):
return hash(self.model_name) | null |
async def add_cors_headers(request, response):
    """Sanic response middleware: attach permissive CORS headers to every response."""
    cors_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "GET, POST, PUT, DELETE, OPTIONS",
        "Access-Control-Allow-Headers": "Content-Type, Authorization",
        "Access-Control-Allow-Credentials": "true",  # needed when the client sends credentials
    }
    for header_name, header_value in cors_headers.items():
        response.headers[header_name] = header_value
async def handle_options_request(request):
    """Answer CORS preflight (OPTIONS) requests; other methods fall through (None)."""
    if request.method != "OPTIONS":
        return None
    preflight_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "GET, POST, PUT, DELETE, OPTIONS",
        "Access-Control-Allow-Headers": "Content-Type, Authorization",
        "Access-Control-Allow-Credentials": "true",  # needed when the client sends credentials
    }
    return sanic_response.text("", headers=preflight_headers)
class LocalDocQA:
    """Local document question-answering engine.

    Ties together an embedding backend, a Milvus vector store, a MySQL
    metadata store (KnowledgeBaseManager), an optional rerank sidecar HTTP
    service, and an LLM to answer questions over user-uploaded documents.
    """

    def __init__(self):
        # Backends are attached later by init_cfg(); placeholders until then.
        self.llm: object = None
        self.embeddings: object = None
        self.top_k: int = VECTOR_SEARCH_TOP_K  # per-query retrieval depth
        self.chunk_size: int = CHUNK_SIZE
        self.chunk_conent: bool = True  # NOTE(review): likely a typo of "chunk_content"; kept for compatibility
        self.score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
        self.milvus_kbs: List[MilvusClient] = []  # cache of per-(user, kb_ids) Milvus clients
        self.milvus_summary: KnowledgeBaseManager = None
        self.mode: str = None  # 'local' or online mode; set by init_cfg()
        # Sidecar HTTP services (rerank + OCR), assumed to run on this host.
        self.local_rerank_service_url = "http://0.0.0.0:8776"
        self.ocr_url = 'http://0.0.0.0:8010/ocr'

    def get_ocr_result(self, image_data: dict):
        """POST *image_data* to the OCR sidecar and return its 'results' payload."""
        response = requests.post(self.ocr_url, json=image_data)
        response.raise_for_status()  # raises if the request returned an error status code
        return response.json()['results']

    def init_cfg(self, mode='local'):
        """Initialise embeddings, the LLM backend and the MySQL metadata manager.

        Args:
            mode: 'local' selects the in-process ZiyueLLM; anything else
                selects OpenAILLM.
        """
        self.mode = mode
        self.embeddings = YouDaoLocalEmbeddings()
        if self.mode == 'local':
            self.llm: ZiyueLLM = ZiyueLLM()
        else:
            self.llm: OpenAILLM = OpenAILLM()
        self.milvus_summary = KnowledgeBaseManager(self.mode)

    def create_milvus_collection(self, user_id, kb_id, kb_name):
        """Create a Milvus collection for a new knowledge base and record it in MySQL."""
        milvus_kb = MilvusClient(self.mode, user_id, [kb_id])
        self.milvus_kbs.append(milvus_kb)
        self.milvus_summary.new_milvus_base(kb_id, user_id, kb_name)

    def match_milvus_kb(self, user_id, kb_ids):
        """Return a cached MilvusClient for (user_id, kb_ids), creating one on miss."""
        for kb in self.milvus_kbs:
            if user_id == kb.user_id and kb_ids == kb.kb_ids:
                debug_logger.info(f'match milvus_client: {kb}')
                return kb
        milvus_kb = MilvusClient(self.mode, user_id, kb_ids)
        self.milvus_kbs.append(milvus_kb)
        return milvus_kb

    async def insert_files_to_milvus(self, user_id, kb_id, local_files: List[LocalFile]):
        """Split, embed and insert files into Milvus, tracking per-file status in MySQL.

        Status codes written to MySQL: 'green' = inserted OK, 'yellow' = Milvus
        insert failed, 'red' = split or embedding failed.
        """
        debug_logger.info(f'insert_files_to_milvus: {kb_id}')
        milvus_kv = self.match_milvus_kb(user_id, [kb_id])
        assert milvus_kv is not None
        success_list = []
        failed_list = []
        for local_file in local_files:
            start = time.time()
            try:
                # OCR callback is passed so image/PDF loaders can extract text.
                local_file.split_file_to_docs(self.get_ocr_result)
                content_length = sum([len(doc.page_content) for doc in local_file.docs])
            except Exception as e:
                error_info = f'split error: {traceback.format_exc()}'
                debug_logger.error(error_info)
                self.milvus_summary.update_file_status(local_file.file_id, status='red')
                failed_list.append(local_file)
                continue
            end = time.time()
            self.milvus_summary.update_content_length(local_file.file_id, content_length)
            debug_logger.info(f'split time: {end - start} {len(local_file.docs)}')
            start = time.time()
            try:
                local_file.create_embedding()
            except Exception as e:
                error_info = f'embedding error: {traceback.format_exc()}'
                debug_logger.error(error_info)
                self.milvus_summary.update_file_status(local_file.file_id, status='red')
                failed_list.append(local_file)
                continue
            end = time.time()
            debug_logger.info(f'embedding time: {end - start} {len(local_file.embs)}')
            self.milvus_summary.update_chunk_size(local_file.file_id, len(local_file.docs))
            ret = await milvus_kv.insert_files(local_file.file_id, local_file.file_name, local_file.file_path,
                                               local_file.docs, local_file.embs)
            insert_time = time.time()
            debug_logger.info(f'insert time: {insert_time - end}')
            if ret:
                self.milvus_summary.update_file_status(local_file.file_id, status='green')
                success_list.append(local_file)
            else:
                self.milvus_summary.update_file_status(local_file.file_id, status='yellow')
                failed_list.append(local_file)
        debug_logger.info(
            f"insert_to_milvus: success num: {len(success_list)}, failed num: {len(failed_list)}")

    def deduplicate_documents(self, source_docs):
        """Drop documents with duplicate page_content, keeping first-seen order."""
        unique_docs = set()
        deduplicated_docs = []
        for doc in source_docs:
            if doc.page_content not in unique_docs:
                unique_docs.add(doc.page_content)
                deduplicated_docs.append(doc)
        return deduplicated_docs

    def get_source_documents(self, queries, milvus_kb, cosine_thresh=None, top_k=None):
        """Embed *queries* and retrieve candidate documents from Milvus.

        Args:
            queries: list of query strings, searched as one batch.
            milvus_kb: MilvusClient to search against.
            cosine_thresh: if given, keep only docs whose score exceeds it.
            top_k: per-query retrieval depth; defaults to self.top_k.
        """
        milvus_kb: MilvusClient
        if not top_k:
            top_k = self.top_k
        source_documents = []
        embs = self.embeddings._get_len_safe_embeddings(queries)
        t1 = time.time()
        batch_result = milvus_kb.search_emb_async(embs=embs, top_k=top_k)
        t2 = time.time()
        debug_logger.info(f"milvus search time: {t2 - t1}")
        for query, query_docs in zip(queries, batch_result):
            for doc in query_docs:
                doc.metadata['retrieval_query'] = query  # record the originating query in the doc metadata
                doc.metadata['embed_version'] = self.embeddings.embed_version
                source_documents.append(doc)
        if cosine_thresh:
            source_documents = [item for item in source_documents if float(item.metadata['score']) > cosine_thresh]
        return source_documents

    def reprocess_source_documents(self, query: str,
                                   source_docs: List[Document],
                                   history: List[List[str]],
                                   prompt_template: str) -> List[Document]:
        """Trim retrieved docs so prompt + history + docs fit the LLM token window.

        Docs are kept in order until the token budget is exhausted; the first
        doc that does not fit is truncated from both ends (truncate_len at a
        time) to fill the remaining budget, and everything after it is dropped.
        """
        # Assemble the prompt subject to the model's max_token budget.
        query_token_num = self.llm.num_tokens_from_messages([query])
        history_token_num = self.llm.num_tokens_from_messages([x for sublist in history for x in sublist])
        template_token_num = self.llm.num_tokens_from_messages([prompt_template])
        # logging.info(f"<self.llm.token_window, self.llm.max_token, self.llm.offcut_token, query_token_num, history_token_num, template_token_num>, types = {type(self.llm.token_window), type(self.llm.max_token), type(self.llm.offcut_token), type(query_token_num), type(history_token_num), type(template_token_num)}, values = {query_token_num, history_token_num, template_token_num}")
        limited_token_nums = self.llm.token_window - self.llm.max_token - self.llm.offcut_token - query_token_num - history_token_num - template_token_num
        new_source_docs = []
        total_token_num = 0
        for doc in source_docs:
            doc_token_num = self.llm.num_tokens_from_docs([doc])
            if total_token_num + doc_token_num <= limited_token_nums:
                new_source_docs.append(doc)
                total_token_num += doc_token_num
            else:
                remaining_token_num = limited_token_nums - total_token_num
                doc_content = doc.page_content
                doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
                while doc_content_token_num > remaining_token_num:
                    # Truncate the doc content to fit the remaining tokens
                    if len(doc_content) > 2 * self.llm.truncate_len:
                        doc_content = doc_content[self.llm.truncate_len: -self.llm.truncate_len]
                    else:  # shorter than 2 * truncate_len: nothing left to trim, blank the content
                        doc_content = ""
                        break
                    doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
                doc.page_content = doc_content
                new_source_docs.append(doc)
                break
        debug_logger.info(f"limited token nums: {limited_token_nums}")
        debug_logger.info(f"template token nums: {template_token_num}")
        debug_logger.info(f"query token nums: {query_token_num}")
        debug_logger.info(f"history token nums: {history_token_num}")
        debug_logger.info(f"new_source_docs token nums: {self.llm.num_tokens_from_docs(new_source_docs)}")
        return new_source_docs

    def generate_prompt(self, query, source_docs, prompt_template):
        """Fill the {question} and {context} placeholders of *prompt_template*."""
        context = "\n".join([doc.page_content for doc in source_docs])
        prompt = prompt_template.replace("{question}", query).replace("{context}", context)
        return prompt

    def rerank_documents(self, query, source_documents):
        """Rerank retrieved documents; currently always delegates to the local service."""
        return self.rerank_documents_for_local(query, source_documents)

    def rerank_documents_for_local(self, query, source_documents):
        """Score documents against *query* via the local rerank HTTP service.

        Falls back to the original retrieval order on any error.
        """
        if len(query) > 300:  # skip local rerank for long queries (over 300 tokens)
            return source_documents
        try:
            response = requests.post(f"{self.local_rerank_service_url}/rerank",
                                     json={"passages": [doc.page_content for doc in source_documents], "query": query})
            scores = response.json()
            for idx, score in enumerate(scores):
                source_documents[idx].metadata['score'] = score
            source_documents = sorted(source_documents, key=lambda x: x.metadata['score'], reverse=True)
        except Exception as e:
            debug_logger.error("rerank error: %s", traceback.format_exc())
            debug_logger.warning("rerank error, use origin retrieval docs")
        return source_documents

    def get_knowledge_based_answer(self, query, milvus_kb, chat_history=None, streaming: bool = STREAMING,
                                   rerank: bool = False):
        """Retrieve, optionally rerank, build a prompt, and stream LLM answers.

        Yields:
            (response, history) pairs; response carries the query, prompt,
            current answer text, and both the raw retrieval docs and the
            token-trimmed source docs.
        """
        if chat_history is None:
            chat_history = []
        retrieval_queries = [query]
        source_documents = self.get_source_documents(retrieval_queries, milvus_kb)
        deduplicated_docs = self.deduplicate_documents(source_documents)
        retrieval_documents = sorted(deduplicated_docs, key=lambda x: x.metadata['score'], reverse=True)
        if rerank and len(retrieval_documents) > 1:
            debug_logger.info(f"use rerank, rerank docs num: {len(retrieval_documents)}")
            retrieval_documents = self.rerank_documents(query, retrieval_documents)
        source_documents = self.reprocess_source_documents(query=query,
                                                           source_docs=retrieval_documents,
                                                           history=chat_history,
                                                           prompt_template=PROMPT_TEMPLATE)
        prompt = self.generate_prompt(query=query,
                                      source_docs=source_documents,
                                      prompt_template=PROMPT_TEMPLATE)
        t1 = time.time()
        for answer_result in self.llm.generatorAnswer(prompt=prompt,
                                                      history=chat_history,
                                                      streaming=streaming):
            resp = answer_result.llm_output["answer"]
            prompt = answer_result.prompt
            history = answer_result.history
            # logging.info(f"[debug] get_knowledge_based_answer history = {history}")
            history[-1][0] = query  # make sure the last turn records the original user query
            response = {"query": query,
                        "prompt": prompt,
                        "result": resp,
                        "retrieval_documents": retrieval_documents,
                        "source_documents": source_documents}
            yield response, history
        t2 = time.time()
        debug_logger.info(f"LLM time: {t2 - t1}")
async def init_local_doc_qa(app, loop):
    """Sanic before-server-start hook: build the shared LocalDocQA and attach it
    to the app context for request handlers."""
    qa_engine = LocalDocQA()
    qa_engine.init_cfg(mode=args.mode)
    print(f'init local_doc_qa in {args.mode}', flush=True)
    app.ctx.local_doc_qa = qa_engine
from qanything_kernel.core.local_file import LocalFile
from qanything_kernel.core.local_doc_qa import LocalDocQA
from qanything_kernel.utils.general_utils import *
from qanything_kernel.utils.custom_log import debug_logger, qa_logger
from sanic.response import ResponseStream
from sanic.response import json as sanic_json
from sanic.response import text as sanic_text
from sanic import request
import uuid
import json
import asyncio
import urllib.parse
import re
from datetime import datetime
import os
class LocalDocQA:
    """Local document question-answering engine (embeddings + Milvus + MySQL
    metadata + optional rerank sidecar + LLM).

    NOTE(review): this file contains duplicate definitions of LocalDocQA; at
    import time this later definition shadows the earlier one.
    """

    def __init__(self):
        # Backends are attached later by init_cfg(); placeholders until then.
        self.llm: object = None
        self.embeddings: object = None
        self.top_k: int = VECTOR_SEARCH_TOP_K  # per-query retrieval depth
        self.chunk_size: int = CHUNK_SIZE
        self.chunk_conent: bool = True  # NOTE(review): likely a typo of "chunk_content"; kept for compatibility
        self.score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
        self.milvus_kbs: List[MilvusClient] = []  # cache of per-(user, kb_ids) Milvus clients
        self.milvus_summary: KnowledgeBaseManager = None
        self.mode: str = None
        # Sidecar HTTP services (rerank + OCR), assumed to run on this host.
        self.local_rerank_service_url = "http://0.0.0.0:8776"
        self.ocr_url = 'http://0.0.0.0:8010/ocr'

    def get_ocr_result(self, image_data: dict):
        """POST *image_data* to the OCR sidecar and return its 'results' payload."""
        response = requests.post(self.ocr_url, json=image_data)
        response.raise_for_status()  # raises if the request returned an error status code
        return response.json()['results']

    def init_cfg(self, mode='local'):
        """Initialise embeddings, the LLM backend ('local' -> ZiyueLLM, else
        OpenAILLM) and the MySQL metadata manager."""
        self.mode = mode
        self.embeddings = YouDaoLocalEmbeddings()
        if self.mode == 'local':
            self.llm: ZiyueLLM = ZiyueLLM()
        else:
            self.llm: OpenAILLM = OpenAILLM()
        self.milvus_summary = KnowledgeBaseManager(self.mode)

    def create_milvus_collection(self, user_id, kb_id, kb_name):
        """Create a Milvus collection for a new knowledge base and record it in MySQL."""
        milvus_kb = MilvusClient(self.mode, user_id, [kb_id])
        self.milvus_kbs.append(milvus_kb)
        self.milvus_summary.new_milvus_base(kb_id, user_id, kb_name)

    def match_milvus_kb(self, user_id, kb_ids):
        """Return a cached MilvusClient for (user_id, kb_ids), creating one on miss."""
        for kb in self.milvus_kbs:
            if user_id == kb.user_id and kb_ids == kb.kb_ids:
                debug_logger.info(f'match milvus_client: {kb}')
                return kb
        milvus_kb = MilvusClient(self.mode, user_id, kb_ids)
        self.milvus_kbs.append(milvus_kb)
        return milvus_kb

    async def insert_files_to_milvus(self, user_id, kb_id, local_files: List[LocalFile]):
        """Split, embed and insert files into Milvus, tracking per-file status in
        MySQL ('green' = OK, 'yellow' = insert failed, 'red' = split/embed failed)."""
        debug_logger.info(f'insert_files_to_milvus: {kb_id}')
        milvus_kv = self.match_milvus_kb(user_id, [kb_id])
        assert milvus_kv is not None
        success_list = []
        failed_list = []
        for local_file in local_files:
            start = time.time()
            try:
                # OCR callback is passed so image/PDF loaders can extract text.
                local_file.split_file_to_docs(self.get_ocr_result)
                content_length = sum([len(doc.page_content) for doc in local_file.docs])
            except Exception as e:
                error_info = f'split error: {traceback.format_exc()}'
                debug_logger.error(error_info)
                self.milvus_summary.update_file_status(local_file.file_id, status='red')
                failed_list.append(local_file)
                continue
            end = time.time()
            self.milvus_summary.update_content_length(local_file.file_id, content_length)
            debug_logger.info(f'split time: {end - start} {len(local_file.docs)}')
            start = time.time()
            try:
                local_file.create_embedding()
            except Exception as e:
                error_info = f'embedding error: {traceback.format_exc()}'
                debug_logger.error(error_info)
                self.milvus_summary.update_file_status(local_file.file_id, status='red')
                failed_list.append(local_file)
                continue
            end = time.time()
            debug_logger.info(f'embedding time: {end - start} {len(local_file.embs)}')
            self.milvus_summary.update_chunk_size(local_file.file_id, len(local_file.docs))
            ret = await milvus_kv.insert_files(local_file.file_id, local_file.file_name, local_file.file_path,
                                               local_file.docs, local_file.embs)
            insert_time = time.time()
            debug_logger.info(f'insert time: {insert_time - end}')
            if ret:
                self.milvus_summary.update_file_status(local_file.file_id, status='green')
                success_list.append(local_file)
            else:
                self.milvus_summary.update_file_status(local_file.file_id, status='yellow')
                failed_list.append(local_file)
        debug_logger.info(
            f"insert_to_milvus: success num: {len(success_list)}, failed num: {len(failed_list)}")

    def deduplicate_documents(self, source_docs):
        """Drop documents with duplicate page_content, keeping first-seen order."""
        unique_docs = set()
        deduplicated_docs = []
        for doc in source_docs:
            if doc.page_content not in unique_docs:
                unique_docs.add(doc.page_content)
                deduplicated_docs.append(doc)
        return deduplicated_docs

    def get_source_documents(self, queries, milvus_kb, cosine_thresh=None, top_k=None):
        """Embed *queries*, batch-search Milvus, and tag each hit's metadata with
        its originating query and embed version; optionally filter by score."""
        milvus_kb: MilvusClient
        if not top_k:
            top_k = self.top_k
        source_documents = []
        embs = self.embeddings._get_len_safe_embeddings(queries)
        t1 = time.time()
        batch_result = milvus_kb.search_emb_async(embs=embs, top_k=top_k)
        t2 = time.time()
        debug_logger.info(f"milvus search time: {t2 - t1}")
        for query, query_docs in zip(queries, batch_result):
            for doc in query_docs:
                doc.metadata['retrieval_query'] = query  # record the originating query in the doc metadata
                doc.metadata['embed_version'] = self.embeddings.embed_version
                source_documents.append(doc)
        if cosine_thresh:
            source_documents = [item for item in source_documents if float(item.metadata['score']) > cosine_thresh]
        return source_documents

    def reprocess_source_documents(self, query: str,
                                   source_docs: List[Document],
                                   history: List[List[str]],
                                   prompt_template: str) -> List[Document]:
        """Trim retrieved docs so prompt + history + docs fit the LLM token
        window; the first overflowing doc is truncated, the rest dropped."""
        # Assemble the prompt subject to the model's max_token budget.
        query_token_num = self.llm.num_tokens_from_messages([query])
        history_token_num = self.llm.num_tokens_from_messages([x for sublist in history for x in sublist])
        template_token_num = self.llm.num_tokens_from_messages([prompt_template])
        # logging.info(f"<self.llm.token_window, self.llm.max_token, self.llm.offcut_token, query_token_num, history_token_num, template_token_num>, types = {type(self.llm.token_window), type(self.llm.max_token), type(self.llm.offcut_token), type(query_token_num), type(history_token_num), type(template_token_num)}, values = {query_token_num, history_token_num, template_token_num}")
        limited_token_nums = self.llm.token_window - self.llm.max_token - self.llm.offcut_token - query_token_num - history_token_num - template_token_num
        new_source_docs = []
        total_token_num = 0
        for doc in source_docs:
            doc_token_num = self.llm.num_tokens_from_docs([doc])
            if total_token_num + doc_token_num <= limited_token_nums:
                new_source_docs.append(doc)
                total_token_num += doc_token_num
            else:
                remaining_token_num = limited_token_nums - total_token_num
                doc_content = doc.page_content
                doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
                while doc_content_token_num > remaining_token_num:
                    # Truncate the doc content to fit the remaining tokens
                    if len(doc_content) > 2 * self.llm.truncate_len:
                        doc_content = doc_content[self.llm.truncate_len: -self.llm.truncate_len]
                    else:  # shorter than 2 * truncate_len: nothing left to trim, blank the content
                        doc_content = ""
                        break
                    doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
                doc.page_content = doc_content
                new_source_docs.append(doc)
                break
        debug_logger.info(f"limited token nums: {limited_token_nums}")
        debug_logger.info(f"template token nums: {template_token_num}")
        debug_logger.info(f"query token nums: {query_token_num}")
        debug_logger.info(f"history token nums: {history_token_num}")
        debug_logger.info(f"new_source_docs token nums: {self.llm.num_tokens_from_docs(new_source_docs)}")
        return new_source_docs

    def generate_prompt(self, query, source_docs, prompt_template):
        """Fill the {question} and {context} placeholders of *prompt_template*."""
        context = "\n".join([doc.page_content for doc in source_docs])
        prompt = prompt_template.replace("{question}", query).replace("{context}", context)
        return prompt

    def rerank_documents(self, query, source_documents):
        """Rerank retrieved documents; currently always delegates to the local service."""
        return self.rerank_documents_for_local(query, source_documents)

    def rerank_documents_for_local(self, query, source_documents):
        """Score documents against *query* via the local rerank HTTP service;
        falls back to the original retrieval order on any error."""
        if len(query) > 300:  # skip local rerank for long queries (over 300 tokens)
            return source_documents
        try:
            response = requests.post(f"{self.local_rerank_service_url}/rerank",
                                     json={"passages": [doc.page_content for doc in source_documents], "query": query})
            scores = response.json()
            for idx, score in enumerate(scores):
                source_documents[idx].metadata['score'] = score
            source_documents = sorted(source_documents, key=lambda x: x.metadata['score'], reverse=True)
        except Exception as e:
            debug_logger.error("rerank error: %s", traceback.format_exc())
            debug_logger.warning("rerank error, use origin retrieval docs")
        return source_documents

    def get_knowledge_based_answer(self, query, milvus_kb, chat_history=None, streaming: bool = STREAMING,
                                   rerank: bool = False):
        """Retrieve, optionally rerank, build a prompt, and stream LLM answers
        as (response, history) pairs."""
        if chat_history is None:
            chat_history = []
        retrieval_queries = [query]
        source_documents = self.get_source_documents(retrieval_queries, milvus_kb)
        deduplicated_docs = self.deduplicate_documents(source_documents)
        retrieval_documents = sorted(deduplicated_docs, key=lambda x: x.metadata['score'], reverse=True)
        if rerank and len(retrieval_documents) > 1:
            debug_logger.info(f"use rerank, rerank docs num: {len(retrieval_documents)}")
            retrieval_documents = self.rerank_documents(query, retrieval_documents)
        source_documents = self.reprocess_source_documents(query=query,
                                                           source_docs=retrieval_documents,
                                                           history=chat_history,
                                                           prompt_template=PROMPT_TEMPLATE)
        prompt = self.generate_prompt(query=query,
                                      source_docs=source_documents,
                                      prompt_template=PROMPT_TEMPLATE)
        t1 = time.time()
        for answer_result in self.llm.generatorAnswer(prompt=prompt,
                                                      history=chat_history,
                                                      streaming=streaming):
            resp = answer_result.llm_output["answer"]
            prompt = answer_result.prompt
            history = answer_result.history
            # logging.info(f"[debug] get_knowledge_based_answer history = {history}")
            history[-1][0] = query  # make sure the last turn records the original user query
            response = {"query": query,
                        "prompt": prompt,
                        "result": resp,
                        "retrieval_documents": retrieval_documents,
                        "source_documents": source_documents}
            yield response, history
        t2 = time.time()
        debug_logger.info(f"LLM time: {t2 - t1}")
debug_logger = logging.getLogger('debug_logger')
debug_logger.setLevel(logging.INFO)
async def new_knowledge_base(req: request):
    """Sanic handler: create a new knowledge base for a validated user.

    Generates a fresh 'KB'-prefixed id, creates the Milvus collection plus the
    MySQL record, and returns the kb id/name with a minute-resolution timestamp.
    """
    doc_qa: LocalDocQA = req.app.ctx.local_doc_qa
    user_id = safe_get(req, 'user_id')
    if user_id is None:
        return sanic_json({"code": 2002, "msg": f'输入非法!request.json:{req.json},请检查!'})
    if not validate_user_id(user_id):
        return sanic_json({"code": 2005, "msg": get_invalid_user_id_msg(user_id=user_id)})
    debug_logger.info("new_knowledge_base %s", user_id)
    kb_name = safe_get(req, 'kb_name')
    new_kb_id = 'KB' + uuid.uuid4().hex
    doc_qa.create_milvus_collection(user_id, new_kb_id, kb_name)
    timestamp = datetime.now().strftime("%Y%m%d%H%M")
    return sanic_json({"code": 200, "msg": "success create knowledge base {}".format(new_kb_id),
                       "data": {"kb_id": new_kb_id, "kb_name": kb_name, "timestamp": timestamp}})
from qanything_kernel.core.local_file import LocalFile
from qanything_kernel.core.local_doc_qa import LocalDocQA
from qanything_kernel.utils.general_utils import *
from qanything_kernel.utils.custom_log import debug_logger, qa_logger
from sanic.response import ResponseStream
from sanic.response import json as sanic_json
from sanic.response import text as sanic_text
from sanic import request
import uuid
import json
import asyncio
import urllib.parse
import re
from datetime import datetime
import os
class LocalFile:
    """A user-uploaded file (or URL) staged for ingestion: persist the raw
    bytes, split the content into Documents, then embed them.

    NOTE(review): relies on module-level names imported elsewhere in this file
    (the various loaders, ChineseTextSplitter, UPLOAD_ROOT_PATH, SENTENCE_SIZE,
    ZH_TITLE_ENHANCE, write_check_file, zh_title_enhance, text_splitter, ...).
    """

    def __init__(self, user_id, kb_id, file: Union[File, str], file_id, file_name, embedding, is_url=False, in_milvus=False):
        # file may be: a URL string (is_url=True), a local path string, or an
        # uploaded Sanic File object whose bytes are saved under UPLOAD_ROOT_PATH.
        self.user_id = user_id
        self.kb_id = kb_id
        self.file_id = file_id
        self.docs: List[Document] = []  # filled by split_file_to_docs()
        self.embs = []                  # filled by create_embedding()
        self.emb_infer = embedding
        self.url = None
        self.in_milvus = in_milvus
        self.file_name = file_name
        if is_url:
            self.url = file
            self.file_path = "URL"
            self.file_content = b''
        else:
            if isinstance(file, str):
                # Existing file on disk: just read its bytes.
                self.file_path = file
                with open(file, 'rb') as f:
                    self.file_content = f.read()
            else:
                # Uploaded file object: persist it under <UPLOAD_ROOT_PATH>/<user>/<file_id>/.
                upload_path = os.path.join(UPLOAD_ROOT_PATH, user_id)
                file_dir = os.path.join(upload_path, self.file_id)
                os.makedirs(file_dir, exist_ok=True)
                self.file_path = os.path.join(file_dir, self.file_name)
                self.file_content = file.body
                with open(self.file_path, "wb+") as f:
                    f.write(self.file_content)
        debug_logger.info(f'success init localfile {self.file_name}')

    def split_file_to_docs(self, ocr_engine: Callable, sentence_size=SENTENCE_SIZE,
                           using_zh_title_enhance=ZH_TITLE_ENHANCE):
        """Load and split this file into Documents based on its extension.

        Args:
            ocr_engine: callback used by the PDF/image loaders to OCR content.
            sentence_size: max sentence length for the Chinese text splitter.
            using_zh_title_enhance: whether to run Chinese title enhancement.

        Raises:
            TypeError: for unsupported file extensions.
        """
        if self.url:
            debug_logger.info("load url: {}".format(self.url))
            loader = MyRecursiveUrlLoader(url=self.url)
            textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
            docs = loader.load_and_split(text_splitter=textsplitter)
        elif self.file_path.lower().endswith(".md"):
            loader = UnstructuredFileLoader(self.file_path, mode="elements")
            docs = loader.load()
        elif self.file_path.lower().endswith(".txt"):
            loader = TextLoader(self.file_path, autodetect_encoding=True)
            texts_splitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
            docs = loader.load_and_split(texts_splitter)
        elif self.file_path.lower().endswith(".pdf"):
            loader = UnstructuredPaddlePDFLoader(self.file_path, ocr_engine)
            texts_splitter = ChineseTextSplitter(pdf=True, sentence_size=sentence_size)
            docs = loader.load_and_split(texts_splitter)
        elif self.file_path.lower().endswith(".jpg") or self.file_path.lower().endswith(
                ".png") or self.file_path.lower().endswith(".jpeg"):
            loader = UnstructuredPaddleImageLoader(self.file_path, ocr_engine, mode="elements")
            texts_splitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
            docs = loader.load_and_split(text_splitter=texts_splitter)
        elif self.file_path.lower().endswith(".docx"):
            loader = UnstructuredWordDocumentLoader(self.file_path, mode="elements")
            texts_splitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
            docs = loader.load_and_split(texts_splitter)
        elif self.file_path.lower().endswith(".xlsx"):
            # loader = UnstructuredExcelLoader(self.file_path, mode="elements")
            # Excel is converted to CSV first, then loaded via CSVLoader.
            csv_file_path = self.file_path[:-5] + '.csv'
            xlsx = pd.read_excel(self.file_path, engine='openpyxl')
            xlsx.to_csv(csv_file_path, index=False)
            loader = CSVLoader(csv_file_path, csv_args={"delimiter": ",", "quotechar": '"'})
            docs = loader.load()
        elif self.file_path.lower().endswith(".pptx"):
            loader = UnstructuredPowerPointLoader(self.file_path, mode="elements")
            docs = loader.load()
        elif self.file_path.lower().endswith(".eml"):
            loader = UnstructuredEmailLoader(self.file_path, mode="elements")
            docs = loader.load()
        elif self.file_path.lower().endswith(".csv"):
            loader = CSVLoader(self.file_path, csv_args={"delimiter": ",", "quotechar": '"'})
            docs = loader.load()
        else:
            raise TypeError("文件类型不支持,目前仅支持:[md,txt,pdf,jpg,png,jpeg,docx,xlsx,pptx,eml,csv]")
        if using_zh_title_enhance:
            debug_logger.info("using_zh_title_enhance %s", using_zh_title_enhance)
            docs = zh_title_enhance(docs)
        # Second-pass split: if a doc's text exceeds the size limit (~800 tokens),
        # break it up further with the module-level text_splitter.
        # text_splitter: RecursiveCharacterTextSplitter
        debug_logger.info(f"before 2nd split doc lens: {len(docs)}")
        # NOTE(review): text_splitter is not defined in this class — presumably a
        # module-level splitter defined elsewhere in this file; verify it exists.
        docs = text_splitter.split_documents(docs)
        debug_logger.info(f"after 2nd split doc lens: {len(docs)}")
        # Inject file_id / file_name into each chunk's metadata.
        for doc in docs:
            doc.metadata["file_id"] = self.file_id
            doc.metadata["file_name"] = self.url if self.url else os.path.split(self.file_path)[-1]
        write_check_file(self.file_path, docs)
        if docs:
            debug_logger.info('langchain analysis content head: %s', docs[0].page_content[:100])
        else:
            debug_logger.info('langchain analysis docs is empty!')
        self.docs = docs

    def create_embedding(self):
        """Compute embeddings for all split docs via the embedding backend."""
        self.embs = self.emb_infer._get_len_safe_embeddings([doc.page_content for doc in self.docs])
class LocalDocQA:
def __init__(self):
self.llm: object = None
self.embeddings: object = None
self.top_k: int = VECTOR_SEARCH_TOP_K
self.chunk_size: int = CHUNK_SIZE
self.chunk_conent: bool = True
self.score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
self.milvus_kbs: List[MilvusClient] = []
self.milvus_summary: KnowledgeBaseManager = None
self.mode: str = None
self.local_rerank_service_url = "http://0.0.0.0:8776"
self.ocr_url = 'http://0.0.0.0:8010/ocr'
def get_ocr_result(self, image_data: dict):
response = requests.post(self.ocr_url, json=image_data)
response.raise_for_status() # 如果请求返回了错误状态码,将会抛出异常
return response.json()['results']
def init_cfg(self, mode='local'):
self.mode = mode
self.embeddings = YouDaoLocalEmbeddings()
if self.mode == 'local':
self.llm: ZiyueLLM = ZiyueLLM()
else:
self.llm: OpenAILLM = OpenAILLM()
self.milvus_summary = KnowledgeBaseManager(self.mode)
def create_milvus_collection(self, user_id, kb_id, kb_name):
milvus_kb = MilvusClient(self.mode, user_id, [kb_id])
self.milvus_kbs.append(milvus_kb)
self.milvus_summary.new_milvus_base(kb_id, user_id, kb_name)
def match_milvus_kb(self, user_id, kb_ids):
for kb in self.milvus_kbs:
if user_id == kb.user_id and kb_ids == kb.kb_ids:
debug_logger.info(f'match milvus_client: {kb}')
return kb
milvus_kb = MilvusClient(self.mode, user_id, kb_ids)
self.milvus_kbs.append(milvus_kb)
return milvus_kb
async def insert_files_to_milvus(self, user_id, kb_id, local_files: List[LocalFile]):
debug_logger.info(f'insert_files_to_milvus: {kb_id}')
milvus_kv = self.match_milvus_kb(user_id, [kb_id])
assert milvus_kv is not None
success_list = []
failed_list = []
for local_file in local_files:
start = time.time()
try:
local_file.split_file_to_docs(self.get_ocr_result)
content_length = sum([len(doc.page_content) for doc in local_file.docs])
except Exception as e:
error_info = f'split error: {traceback.format_exc()}'
debug_logger.error(error_info)
self.milvus_summary.update_file_status(local_file.file_id, status='red')
failed_list.append(local_file)
continue
end = time.time()
self.milvus_summary.update_content_length(local_file.file_id, content_length)
debug_logger.info(f'split time: {end - start} {len(local_file.docs)}')
start = time.time()
try:
local_file.create_embedding()
except Exception as e:
error_info = f'embedding error: {traceback.format_exc()}'
debug_logger.error(error_info)
self.milvus_summary.update_file_status(local_file.file_id, status='red')
failed_list.append(local_file)
continue
end = time.time()
debug_logger.info(f'embedding time: {end - start} {len(local_file.embs)}')
self.milvus_summary.update_chunk_size(local_file.file_id, len(local_file.docs))
ret = await milvus_kv.insert_files(local_file.file_id, local_file.file_name, local_file.file_path,
local_file.docs, local_file.embs)
insert_time = time.time()
debug_logger.info(f'insert time: {insert_time - end}')
if ret:
self.milvus_summary.update_file_status(local_file.file_id, status='green')
success_list.append(local_file)
else:
self.milvus_summary.update_file_status(local_file.file_id, status='yellow')
failed_list.append(local_file)
debug_logger.info(
f"insert_to_milvus: success num: {len(success_list)}, failed num: {len(failed_list)}")
def deduplicate_documents(self, source_docs):
unique_docs = set()
deduplicated_docs = []
for doc in source_docs:
if doc.page_content not in unique_docs:
unique_docs.add(doc.page_content)
deduplicated_docs.append(doc)
return deduplicated_docs
def get_source_documents(self, queries, milvus_kb, cosine_thresh=None, top_k=None):
milvus_kb: MilvusClient
if not top_k:
top_k = self.top_k
source_documents = []
embs = self.embeddings._get_len_safe_embeddings(queries)
t1 = time.time()
batch_result = milvus_kb.search_emb_async(embs=embs, top_k=top_k)
t2 = time.time()
debug_logger.info(f"milvus search time: {t2 - t1}")
for query, query_docs in zip(queries, batch_result):
for doc in query_docs:
doc.metadata['retrieval_query'] = query # 添加查询到文档的元数据中
doc.metadata['embed_version'] = self.embeddings.embed_version
source_documents.append(doc)
if cosine_thresh:
source_documents = [item for item in source_documents if float(item.metadata['score']) > cosine_thresh]
return source_documents
def reprocess_source_documents(self, query: str,
source_docs: List[Document],
history: List[str],
prompt_template: str) -> List[Document]:
# 组装prompt,根据max_token
query_token_num = self.llm.num_tokens_from_messages([query])
history_token_num = self.llm.num_tokens_from_messages([x for sublist in history for x in sublist])
template_token_num = self.llm.num_tokens_from_messages([prompt_template])
# logging.info(f"<self.llm.token_window, self.llm.max_token, self.llm.offcut_token, query_token_num, history_token_num, template_token_num>, types = {type(self.llm.token_window), type(self.llm.max_token), type(self.llm.offcut_token), type(query_token_num), type(history_token_num), type(template_token_num)}, values = {query_token_num, history_token_num, template_token_num}")
limited_token_nums = self.llm.token_window - self.llm.max_token - self.llm.offcut_token - query_token_num - history_token_num - template_token_num
new_source_docs = []
total_token_num = 0
for doc in source_docs:
doc_token_num = self.llm.num_tokens_from_docs([doc])
if total_token_num + doc_token_num <= limited_token_nums:
new_source_docs.append(doc)
total_token_num += doc_token_num
else:
remaining_token_num = limited_token_nums - total_token_num
doc_content = doc.page_content
doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
while doc_content_token_num > remaining_token_num:
# Truncate the doc content to fit the remaining tokens
if len(doc_content) > 2 * self.llm.truncate_len:
doc_content = doc_content[self.llm.truncate_len: -self.llm.truncate_len]
else: # 如果最后不够truncate_len长度的2倍,说明不够切了,直接赋值为空
doc_content = ""
break
doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
doc.page_content = doc_content
new_source_docs.append(doc)
break
debug_logger.info(f"limited token nums: {limited_token_nums}")
debug_logger.info(f"template token nums: {template_token_num}")
debug_logger.info(f"query token nums: {query_token_num}")
debug_logger.info(f"history token nums: {history_token_num}")
debug_logger.info(f"new_source_docs token nums: {self.llm.num_tokens_from_docs(new_source_docs)}")
return new_source_docs
def generate_prompt(self, query, source_docs, prompt_template):
context = "\n".join([doc.page_content for doc in source_docs])
prompt = prompt_template.replace("{question}", query).replace("{context}", context)
return prompt
def rerank_documents(self, query, source_documents):
return self.rerank_documents_for_local(query, source_documents)
def rerank_documents_for_local(self, query, source_documents):
if len(query) > 300: # tokens数量超过300时不使用local rerank
return source_documents
try:
response = requests.post(f"{self.local_rerank_service_url}/rerank",
json={"passages": [doc.page_content for doc in source_documents], "query": query})
scores = response.json()
for idx, score in enumerate(scores):
source_documents[idx].metadata['score'] = score
source_documents = sorted(source_documents, key=lambda x: x.metadata['score'], reverse=True)
except Exception as e:
debug_logger.error("rerank error: %s", traceback.format_exc())
debug_logger.warning("rerank error, use origin retrieval docs")
return source_documents
def get_knowledge_based_answer(self, query, milvus_kb, chat_history=None, streaming: bool = STREAMING,
rerank: bool = False):
if chat_history is None:
chat_history = []
retrieval_queries = [query]
source_documents = self.get_source_documents(retrieval_queries, milvus_kb)
deduplicated_docs = self.deduplicate_documents(source_documents)
retrieval_documents = sorted(deduplicated_docs, key=lambda x: x.metadata['score'], reverse=True)
if rerank and len(retrieval_documents) > 1:
debug_logger.info(f"use rerank, rerank docs num: {len(retrieval_documents)}")
retrieval_documents = self.rerank_documents(query, retrieval_documents)
source_documents = self.reprocess_source_documents(query=query,
source_docs=retrieval_documents,
history=chat_history,
prompt_template=PROMPT_TEMPLATE)
prompt = self.generate_prompt(query=query,
source_docs=source_documents,
prompt_template=PROMPT_TEMPLATE)
t1 = time.time()
for answer_result in self.llm.generatorAnswer(prompt=prompt,
history=chat_history,
streaming=streaming):
resp = answer_result.llm_output["answer"]
prompt = answer_result.prompt
history = answer_result.history
# logging.info(f"[debug] get_knowledge_based_answer history = {history}")
history[-1][0] = query
response = {"query": query,
"prompt": prompt,
"result": resp,
"retrieval_documents": retrieval_documents,
"source_documents": source_documents}
yield response, history
t2 = time.time()
debug_logger.info(f"LLM time: {t2 - t1}")
debug_logger = logging.getLogger('debug_logger').setLevel(logging.INFO)' not in os.environ else os.environ[
async def upload_weblink(req: request):
local_doc_qa: LocalDocQA = req.app.ctx.local_doc_qa
user_id = safe_get(req, 'user_id')
if user_id is None:
return sanic_json({"code": 2002, "msg": f'输入非法!request.json:{req.json},请检查!'})
is_valid = validate_user_id(user_id)
if not is_valid:
return sanic_json({"code": 2005, "msg": get_invalid_user_id_msg(user_id=user_id)})
debug_logger.info("upload_weblink %s", user_id)
kb_id = safe_get(req, 'kb_id')
url = safe_get(req, 'url')
mode = safe_get(req, 'mode', default='soft') # soft代表不上传同名文件,strong表示强制上传同名文件
not_exist_kb_ids = local_doc_qa.milvus_summary.check_kb_exist(user_id, [kb_id])
if not_exist_kb_ids:
msg = "invalid kb_id: {}, please check...".format(not_exist_kb_ids)
return sanic_json({"code": 2001, "msg": msg, "data": [{}]})
now = datetime.now()
timestamp = now.strftime("%Y%m%d%H%M")
exist_files = []
if mode == 'soft':
exist_files = local_doc_qa.milvus_summary.check_file_exist_by_name(user_id, kb_id, [url])
if exist_files:
file_id, file_name, file_size, status = exist_files[0]
msg = f'warning,当前的mode是soft,无法上传同名文件,如果想强制上传同名文件,请设置mode:strong'
data = [{"file_id": file_id, "file_name": url, "status": status, "bytes": file_size, "timestamp": timestamp}]
else:
file_id, msg = local_doc_qa.milvus_summary.add_file(user_id, kb_id, url, timestamp)
local_file = LocalFile(user_id, kb_id, url, file_id, url, local_doc_qa.embeddings, is_url=True)
data = [{"file_id": file_id, "file_name": url, "status": "gray", "bytes": 0, "timestamp": timestamp}]
asyncio.create_task(local_doc_qa.insert_files_to_milvus(user_id, kb_id, [local_file]))
msg = "success,后台正在飞速上传文件,请耐心等待"
return sanic_json({"code": 200, "msg": msg, "data": data}) | null |
20,476 | from qanything_kernel.core.local_file import LocalFile
from qanything_kernel.core.local_doc_qa import LocalDocQA
from qanything_kernel.utils.general_utils import *
from qanything_kernel.utils.custom_log import debug_logger, qa_logger
from sanic.response import ResponseStream
from sanic.response import json as sanic_json
from sanic.response import text as sanic_text
from sanic import request
import uuid
import json
import asyncio
import urllib.parse
import re
from datetime import datetime
import os
class LocalFile:
def __init__(self, user_id, kb_id, file: Union[File, str], file_id, file_name, embedding, is_url=False, in_milvus=False):
def split_file_to_docs(self, ocr_engine: Callable, sentence_size=SENTENCE_SIZE,
using_zh_title_enhance=ZH_TITLE_ENHANCE):
def create_embedding(self):
class LocalDocQA:
def __init__(self):
def get_ocr_result(self, image_data: dict):
def init_cfg(self, mode='local'):
def create_milvus_collection(self, user_id, kb_id, kb_name):
def match_milvus_kb(self, user_id, kb_ids):
async def insert_files_to_milvus(self, user_id, kb_id, local_files: List[LocalFile]):
def deduplicate_documents(self, source_docs):
def get_source_documents(self, queries, milvus_kb, cosine_thresh=None, top_k=None):
def reprocess_source_documents(self, query: str,
source_docs: List[Document],
history: List[str],
prompt_template: str) -> List[Document]:
def generate_prompt(self, query, source_docs, prompt_template):
def rerank_documents(self, query, source_documents):
def rerank_documents_for_local(self, query, source_documents):
def get_knowledge_based_answer(self, query, milvus_kb, chat_history=None, streaming: bool = STREAMING,
rerank: bool = False):
debug_logger = logging.getLogger('debug_logger').setLevel(logging.INFO)' not in os.environ else os.environ[
async def upload_files(req: request):
local_doc_qa: LocalDocQA = req.app.ctx.local_doc_qa
user_id = safe_get(req, 'user_id')
if user_id is None:
return sanic_json({"code": 2002, "msg": f'输入非法!request.form: {req.form},request.files: {req.files}请检查!'})
is_valid = validate_user_id(user_id)
if not is_valid:
return sanic_json({"code": 2005, "msg": get_invalid_user_id_msg(user_id=user_id)})
debug_logger.info("upload_files %s", user_id)
kb_id = safe_get(req, 'kb_id')
mode = safe_get(req, 'mode', default='soft') # soft代表不上传同名文件,strong表示强制上传同名文件
debug_logger.info("mode: %s", mode)
use_local_file = safe_get(req, 'use_local_file', 'false')
if use_local_file == 'true':
files = read_files_with_extensions()
else:
files = req.files.getlist('files')
not_exist_kb_ids = local_doc_qa.milvus_summary.check_kb_exist(user_id, [kb_id])
if not_exist_kb_ids:
msg = "invalid kb_id: {}, please check...".format(not_exist_kb_ids)
return sanic_json({"code": 2001, "msg": msg, "data": [{}]})
data = []
local_files = []
file_names = []
for file in files:
if isinstance(file, str):
file_name = os.path.basename(file)
else:
debug_logger.info('ori name: %s', file.name)
file_name = urllib.parse.unquote(file.name, encoding='UTF-8')
debug_logger.info('decode name: %s', file_name)
# 删除掉全角字符
file_name = re.sub(r'[\uFF01-\uFF5E\u3000-\u303F]', '', file_name)
file_name = file_name.replace("/", "_")
debug_logger.info('cleaned name: %s', file_name)
file_name = truncate_filename(file_name)
file_names.append(file_name)
exist_file_names = []
if mode == 'soft':
exist_files = local_doc_qa.milvus_summary.check_file_exist_by_name(user_id, kb_id, file_names)
exist_file_names = [f[1] for f in exist_files]
now = datetime.now()
timestamp = now.strftime("%Y%m%d%H%M")
for file, file_name in zip(files, file_names):
if file_name in exist_file_names:
continue
file_id, msg = local_doc_qa.milvus_summary.add_file(user_id, kb_id, file_name, timestamp)
debug_logger.info(f"{file_name}, {file_id}, {msg}")
local_file = LocalFile(user_id, kb_id, file, file_id, file_name, local_doc_qa.embeddings)
local_files.append(local_file)
local_doc_qa.milvus_summary.update_file_size(file_id, len(local_file.file_content))
data.append(
{"file_id": file_id, "file_name": file_name, "status": "gray", "bytes": len(local_file.file_content),
"timestamp": timestamp})
asyncio.create_task(local_doc_qa.insert_files_to_milvus(user_id, kb_id, local_files))
if exist_file_names:
msg = f'warning,当前的mode是soft,无法上传同名文件{exist_file_names},如果想强制上传同名文件,请设置mode:strong'
else:
msg = "success,后台正在飞速上传文件,请耐心等待"
return sanic_json({"code": 200, "msg": msg, "data": data}) | null |
20,477 | from qanything_kernel.core.local_file import LocalFile
from qanything_kernel.core.local_doc_qa import LocalDocQA
from qanything_kernel.utils.general_utils import *
from qanything_kernel.utils.custom_log import debug_logger, qa_logger
from sanic.response import ResponseStream
from sanic.response import json as sanic_json
from sanic.response import text as sanic_text
from sanic import request
import uuid
import json
import asyncio
import urllib.parse
import re
from datetime import datetime
import os
class LocalDocQA:
def __init__(self):
self.llm: object = None
self.embeddings: object = None
self.top_k: int = VECTOR_SEARCH_TOP_K
self.chunk_size: int = CHUNK_SIZE
self.chunk_conent: bool = True
self.score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
self.milvus_kbs: List[MilvusClient] = []
self.milvus_summary: KnowledgeBaseManager = None
self.mode: str = None
self.local_rerank_service_url = "http://0.0.0.0:8776"
self.ocr_url = 'http://0.0.0.0:8010/ocr'
def get_ocr_result(self, image_data: dict):
response = requests.post(self.ocr_url, json=image_data)
response.raise_for_status() # 如果请求返回了错误状态码,将会抛出异常
return response.json()['results']
def init_cfg(self, mode='local'):
self.mode = mode
self.embeddings = YouDaoLocalEmbeddings()
if self.mode == 'local':
self.llm: ZiyueLLM = ZiyueLLM()
else:
self.llm: OpenAILLM = OpenAILLM()
self.milvus_summary = KnowledgeBaseManager(self.mode)
def create_milvus_collection(self, user_id, kb_id, kb_name):
milvus_kb = MilvusClient(self.mode, user_id, [kb_id])
self.milvus_kbs.append(milvus_kb)
self.milvus_summary.new_milvus_base(kb_id, user_id, kb_name)
def match_milvus_kb(self, user_id, kb_ids):
for kb in self.milvus_kbs:
if user_id == kb.user_id and kb_ids == kb.kb_ids:
debug_logger.info(f'match milvus_client: {kb}')
return kb
milvus_kb = MilvusClient(self.mode, user_id, kb_ids)
self.milvus_kbs.append(milvus_kb)
return milvus_kb
async def insert_files_to_milvus(self, user_id, kb_id, local_files: List[LocalFile]):
debug_logger.info(f'insert_files_to_milvus: {kb_id}')
milvus_kv = self.match_milvus_kb(user_id, [kb_id])
assert milvus_kv is not None
success_list = []
failed_list = []
for local_file in local_files:
start = time.time()
try:
local_file.split_file_to_docs(self.get_ocr_result)
content_length = sum([len(doc.page_content) for doc in local_file.docs])
except Exception as e:
error_info = f'split error: {traceback.format_exc()}'
debug_logger.error(error_info)
self.milvus_summary.update_file_status(local_file.file_id, status='red')
failed_list.append(local_file)
continue
end = time.time()
self.milvus_summary.update_content_length(local_file.file_id, content_length)
debug_logger.info(f'split time: {end - start} {len(local_file.docs)}')
start = time.time()
try:
local_file.create_embedding()
except Exception as e:
error_info = f'embedding error: {traceback.format_exc()}'
debug_logger.error(error_info)
self.milvus_summary.update_file_status(local_file.file_id, status='red')
failed_list.append(local_file)
continue
end = time.time()
debug_logger.info(f'embedding time: {end - start} {len(local_file.embs)}')
self.milvus_summary.update_chunk_size(local_file.file_id, len(local_file.docs))
ret = await milvus_kv.insert_files(local_file.file_id, local_file.file_name, local_file.file_path,
local_file.docs, local_file.embs)
insert_time = time.time()
debug_logger.info(f'insert time: {insert_time - end}')
if ret:
self.milvus_summary.update_file_status(local_file.file_id, status='green')
success_list.append(local_file)
else:
self.milvus_summary.update_file_status(local_file.file_id, status='yellow')
failed_list.append(local_file)
debug_logger.info(
f"insert_to_milvus: success num: {len(success_list)}, failed num: {len(failed_list)}")
def deduplicate_documents(self, source_docs):
unique_docs = set()
deduplicated_docs = []
for doc in source_docs:
if doc.page_content not in unique_docs:
unique_docs.add(doc.page_content)
deduplicated_docs.append(doc)
return deduplicated_docs
def get_source_documents(self, queries, milvus_kb, cosine_thresh=None, top_k=None):
milvus_kb: MilvusClient
if not top_k:
top_k = self.top_k
source_documents = []
embs = self.embeddings._get_len_safe_embeddings(queries)
t1 = time.time()
batch_result = milvus_kb.search_emb_async(embs=embs, top_k=top_k)
t2 = time.time()
debug_logger.info(f"milvus search time: {t2 - t1}")
for query, query_docs in zip(queries, batch_result):
for doc in query_docs:
doc.metadata['retrieval_query'] = query # 添加查询到文档的元数据中
doc.metadata['embed_version'] = self.embeddings.embed_version
source_documents.append(doc)
if cosine_thresh:
source_documents = [item for item in source_documents if float(item.metadata['score']) > cosine_thresh]
return source_documents
def reprocess_source_documents(self, query: str,
source_docs: List[Document],
history: List[str],
prompt_template: str) -> List[Document]:
# 组装prompt,根据max_token
query_token_num = self.llm.num_tokens_from_messages([query])
history_token_num = self.llm.num_tokens_from_messages([x for sublist in history for x in sublist])
template_token_num = self.llm.num_tokens_from_messages([prompt_template])
# logging.info(f"<self.llm.token_window, self.llm.max_token, self.llm.offcut_token, query_token_num, history_token_num, template_token_num>, types = {type(self.llm.token_window), type(self.llm.max_token), type(self.llm.offcut_token), type(query_token_num), type(history_token_num), type(template_token_num)}, values = {query_token_num, history_token_num, template_token_num}")
limited_token_nums = self.llm.token_window - self.llm.max_token - self.llm.offcut_token - query_token_num - history_token_num - template_token_num
new_source_docs = []
total_token_num = 0
for doc in source_docs:
doc_token_num = self.llm.num_tokens_from_docs([doc])
if total_token_num + doc_token_num <= limited_token_nums:
new_source_docs.append(doc)
total_token_num += doc_token_num
else:
remaining_token_num = limited_token_nums - total_token_num
doc_content = doc.page_content
doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
while doc_content_token_num > remaining_token_num:
# Truncate the doc content to fit the remaining tokens
if len(doc_content) > 2 * self.llm.truncate_len:
doc_content = doc_content[self.llm.truncate_len: -self.llm.truncate_len]
else: # 如果最后不够truncate_len长度的2倍,说明不够切了,直接赋值为空
doc_content = ""
break
doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
doc.page_content = doc_content
new_source_docs.append(doc)
break
debug_logger.info(f"limited token nums: {limited_token_nums}")
debug_logger.info(f"template token nums: {template_token_num}")
debug_logger.info(f"query token nums: {query_token_num}")
debug_logger.info(f"history token nums: {history_token_num}")
debug_logger.info(f"new_source_docs token nums: {self.llm.num_tokens_from_docs(new_source_docs)}")
return new_source_docs
def generate_prompt(self, query, source_docs, prompt_template):
context = "\n".join([doc.page_content for doc in source_docs])
prompt = prompt_template.replace("{question}", query).replace("{context}", context)
return prompt
def rerank_documents(self, query, source_documents):
return self.rerank_documents_for_local(query, source_documents)
def rerank_documents_for_local(self, query, source_documents):
if len(query) > 300: # tokens数量超过300时不使用local rerank
return source_documents
try:
response = requests.post(f"{self.local_rerank_service_url}/rerank",
json={"passages": [doc.page_content for doc in source_documents], "query": query})
scores = response.json()
for idx, score in enumerate(scores):
source_documents[idx].metadata['score'] = score
source_documents = sorted(source_documents, key=lambda x: x.metadata['score'], reverse=True)
except Exception as e:
debug_logger.error("rerank error: %s", traceback.format_exc())
debug_logger.warning("rerank error, use origin retrieval docs")
return source_documents
def get_knowledge_based_answer(self, query, milvus_kb, chat_history=None, streaming: bool = STREAMING,
rerank: bool = False):
if chat_history is None:
chat_history = []
retrieval_queries = [query]
source_documents = self.get_source_documents(retrieval_queries, milvus_kb)
deduplicated_docs = self.deduplicate_documents(source_documents)
retrieval_documents = sorted(deduplicated_docs, key=lambda x: x.metadata['score'], reverse=True)
if rerank and len(retrieval_documents) > 1:
debug_logger.info(f"use rerank, rerank docs num: {len(retrieval_documents)}")
retrieval_documents = self.rerank_documents(query, retrieval_documents)
source_documents = self.reprocess_source_documents(query=query,
source_docs=retrieval_documents,
history=chat_history,
prompt_template=PROMPT_TEMPLATE)
prompt = self.generate_prompt(query=query,
source_docs=source_documents,
prompt_template=PROMPT_TEMPLATE)
t1 = time.time()
for answer_result in self.llm.generatorAnswer(prompt=prompt,
history=chat_history,
streaming=streaming):
resp = answer_result.llm_output["answer"]
prompt = answer_result.prompt
history = answer_result.history
# logging.info(f"[debug] get_knowledge_based_answer history = {history}")
history[-1][0] = query
response = {"query": query,
"prompt": prompt,
"result": resp,
"retrieval_documents": retrieval_documents,
"source_documents": source_documents}
yield response, history
t2 = time.time()
debug_logger.info(f"LLM time: {t2 - t1}")
debug_logger = logging.getLogger('debug_logger').setLevel(logging.INFO)' not in os.environ else os.environ[
async def list_kbs(req: request):
local_doc_qa: LocalDocQA = req.app.ctx.local_doc_qa
user_id = safe_get(req, 'user_id')
if user_id is None:
return sanic_json({"code": 2002, "msg": f'输入非法!request.json:{req.json},请检查!'})
is_valid = validate_user_id(user_id)
if not is_valid:
return sanic_json({"code": 2005, "msg": get_invalid_user_id_msg(user_id=user_id)})
debug_logger.info("list_kbs %s", user_id)
kb_infos = local_doc_qa.milvus_summary.get_knowledge_bases(user_id)
data = []
for kb in kb_infos:
data.append({"kb_id": kb[0], "kb_name": kb[1]})
debug_logger.info("all kb infos: {}".format(data))
return sanic_json({"code": 200, "data": data}) | null |
20,478 | from qanything_kernel.core.local_file import LocalFile
from qanything_kernel.core.local_doc_qa import LocalDocQA
from qanything_kernel.utils.general_utils import *
from qanything_kernel.utils.custom_log import debug_logger, qa_logger
from sanic.response import ResponseStream
from sanic.response import json as sanic_json
from sanic.response import text as sanic_text
from sanic import request
import uuid
import json
import asyncio
import urllib.parse
import re
from datetime import datetime
import os
class LocalDocQA:
def __init__(self):
self.llm: object = None
self.embeddings: object = None
self.top_k: int = VECTOR_SEARCH_TOP_K
self.chunk_size: int = CHUNK_SIZE
self.chunk_conent: bool = True
self.score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
self.milvus_kbs: List[MilvusClient] = []
self.milvus_summary: KnowledgeBaseManager = None
self.mode: str = None
self.local_rerank_service_url = "http://0.0.0.0:8776"
self.ocr_url = 'http://0.0.0.0:8010/ocr'
def get_ocr_result(self, image_data: dict):
response = requests.post(self.ocr_url, json=image_data)
response.raise_for_status() # 如果请求返回了错误状态码,将会抛出异常
return response.json()['results']
def init_cfg(self, mode='local'):
self.mode = mode
self.embeddings = YouDaoLocalEmbeddings()
if self.mode == 'local':
self.llm: ZiyueLLM = ZiyueLLM()
else:
self.llm: OpenAILLM = OpenAILLM()
self.milvus_summary = KnowledgeBaseManager(self.mode)
def create_milvus_collection(self, user_id, kb_id, kb_name):
milvus_kb = MilvusClient(self.mode, user_id, [kb_id])
self.milvus_kbs.append(milvus_kb)
self.milvus_summary.new_milvus_base(kb_id, user_id, kb_name)
def match_milvus_kb(self, user_id, kb_ids):
for kb in self.milvus_kbs:
if user_id == kb.user_id and kb_ids == kb.kb_ids:
debug_logger.info(f'match milvus_client: {kb}')
return kb
milvus_kb = MilvusClient(self.mode, user_id, kb_ids)
self.milvus_kbs.append(milvus_kb)
return milvus_kb
async def insert_files_to_milvus(self, user_id, kb_id, local_files: List[LocalFile]):
debug_logger.info(f'insert_files_to_milvus: {kb_id}')
milvus_kv = self.match_milvus_kb(user_id, [kb_id])
assert milvus_kv is not None
success_list = []
failed_list = []
for local_file in local_files:
start = time.time()
try:
local_file.split_file_to_docs(self.get_ocr_result)
content_length = sum([len(doc.page_content) for doc in local_file.docs])
except Exception as e:
error_info = f'split error: {traceback.format_exc()}'
debug_logger.error(error_info)
self.milvus_summary.update_file_status(local_file.file_id, status='red')
failed_list.append(local_file)
continue
end = time.time()
self.milvus_summary.update_content_length(local_file.file_id, content_length)
debug_logger.info(f'split time: {end - start} {len(local_file.docs)}')
start = time.time()
try:
local_file.create_embedding()
except Exception as e:
error_info = f'embedding error: {traceback.format_exc()}'
debug_logger.error(error_info)
self.milvus_summary.update_file_status(local_file.file_id, status='red')
failed_list.append(local_file)
continue
end = time.time()
debug_logger.info(f'embedding time: {end - start} {len(local_file.embs)}')
self.milvus_summary.update_chunk_size(local_file.file_id, len(local_file.docs))
ret = await milvus_kv.insert_files(local_file.file_id, local_file.file_name, local_file.file_path,
local_file.docs, local_file.embs)
insert_time = time.time()
debug_logger.info(f'insert time: {insert_time - end}')
if ret:
self.milvus_summary.update_file_status(local_file.file_id, status='green')
success_list.append(local_file)
else:
self.milvus_summary.update_file_status(local_file.file_id, status='yellow')
failed_list.append(local_file)
debug_logger.info(
f"insert_to_milvus: success num: {len(success_list)}, failed num: {len(failed_list)}")
def deduplicate_documents(self, source_docs):
unique_docs = set()
deduplicated_docs = []
for doc in source_docs:
if doc.page_content not in unique_docs:
unique_docs.add(doc.page_content)
deduplicated_docs.append(doc)
return deduplicated_docs
def get_source_documents(self, queries, milvus_kb, cosine_thresh=None, top_k=None):
milvus_kb: MilvusClient
if not top_k:
top_k = self.top_k
source_documents = []
embs = self.embeddings._get_len_safe_embeddings(queries)
t1 = time.time()
batch_result = milvus_kb.search_emb_async(embs=embs, top_k=top_k)
t2 = time.time()
debug_logger.info(f"milvus search time: {t2 - t1}")
for query, query_docs in zip(queries, batch_result):
for doc in query_docs:
doc.metadata['retrieval_query'] = query # 添加查询到文档的元数据中
doc.metadata['embed_version'] = self.embeddings.embed_version
source_documents.append(doc)
if cosine_thresh:
source_documents = [item for item in source_documents if float(item.metadata['score']) > cosine_thresh]
return source_documents
def reprocess_source_documents(self, query: str,
source_docs: List[Document],
history: List[str],
prompt_template: str) -> List[Document]:
# 组装prompt,根据max_token
query_token_num = self.llm.num_tokens_from_messages([query])
history_token_num = self.llm.num_tokens_from_messages([x for sublist in history for x in sublist])
template_token_num = self.llm.num_tokens_from_messages([prompt_template])
# logging.info(f"<self.llm.token_window, self.llm.max_token, self.llm.offcut_token, query_token_num, history_token_num, template_token_num>, types = {type(self.llm.token_window), type(self.llm.max_token), type(self.llm.offcut_token), type(query_token_num), type(history_token_num), type(template_token_num)}, values = {query_token_num, history_token_num, template_token_num}")
limited_token_nums = self.llm.token_window - self.llm.max_token - self.llm.offcut_token - query_token_num - history_token_num - template_token_num
new_source_docs = []
total_token_num = 0
for doc in source_docs:
doc_token_num = self.llm.num_tokens_from_docs([doc])
if total_token_num + doc_token_num <= limited_token_nums:
new_source_docs.append(doc)
total_token_num += doc_token_num
else:
remaining_token_num = limited_token_nums - total_token_num
doc_content = doc.page_content
doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
while doc_content_token_num > remaining_token_num:
# Truncate the doc content to fit the remaining tokens
if len(doc_content) > 2 * self.llm.truncate_len:
doc_content = doc_content[self.llm.truncate_len: -self.llm.truncate_len]
else: # 如果最后不够truncate_len长度的2倍,说明不够切了,直接赋值为空
doc_content = ""
break
doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
doc.page_content = doc_content
new_source_docs.append(doc)
break
debug_logger.info(f"limited token nums: {limited_token_nums}")
debug_logger.info(f"template token nums: {template_token_num}")
debug_logger.info(f"query token nums: {query_token_num}")
debug_logger.info(f"history token nums: {history_token_num}")
debug_logger.info(f"new_source_docs token nums: {self.llm.num_tokens_from_docs(new_source_docs)}")
return new_source_docs
def generate_prompt(self, query, source_docs, prompt_template):
context = "\n".join([doc.page_content for doc in source_docs])
prompt = prompt_template.replace("{question}", query).replace("{context}", context)
return prompt
def rerank_documents(self, query, source_documents):
return self.rerank_documents_for_local(query, source_documents)
def rerank_documents_for_local(self, query, source_documents):
if len(query) > 300: # tokens数量超过300时不使用local rerank
return source_documents
try:
response = requests.post(f"{self.local_rerank_service_url}/rerank",
json={"passages": [doc.page_content for doc in source_documents], "query": query})
scores = response.json()
for idx, score in enumerate(scores):
source_documents[idx].metadata['score'] = score
source_documents = sorted(source_documents, key=lambda x: x.metadata['score'], reverse=True)
except Exception as e:
debug_logger.error("rerank error: %s", traceback.format_exc())
debug_logger.warning("rerank error, use origin retrieval docs")
return source_documents
def get_knowledge_based_answer(self, query, milvus_kb, chat_history=None, streaming: bool = STREAMING,
rerank: bool = False):
if chat_history is None:
chat_history = []
retrieval_queries = [query]
source_documents = self.get_source_documents(retrieval_queries, milvus_kb)
deduplicated_docs = self.deduplicate_documents(source_documents)
retrieval_documents = sorted(deduplicated_docs, key=lambda x: x.metadata['score'], reverse=True)
if rerank and len(retrieval_documents) > 1:
debug_logger.info(f"use rerank, rerank docs num: {len(retrieval_documents)}")
retrieval_documents = self.rerank_documents(query, retrieval_documents)
source_documents = self.reprocess_source_documents(query=query,
source_docs=retrieval_documents,
history=chat_history,
prompt_template=PROMPT_TEMPLATE)
prompt = self.generate_prompt(query=query,
source_docs=source_documents,
prompt_template=PROMPT_TEMPLATE)
t1 = time.time()
for answer_result in self.llm.generatorAnswer(prompt=prompt,
history=chat_history,
streaming=streaming):
resp = answer_result.llm_output["answer"]
prompt = answer_result.prompt
history = answer_result.history
# logging.info(f"[debug] get_knowledge_based_answer history = {history}")
history[-1][0] = query
response = {"query": query,
"prompt": prompt,
"result": resp,
"retrieval_documents": retrieval_documents,
"source_documents": source_documents}
yield response, history
t2 = time.time()
debug_logger.info(f"LLM time: {t2 - t1}")
debug_logger = logging.getLogger('debug_logger').setLevel(logging.INFO)' not in os.environ else os.environ[
async def list_docs(req: request):
local_doc_qa: LocalDocQA = req.app.ctx.local_doc_qa
user_id = safe_get(req, 'user_id')
if user_id is None:
return sanic_json({"code": 2002, "msg": f'输入非法!request.json:{req.json},请检查!'})
is_valid = validate_user_id(user_id)
if not is_valid:
return sanic_json({"code": 2005, "msg": get_invalid_user_id_msg(user_id=user_id)})
debug_logger.info("list_docs %s", user_id)
kb_id = safe_get(req, 'kb_id')
debug_logger.info("kb_id: {}".format(kb_id))
data = []
file_infos = local_doc_qa.milvus_summary.get_files(user_id, kb_id)
status_count = {}
msg_map = {'gray': "正在上传中,请耐心等待",
'red': "split或embedding失败,请检查文件类型,仅支持[md,txt,pdf,jpg,png,jpeg,docx,xlsx,pptx,eml,csv]",
'yellow': "milvus插入失败,请稍后再试", 'green': "上传成功"}
for file_info in file_infos:
status = file_info[2]
if status not in status_count:
status_count[status] = 1
else:
status_count[status] += 1
data.append({"file_id": file_info[0], "file_name": file_info[1], "status": file_info[2], "bytes": file_info[3],
"content_length": file_info[4], "timestamp": file_info[5], "msg": msg_map[file_info[2]]})
return sanic_json({"code": 200, "msg": "success", "data": {'total': status_count, 'details': data}}) | null |
20,479 | from qanything_kernel.core.local_file import LocalFile
from qanything_kernel.core.local_doc_qa import LocalDocQA
from qanything_kernel.utils.general_utils import *
from qanything_kernel.utils.custom_log import debug_logger, qa_logger
from sanic.response import ResponseStream
from sanic.response import json as sanic_json
from sanic.response import text as sanic_text
from sanic import request
import uuid
import json
import asyncio
import urllib.parse
import re
from datetime import datetime
import os
class LocalDocQA:
def __init__(self):
def get_ocr_result(self, image_data: dict):
def init_cfg(self, mode='local'):
def create_milvus_collection(self, user_id, kb_id, kb_name):
def match_milvus_kb(self, user_id, kb_ids):
async def insert_files_to_milvus(self, user_id, kb_id, local_files: List[LocalFile]):
def deduplicate_documents(self, source_docs):
def get_source_documents(self, queries, milvus_kb, cosine_thresh=None, top_k=None):
def reprocess_source_documents(self, query: str,
source_docs: List[Document],
history: List[str],
prompt_template: str) -> List[Document]:
def generate_prompt(self, query, source_docs, prompt_template):
def rerank_documents(self, query, source_documents):
def rerank_documents_for_local(self, query, source_documents):
def get_knowledge_based_answer(self, query, milvus_kb, chat_history=None, streaming: bool = STREAMING,
rerank: bool = False):
debug_logger = logging.getLogger('debug_logger').setLevel(logging.INFO)' not in os.environ else os.environ[
async def delete_knowledge_base(req: request):
local_doc_qa: LocalDocQA = req.app.ctx.local_doc_qa
user_id = safe_get(req, 'user_id')
if user_id is None:
return sanic_json({"code": 2002, "msg": f'输入非法!request.json:{req.json},请检查!'})
is_valid = validate_user_id(user_id)
if not is_valid:
return sanic_json({"code": 2005, "msg": get_invalid_user_id_msg(user_id=user_id)})
debug_logger.info("delete_knowledge_base %s", user_id)
kb_ids = safe_get(req, 'kb_ids')
not_exist_kb_ids = local_doc_qa.milvus_summary.check_kb_exist(user_id, kb_ids)
if not_exist_kb_ids:
return sanic_json({"code": 2003, "msg": "fail, knowledge Base {} not found".format(not_exist_kb_ids)})
milvus = local_doc_qa.match_milvus_kb(user_id, kb_ids)
for kb_id in kb_ids:
milvus.delete_partition(kb_id)
local_doc_qa.milvus_summary.delete_knowledge_base(user_id, kb_ids)
return sanic_json({"code": 200, "msg": "Knowledge Base {} delete success".format(kb_ids)}) | null |
20,480 | from qanything_kernel.core.local_file import LocalFile
from qanything_kernel.core.local_doc_qa import LocalDocQA
from qanything_kernel.utils.general_utils import *
from qanything_kernel.utils.custom_log import debug_logger, qa_logger
from sanic.response import ResponseStream
from sanic.response import json as sanic_json
from sanic.response import text as sanic_text
from sanic import request
import uuid
import json
import asyncio
import urllib.parse
import re
from datetime import datetime
import os
class LocalDocQA:
def __init__(self):
self.llm: object = None
self.embeddings: object = None
self.top_k: int = VECTOR_SEARCH_TOP_K
self.chunk_size: int = CHUNK_SIZE
self.chunk_conent: bool = True
self.score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
self.milvus_kbs: List[MilvusClient] = []
self.milvus_summary: KnowledgeBaseManager = None
self.mode: str = None
self.local_rerank_service_url = "http://0.0.0.0:8776"
self.ocr_url = 'http://0.0.0.0:8010/ocr'
def get_ocr_result(self, image_data: dict):
response = requests.post(self.ocr_url, json=image_data)
response.raise_for_status() # 如果请求返回了错误状态码,将会抛出异常
return response.json()['results']
def init_cfg(self, mode='local'):
self.mode = mode
self.embeddings = YouDaoLocalEmbeddings()
if self.mode == 'local':
self.llm: ZiyueLLM = ZiyueLLM()
else:
self.llm: OpenAILLM = OpenAILLM()
self.milvus_summary = KnowledgeBaseManager(self.mode)
def create_milvus_collection(self, user_id, kb_id, kb_name):
milvus_kb = MilvusClient(self.mode, user_id, [kb_id])
self.milvus_kbs.append(milvus_kb)
self.milvus_summary.new_milvus_base(kb_id, user_id, kb_name)
def match_milvus_kb(self, user_id, kb_ids):
for kb in self.milvus_kbs:
if user_id == kb.user_id and kb_ids == kb.kb_ids:
debug_logger.info(f'match milvus_client: {kb}')
return kb
milvus_kb = MilvusClient(self.mode, user_id, kb_ids)
self.milvus_kbs.append(milvus_kb)
return milvus_kb
async def insert_files_to_milvus(self, user_id, kb_id, local_files: List[LocalFile]):
debug_logger.info(f'insert_files_to_milvus: {kb_id}')
milvus_kv = self.match_milvus_kb(user_id, [kb_id])
assert milvus_kv is not None
success_list = []
failed_list = []
for local_file in local_files:
start = time.time()
try:
local_file.split_file_to_docs(self.get_ocr_result)
content_length = sum([len(doc.page_content) for doc in local_file.docs])
except Exception as e:
error_info = f'split error: {traceback.format_exc()}'
debug_logger.error(error_info)
self.milvus_summary.update_file_status(local_file.file_id, status='red')
failed_list.append(local_file)
continue
end = time.time()
self.milvus_summary.update_content_length(local_file.file_id, content_length)
debug_logger.info(f'split time: {end - start} {len(local_file.docs)}')
start = time.time()
try:
local_file.create_embedding()
except Exception as e:
error_info = f'embedding error: {traceback.format_exc()}'
debug_logger.error(error_info)
self.milvus_summary.update_file_status(local_file.file_id, status='red')
failed_list.append(local_file)
continue
end = time.time()
debug_logger.info(f'embedding time: {end - start} {len(local_file.embs)}')
self.milvus_summary.update_chunk_size(local_file.file_id, len(local_file.docs))
ret = await milvus_kv.insert_files(local_file.file_id, local_file.file_name, local_file.file_path,
local_file.docs, local_file.embs)
insert_time = time.time()
debug_logger.info(f'insert time: {insert_time - end}')
if ret:
self.milvus_summary.update_file_status(local_file.file_id, status='green')
success_list.append(local_file)
else:
self.milvus_summary.update_file_status(local_file.file_id, status='yellow')
failed_list.append(local_file)
debug_logger.info(
f"insert_to_milvus: success num: {len(success_list)}, failed num: {len(failed_list)}")
def deduplicate_documents(self, source_docs):
unique_docs = set()
deduplicated_docs = []
for doc in source_docs:
if doc.page_content not in unique_docs:
unique_docs.add(doc.page_content)
deduplicated_docs.append(doc)
return deduplicated_docs
def get_source_documents(self, queries, milvus_kb, cosine_thresh=None, top_k=None):
milvus_kb: MilvusClient
if not top_k:
top_k = self.top_k
source_documents = []
embs = self.embeddings._get_len_safe_embeddings(queries)
t1 = time.time()
batch_result = milvus_kb.search_emb_async(embs=embs, top_k=top_k)
t2 = time.time()
debug_logger.info(f"milvus search time: {t2 - t1}")
for query, query_docs in zip(queries, batch_result):
for doc in query_docs:
doc.metadata['retrieval_query'] = query # 添加查询到文档的元数据中
doc.metadata['embed_version'] = self.embeddings.embed_version
source_documents.append(doc)
if cosine_thresh:
source_documents = [item for item in source_documents if float(item.metadata['score']) > cosine_thresh]
return source_documents
def reprocess_source_documents(self, query: str,
source_docs: List[Document],
history: List[str],
prompt_template: str) -> List[Document]:
# 组装prompt,根据max_token
query_token_num = self.llm.num_tokens_from_messages([query])
history_token_num = self.llm.num_tokens_from_messages([x for sublist in history for x in sublist])
template_token_num = self.llm.num_tokens_from_messages([prompt_template])
# logging.info(f"<self.llm.token_window, self.llm.max_token, self.llm.offcut_token, query_token_num, history_token_num, template_token_num>, types = {type(self.llm.token_window), type(self.llm.max_token), type(self.llm.offcut_token), type(query_token_num), type(history_token_num), type(template_token_num)}, values = {query_token_num, history_token_num, template_token_num}")
limited_token_nums = self.llm.token_window - self.llm.max_token - self.llm.offcut_token - query_token_num - history_token_num - template_token_num
new_source_docs = []
total_token_num = 0
for doc in source_docs:
doc_token_num = self.llm.num_tokens_from_docs([doc])
if total_token_num + doc_token_num <= limited_token_nums:
new_source_docs.append(doc)
total_token_num += doc_token_num
else:
remaining_token_num = limited_token_nums - total_token_num
doc_content = doc.page_content
doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
while doc_content_token_num > remaining_token_num:
# Truncate the doc content to fit the remaining tokens
if len(doc_content) > 2 * self.llm.truncate_len:
doc_content = doc_content[self.llm.truncate_len: -self.llm.truncate_len]
else: # 如果最后不够truncate_len长度的2倍,说明不够切了,直接赋值为空
doc_content = ""
break
doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
doc.page_content = doc_content
new_source_docs.append(doc)
break
debug_logger.info(f"limited token nums: {limited_token_nums}")
debug_logger.info(f"template token nums: {template_token_num}")
debug_logger.info(f"query token nums: {query_token_num}")
debug_logger.info(f"history token nums: {history_token_num}")
debug_logger.info(f"new_source_docs token nums: {self.llm.num_tokens_from_docs(new_source_docs)}")
return new_source_docs
def generate_prompt(self, query, source_docs, prompt_template):
context = "\n".join([doc.page_content for doc in source_docs])
prompt = prompt_template.replace("{question}", query).replace("{context}", context)
return prompt
def rerank_documents(self, query, source_documents):
return self.rerank_documents_for_local(query, source_documents)
def rerank_documents_for_local(self, query, source_documents):
if len(query) > 300: # tokens数量超过300时不使用local rerank
return source_documents
try:
response = requests.post(f"{self.local_rerank_service_url}/rerank",
json={"passages": [doc.page_content for doc in source_documents], "query": query})
scores = response.json()
for idx, score in enumerate(scores):
source_documents[idx].metadata['score'] = score
source_documents = sorted(source_documents, key=lambda x: x.metadata['score'], reverse=True)
except Exception as e:
debug_logger.error("rerank error: %s", traceback.format_exc())
debug_logger.warning("rerank error, use origin retrieval docs")
return source_documents
def get_knowledge_based_answer(self, query, milvus_kb, chat_history=None, streaming: bool = STREAMING,
rerank: bool = False):
if chat_history is None:
chat_history = []
retrieval_queries = [query]
source_documents = self.get_source_documents(retrieval_queries, milvus_kb)
deduplicated_docs = self.deduplicate_documents(source_documents)
retrieval_documents = sorted(deduplicated_docs, key=lambda x: x.metadata['score'], reverse=True)
if rerank and len(retrieval_documents) > 1:
debug_logger.info(f"use rerank, rerank docs num: {len(retrieval_documents)}")
retrieval_documents = self.rerank_documents(query, retrieval_documents)
source_documents = self.reprocess_source_documents(query=query,
source_docs=retrieval_documents,
history=chat_history,
prompt_template=PROMPT_TEMPLATE)
prompt = self.generate_prompt(query=query,
source_docs=source_documents,
prompt_template=PROMPT_TEMPLATE)
t1 = time.time()
for answer_result in self.llm.generatorAnswer(prompt=prompt,
history=chat_history,
streaming=streaming):
resp = answer_result.llm_output["answer"]
prompt = answer_result.prompt
history = answer_result.history
# logging.info(f"[debug] get_knowledge_based_answer history = {history}")
history[-1][0] = query
response = {"query": query,
"prompt": prompt,
"result": resp,
"retrieval_documents": retrieval_documents,
"source_documents": source_documents}
yield response, history
t2 = time.time()
debug_logger.info(f"LLM time: {t2 - t1}")
debug_logger = logging.getLogger('debug_logger').setLevel(logging.INFO)' not in os.environ else os.environ[
async def rename_knowledge_base(req: request):
local_doc_qa: LocalDocQA = req.app.ctx.local_doc_qa
user_id = safe_get(req, 'user_id')
if user_id is None:
return sanic_json({"code": 2002, "msg": f'输入非法!request.json:{req.json},请检查!'})
is_valid = validate_user_id(user_id)
if not is_valid:
return sanic_json({"code": 2005, "msg": get_invalid_user_id_msg(user_id=user_id)})
debug_logger.info("rename_knowledge_base %s", user_id)
kb_id = safe_get(req, 'kb_id')
new_kb_name = safe_get(req, 'new_kb_name')
not_exist_kb_ids = local_doc_qa.milvus_summary.check_kb_exist(user_id, [kb_id])
if not_exist_kb_ids:
return sanic_json({"code": 2003, "msg": "fail, knowledge Base {} not found".format(not_exist_kb_ids[0])})
local_doc_qa.milvus_summary.rename_knowledge_base(user_id, kb_id, new_kb_name)
return sanic_json({"code": 200, "msg": "Knowledge Base {} rename success".format(kb_id)}) | null |
20,481 | from qanything_kernel.core.local_file import LocalFile
from qanything_kernel.core.local_doc_qa import LocalDocQA
from qanything_kernel.utils.general_utils import *
from qanything_kernel.utils.custom_log import debug_logger, qa_logger
from sanic.response import ResponseStream
from sanic.response import json as sanic_json
from sanic.response import text as sanic_text
from sanic import request
import uuid
import json
import asyncio
import urllib.parse
import re
from datetime import datetime
import os
class LocalDocQA:
def __init__(self):
self.llm: object = None
self.embeddings: object = None
self.top_k: int = VECTOR_SEARCH_TOP_K
self.chunk_size: int = CHUNK_SIZE
self.chunk_conent: bool = True
self.score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
self.milvus_kbs: List[MilvusClient] = []
self.milvus_summary: KnowledgeBaseManager = None
self.mode: str = None
self.local_rerank_service_url = "http://0.0.0.0:8776"
self.ocr_url = 'http://0.0.0.0:8010/ocr'
def get_ocr_result(self, image_data: dict):
response = requests.post(self.ocr_url, json=image_data)
response.raise_for_status() # 如果请求返回了错误状态码,将会抛出异常
return response.json()['results']
def init_cfg(self, mode='local'):
self.mode = mode
self.embeddings = YouDaoLocalEmbeddings()
if self.mode == 'local':
self.llm: ZiyueLLM = ZiyueLLM()
else:
self.llm: OpenAILLM = OpenAILLM()
self.milvus_summary = KnowledgeBaseManager(self.mode)
def create_milvus_collection(self, user_id, kb_id, kb_name):
milvus_kb = MilvusClient(self.mode, user_id, [kb_id])
self.milvus_kbs.append(milvus_kb)
self.milvus_summary.new_milvus_base(kb_id, user_id, kb_name)
def match_milvus_kb(self, user_id, kb_ids):
for kb in self.milvus_kbs:
if user_id == kb.user_id and kb_ids == kb.kb_ids:
debug_logger.info(f'match milvus_client: {kb}')
return kb
milvus_kb = MilvusClient(self.mode, user_id, kb_ids)
self.milvus_kbs.append(milvus_kb)
return milvus_kb
async def insert_files_to_milvus(self, user_id, kb_id, local_files: List[LocalFile]):
debug_logger.info(f'insert_files_to_milvus: {kb_id}')
milvus_kv = self.match_milvus_kb(user_id, [kb_id])
assert milvus_kv is not None
success_list = []
failed_list = []
for local_file in local_files:
start = time.time()
try:
local_file.split_file_to_docs(self.get_ocr_result)
content_length = sum([len(doc.page_content) for doc in local_file.docs])
except Exception as e:
error_info = f'split error: {traceback.format_exc()}'
debug_logger.error(error_info)
self.milvus_summary.update_file_status(local_file.file_id, status='red')
failed_list.append(local_file)
continue
end = time.time()
self.milvus_summary.update_content_length(local_file.file_id, content_length)
debug_logger.info(f'split time: {end - start} {len(local_file.docs)}')
start = time.time()
try:
local_file.create_embedding()
except Exception as e:
error_info = f'embedding error: {traceback.format_exc()}'
debug_logger.error(error_info)
self.milvus_summary.update_file_status(local_file.file_id, status='red')
failed_list.append(local_file)
continue
end = time.time()
debug_logger.info(f'embedding time: {end - start} {len(local_file.embs)}')
self.milvus_summary.update_chunk_size(local_file.file_id, len(local_file.docs))
ret = await milvus_kv.insert_files(local_file.file_id, local_file.file_name, local_file.file_path,
local_file.docs, local_file.embs)
insert_time = time.time()
debug_logger.info(f'insert time: {insert_time - end}')
if ret:
self.milvus_summary.update_file_status(local_file.file_id, status='green')
success_list.append(local_file)
else:
self.milvus_summary.update_file_status(local_file.file_id, status='yellow')
failed_list.append(local_file)
debug_logger.info(
f"insert_to_milvus: success num: {len(success_list)}, failed num: {len(failed_list)}")
def deduplicate_documents(self, source_docs):
unique_docs = set()
deduplicated_docs = []
for doc in source_docs:
if doc.page_content not in unique_docs:
unique_docs.add(doc.page_content)
deduplicated_docs.append(doc)
return deduplicated_docs
def get_source_documents(self, queries, milvus_kb, cosine_thresh=None, top_k=None):
milvus_kb: MilvusClient
if not top_k:
top_k = self.top_k
source_documents = []
embs = self.embeddings._get_len_safe_embeddings(queries)
t1 = time.time()
batch_result = milvus_kb.search_emb_async(embs=embs, top_k=top_k)
t2 = time.time()
debug_logger.info(f"milvus search time: {t2 - t1}")
for query, query_docs in zip(queries, batch_result):
for doc in query_docs:
doc.metadata['retrieval_query'] = query # 添加查询到文档的元数据中
doc.metadata['embed_version'] = self.embeddings.embed_version
source_documents.append(doc)
if cosine_thresh:
source_documents = [item for item in source_documents if float(item.metadata['score']) > cosine_thresh]
return source_documents
def reprocess_source_documents(self, query: str,
source_docs: List[Document],
history: List[str],
prompt_template: str) -> List[Document]:
# 组装prompt,根据max_token
query_token_num = self.llm.num_tokens_from_messages([query])
history_token_num = self.llm.num_tokens_from_messages([x for sublist in history for x in sublist])
template_token_num = self.llm.num_tokens_from_messages([prompt_template])
# logging.info(f"<self.llm.token_window, self.llm.max_token, self.llm.offcut_token, query_token_num, history_token_num, template_token_num>, types = {type(self.llm.token_window), type(self.llm.max_token), type(self.llm.offcut_token), type(query_token_num), type(history_token_num), type(template_token_num)}, values = {query_token_num, history_token_num, template_token_num}")
limited_token_nums = self.llm.token_window - self.llm.max_token - self.llm.offcut_token - query_token_num - history_token_num - template_token_num
new_source_docs = []
total_token_num = 0
for doc in source_docs:
doc_token_num = self.llm.num_tokens_from_docs([doc])
if total_token_num + doc_token_num <= limited_token_nums:
new_source_docs.append(doc)
total_token_num += doc_token_num
else:
remaining_token_num = limited_token_nums - total_token_num
doc_content = doc.page_content
doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
while doc_content_token_num > remaining_token_num:
# Truncate the doc content to fit the remaining tokens
if len(doc_content) > 2 * self.llm.truncate_len:
doc_content = doc_content[self.llm.truncate_len: -self.llm.truncate_len]
else: # 如果最后不够truncate_len长度的2倍,说明不够切了,直接赋值为空
doc_content = ""
break
doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
doc.page_content = doc_content
new_source_docs.append(doc)
break
debug_logger.info(f"limited token nums: {limited_token_nums}")
debug_logger.info(f"template token nums: {template_token_num}")
debug_logger.info(f"query token nums: {query_token_num}")
debug_logger.info(f"history token nums: {history_token_num}")
debug_logger.info(f"new_source_docs token nums: {self.llm.num_tokens_from_docs(new_source_docs)}")
return new_source_docs
def generate_prompt(self, query, source_docs, prompt_template):
context = "\n".join([doc.page_content for doc in source_docs])
prompt = prompt_template.replace("{question}", query).replace("{context}", context)
return prompt
def rerank_documents(self, query, source_documents):
return self.rerank_documents_for_local(query, source_documents)
def rerank_documents_for_local(self, query, source_documents):
if len(query) > 300: # tokens数量超过300时不使用local rerank
return source_documents
try:
response = requests.post(f"{self.local_rerank_service_url}/rerank",
json={"passages": [doc.page_content for doc in source_documents], "query": query})
scores = response.json()
for idx, score in enumerate(scores):
source_documents[idx].metadata['score'] = score
source_documents = sorted(source_documents, key=lambda x: x.metadata['score'], reverse=True)
except Exception as e:
debug_logger.error("rerank error: %s", traceback.format_exc())
debug_logger.warning("rerank error, use origin retrieval docs")
return source_documents
def get_knowledge_based_answer(self, query, milvus_kb, chat_history=None, streaming: bool = STREAMING,
rerank: bool = False):
if chat_history is None:
chat_history = []
retrieval_queries = [query]
source_documents = self.get_source_documents(retrieval_queries, milvus_kb)
deduplicated_docs = self.deduplicate_documents(source_documents)
retrieval_documents = sorted(deduplicated_docs, key=lambda x: x.metadata['score'], reverse=True)
if rerank and len(retrieval_documents) > 1:
debug_logger.info(f"use rerank, rerank docs num: {len(retrieval_documents)}")
retrieval_documents = self.rerank_documents(query, retrieval_documents)
source_documents = self.reprocess_source_documents(query=query,
source_docs=retrieval_documents,
history=chat_history,
prompt_template=PROMPT_TEMPLATE)
prompt = self.generate_prompt(query=query,
source_docs=source_documents,
prompt_template=PROMPT_TEMPLATE)
t1 = time.time()
for answer_result in self.llm.generatorAnswer(prompt=prompt,
history=chat_history,
streaming=streaming):
resp = answer_result.llm_output["answer"]
prompt = answer_result.prompt
history = answer_result.history
# logging.info(f"[debug] get_knowledge_based_answer history = {history}")
history[-1][0] = query
response = {"query": query,
"prompt": prompt,
"result": resp,
"retrieval_documents": retrieval_documents,
"source_documents": source_documents}
yield response, history
t2 = time.time()
debug_logger.info(f"LLM time: {t2 - t1}")
debug_logger = logging.getLogger('debug_logger').setLevel(logging.INFO)' not in os.environ else os.environ[
async def delete_docs(req: request):
local_doc_qa: LocalDocQA = req.app.ctx.local_doc_qa
user_id = safe_get(req, 'user_id')
if user_id is None:
return sanic_json({"code": 2002, "msg": f'输入非法!request.json:{req.json},请检查!'})
is_valid = validate_user_id(user_id)
if not is_valid:
return sanic_json({"code": 2005, "msg": get_invalid_user_id_msg(user_id=user_id)})
debug_logger.info("delete_docs %s", user_id)
kb_id = safe_get(req, 'kb_id')
file_ids = safe_get(req, "file_ids")
not_exist_kb_ids = local_doc_qa.milvus_summary.check_kb_exist(user_id, [kb_id])
if not_exist_kb_ids:
return sanic_json({"code": 2003, "msg": "fail, knowledge Base {} not found".format(not_exist_kb_ids[0])})
valid_file_infos = local_doc_qa.milvus_summary.check_file_exist(user_id, kb_id, file_ids)
if len(valid_file_infos) == 0:
return sanic_json({"code": 2004, "msg": "fail, files {} not found".format(file_ids)})
milvus_kb = local_doc_qa.match_milvus_kb(user_id, [kb_id])
milvus_kb.delete_files(file_ids)
# 删除数据库中的记录
local_doc_qa.milvus_summary.delete_files(kb_id, file_ids)
return sanic_json({"code": 200, "msg": "documents {} delete success".format(file_ids)}) | null |
20,482 | from qanything_kernel.core.local_file import LocalFile
from qanything_kernel.core.local_doc_qa import LocalDocQA
from qanything_kernel.utils.general_utils import *
from qanything_kernel.utils.custom_log import debug_logger, qa_logger
from sanic.response import ResponseStream
from sanic.response import json as sanic_json
from sanic.response import text as sanic_text
from sanic import request
import uuid
import json
import asyncio
import urllib.parse
import re
from datetime import datetime
import os
class LocalDocQA:
def __init__(self):
self.llm: object = None
self.embeddings: object = None
self.top_k: int = VECTOR_SEARCH_TOP_K
self.chunk_size: int = CHUNK_SIZE
self.chunk_conent: bool = True
self.score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
self.milvus_kbs: List[MilvusClient] = []
self.milvus_summary: KnowledgeBaseManager = None
self.mode: str = None
self.local_rerank_service_url = "http://0.0.0.0:8776"
self.ocr_url = 'http://0.0.0.0:8010/ocr'
def get_ocr_result(self, image_data: dict):
response = requests.post(self.ocr_url, json=image_data)
response.raise_for_status() # 如果请求返回了错误状态码,将会抛出异常
return response.json()['results']
def init_cfg(self, mode='local'):
self.mode = mode
self.embeddings = YouDaoLocalEmbeddings()
if self.mode == 'local':
self.llm: ZiyueLLM = ZiyueLLM()
else:
self.llm: OpenAILLM = OpenAILLM()
self.milvus_summary = KnowledgeBaseManager(self.mode)
def create_milvus_collection(self, user_id, kb_id, kb_name):
milvus_kb = MilvusClient(self.mode, user_id, [kb_id])
self.milvus_kbs.append(milvus_kb)
self.milvus_summary.new_milvus_base(kb_id, user_id, kb_name)
def match_milvus_kb(self, user_id, kb_ids):
for kb in self.milvus_kbs:
if user_id == kb.user_id and kb_ids == kb.kb_ids:
debug_logger.info(f'match milvus_client: {kb}')
return kb
milvus_kb = MilvusClient(self.mode, user_id, kb_ids)
self.milvus_kbs.append(milvus_kb)
return milvus_kb
async def insert_files_to_milvus(self, user_id, kb_id, local_files: List[LocalFile]):
debug_logger.info(f'insert_files_to_milvus: {kb_id}')
milvus_kv = self.match_milvus_kb(user_id, [kb_id])
assert milvus_kv is not None
success_list = []
failed_list = []
for local_file in local_files:
start = time.time()
try:
local_file.split_file_to_docs(self.get_ocr_result)
content_length = sum([len(doc.page_content) for doc in local_file.docs])
except Exception as e:
error_info = f'split error: {traceback.format_exc()}'
debug_logger.error(error_info)
self.milvus_summary.update_file_status(local_file.file_id, status='red')
failed_list.append(local_file)
continue
end = time.time()
self.milvus_summary.update_content_length(local_file.file_id, content_length)
debug_logger.info(f'split time: {end - start} {len(local_file.docs)}')
start = time.time()
try:
local_file.create_embedding()
except Exception as e:
error_info = f'embedding error: {traceback.format_exc()}'
debug_logger.error(error_info)
self.milvus_summary.update_file_status(local_file.file_id, status='red')
failed_list.append(local_file)
continue
end = time.time()
debug_logger.info(f'embedding time: {end - start} {len(local_file.embs)}')
self.milvus_summary.update_chunk_size(local_file.file_id, len(local_file.docs))
ret = await milvus_kv.insert_files(local_file.file_id, local_file.file_name, local_file.file_path,
local_file.docs, local_file.embs)
insert_time = time.time()
debug_logger.info(f'insert time: {insert_time - end}')
if ret:
self.milvus_summary.update_file_status(local_file.file_id, status='green')
success_list.append(local_file)
else:
self.milvus_summary.update_file_status(local_file.file_id, status='yellow')
failed_list.append(local_file)
debug_logger.info(
f"insert_to_milvus: success num: {len(success_list)}, failed num: {len(failed_list)}")
def deduplicate_documents(self, source_docs):
unique_docs = set()
deduplicated_docs = []
for doc in source_docs:
if doc.page_content not in unique_docs:
unique_docs.add(doc.page_content)
deduplicated_docs.append(doc)
return deduplicated_docs
def get_source_documents(self, queries, milvus_kb, cosine_thresh=None, top_k=None):
milvus_kb: MilvusClient
if not top_k:
top_k = self.top_k
source_documents = []
embs = self.embeddings._get_len_safe_embeddings(queries)
t1 = time.time()
batch_result = milvus_kb.search_emb_async(embs=embs, top_k=top_k)
t2 = time.time()
debug_logger.info(f"milvus search time: {t2 - t1}")
for query, query_docs in zip(queries, batch_result):
for doc in query_docs:
doc.metadata['retrieval_query'] = query # 添加查询到文档的元数据中
doc.metadata['embed_version'] = self.embeddings.embed_version
source_documents.append(doc)
if cosine_thresh:
source_documents = [item for item in source_documents if float(item.metadata['score']) > cosine_thresh]
return source_documents
def reprocess_source_documents(self, query: str,
source_docs: List[Document],
history: List[str],
prompt_template: str) -> List[Document]:
# 组装prompt,根据max_token
query_token_num = self.llm.num_tokens_from_messages([query])
history_token_num = self.llm.num_tokens_from_messages([x for sublist in history for x in sublist])
template_token_num = self.llm.num_tokens_from_messages([prompt_template])
# logging.info(f"<self.llm.token_window, self.llm.max_token, self.llm.offcut_token, query_token_num, history_token_num, template_token_num>, types = {type(self.llm.token_window), type(self.llm.max_token), type(self.llm.offcut_token), type(query_token_num), type(history_token_num), type(template_token_num)}, values = {query_token_num, history_token_num, template_token_num}")
limited_token_nums = self.llm.token_window - self.llm.max_token - self.llm.offcut_token - query_token_num - history_token_num - template_token_num
new_source_docs = []
total_token_num = 0
for doc in source_docs:
doc_token_num = self.llm.num_tokens_from_docs([doc])
if total_token_num + doc_token_num <= limited_token_nums:
new_source_docs.append(doc)
total_token_num += doc_token_num
else:
remaining_token_num = limited_token_nums - total_token_num
doc_content = doc.page_content
doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
while doc_content_token_num > remaining_token_num:
# Truncate the doc content to fit the remaining tokens
if len(doc_content) > 2 * self.llm.truncate_len:
doc_content = doc_content[self.llm.truncate_len: -self.llm.truncate_len]
else: # 如果最后不够truncate_len长度的2倍,说明不够切了,直接赋值为空
doc_content = ""
break
doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
doc.page_content = doc_content
new_source_docs.append(doc)
break
debug_logger.info(f"limited token nums: {limited_token_nums}")
debug_logger.info(f"template token nums: {template_token_num}")
debug_logger.info(f"query token nums: {query_token_num}")
debug_logger.info(f"history token nums: {history_token_num}")
debug_logger.info(f"new_source_docs token nums: {self.llm.num_tokens_from_docs(new_source_docs)}")
return new_source_docs
def generate_prompt(self, query, source_docs, prompt_template):
context = "\n".join([doc.page_content for doc in source_docs])
prompt = prompt_template.replace("{question}", query).replace("{context}", context)
return prompt
def rerank_documents(self, query, source_documents):
return self.rerank_documents_for_local(query, source_documents)
def rerank_documents_for_local(self, query, source_documents):
if len(query) > 300: # tokens数量超过300时不使用local rerank
return source_documents
try:
response = requests.post(f"{self.local_rerank_service_url}/rerank",
json={"passages": [doc.page_content for doc in source_documents], "query": query})
scores = response.json()
for idx, score in enumerate(scores):
source_documents[idx].metadata['score'] = score
source_documents = sorted(source_documents, key=lambda x: x.metadata['score'], reverse=True)
except Exception as e:
debug_logger.error("rerank error: %s", traceback.format_exc())
debug_logger.warning("rerank error, use origin retrieval docs")
return source_documents
def get_knowledge_based_answer(self, query, milvus_kb, chat_history=None, streaming: bool = STREAMING,
rerank: bool = False):
if chat_history is None:
chat_history = []
retrieval_queries = [query]
source_documents = self.get_source_documents(retrieval_queries, milvus_kb)
deduplicated_docs = self.deduplicate_documents(source_documents)
retrieval_documents = sorted(deduplicated_docs, key=lambda x: x.metadata['score'], reverse=True)
if rerank and len(retrieval_documents) > 1:
debug_logger.info(f"use rerank, rerank docs num: {len(retrieval_documents)}")
retrieval_documents = self.rerank_documents(query, retrieval_documents)
source_documents = self.reprocess_source_documents(query=query,
source_docs=retrieval_documents,
history=chat_history,
prompt_template=PROMPT_TEMPLATE)
prompt = self.generate_prompt(query=query,
source_docs=source_documents,
prompt_template=PROMPT_TEMPLATE)
t1 = time.time()
for answer_result in self.llm.generatorAnswer(prompt=prompt,
history=chat_history,
streaming=streaming):
resp = answer_result.llm_output["answer"]
prompt = answer_result.prompt
history = answer_result.history
# logging.info(f"[debug] get_knowledge_based_answer history = {history}")
history[-1][0] = query
response = {"query": query,
"prompt": prompt,
"result": resp,
"retrieval_documents": retrieval_documents,
"source_documents": source_documents}
yield response, history
t2 = time.time()
debug_logger.info(f"LLM time: {t2 - t1}")
debug_logger = logging.getLogger('debug_logger').setLevel(logging.INFO)' not in os.environ else os.environ[
async def get_total_status(req: request):
    """Report, per user and per knowledge base, file counts grouped by parse status.

    A falsy (but non-None) ``user_id`` means "all known users"; otherwise the
    report covers only the requested user.
    """
    local_doc_qa: LocalDocQA = req.app.ctx.local_doc_qa
    user_id = safe_get(req, 'user_id')
    if user_id is None:
        return sanic_json({"code": 2002, "msg": f'输入非法!request.json:{req.json},请检查!'})
    if not validate_user_id(user_id):
        return sanic_json({"code": 2005, "msg": get_invalid_user_id_msg(user_id=user_id)})
    debug_logger.info('get_total_status %s', user_id)
    if user_id:
        users = [user_id]
    else:
        users = [row[0] for row in local_doc_qa.milvus_summary.get_users()]
    res = {}
    for user in users:
        report = {}
        for kb_id, kb_name in local_doc_qa.milvus_summary.get_knowledge_bases(user):
            # Fetch every status bucket for this knowledge base.
            by_status = {
                s: local_doc_qa.milvus_summary.get_file_by_status([kb_id], s)
                for s in ('gray', 'red', 'yellow', 'green')
            }
            report[kb_name + kb_id] = {'green': len(by_status['green']),
                                       'yellow': len(by_status['yellow']),
                                       'red': len(by_status['red']),
                                       'gray': len(by_status['gray'])}
        res[user] = report
    return sanic_json({"code": 200, "status": res})
20,483 | from qanything_kernel.core.local_file import LocalFile
from qanything_kernel.core.local_doc_qa import LocalDocQA
from qanything_kernel.utils.general_utils import *
from qanything_kernel.utils.custom_log import debug_logger, qa_logger
from sanic.response import ResponseStream
from sanic.response import json as sanic_json
from sanic.response import text as sanic_text
from sanic import request
import uuid
import json
import asyncio
import urllib.parse
import re
from datetime import datetime
import os
class LocalDocQA:
    """Core knowledge-base QA service.

    Ties together the embedding model, Milvus vector stores, the LLM backend,
    a summary (metadata) database, and sidecar OCR / rerank HTTP services.
    """

    def __init__(self):
        # Backends are lazily configured in init_cfg(); typed loosely on purpose.
        self.llm: object = None
        self.embeddings: object = None
        self.top_k: int = VECTOR_SEARCH_TOP_K
        self.chunk_size: int = CHUNK_SIZE
        self.chunk_conent: bool = True
        self.score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
        # Cache of MilvusClient instances, one per (user_id, kb_ids) combination.
        self.milvus_kbs: List[MilvusClient] = []
        self.milvus_summary: KnowledgeBaseManager = None
        self.mode: str = None
        # Sidecar services; presumably deployed on the same host — TODO confirm.
        self.local_rerank_service_url = "http://0.0.0.0:8776"
        self.ocr_url = 'http://0.0.0.0:8010/ocr'

    def get_ocr_result(self, image_data: dict):
        """POST an image payload to the OCR service and return its 'results' field."""
        response = requests.post(self.ocr_url, json=image_data)
        response.raise_for_status()  # raises if the request returned an error status code
        return response.json()['results']

    def init_cfg(self, mode='local'):
        """Initialize embeddings, the LLM backend, and the summary DB for *mode*."""
        self.mode = mode
        self.embeddings = YouDaoLocalEmbeddings()
        if self.mode == 'local':
            self.llm: ZiyueLLM = ZiyueLLM()
        else:
            self.llm: OpenAILLM = OpenAILLM()
        self.milvus_summary = KnowledgeBaseManager(self.mode)

    def create_milvus_collection(self, user_id, kb_id, kb_name):
        """Create a Milvus collection for a new knowledge base and register it."""
        milvus_kb = MilvusClient(self.mode, user_id, [kb_id])
        self.milvus_kbs.append(milvus_kb)
        self.milvus_summary.new_milvus_base(kb_id, user_id, kb_name)

    def match_milvus_kb(self, user_id, kb_ids):
        """Return a cached MilvusClient for (user_id, kb_ids), creating one on miss."""
        for kb in self.milvus_kbs:
            if user_id == kb.user_id and kb_ids == kb.kb_ids:
                debug_logger.info(f'match milvus_client: {kb}')
                return kb
        milvus_kb = MilvusClient(self.mode, user_id, kb_ids)
        self.milvus_kbs.append(milvus_kb)
        return milvus_kb

    async def insert_files_to_milvus(self, user_id, kb_id, local_files: List[LocalFile]):
        """Split, embed, and insert each local file into the Milvus collection.

        File status in the summary DB is updated per file: 'red' on a
        split/embedding failure, 'green' on successful insert, 'yellow'
        when the Milvus insert itself fails.
        """
        debug_logger.info(f'insert_files_to_milvus: {kb_id}')
        milvus_kv = self.match_milvus_kb(user_id, [kb_id])
        assert milvus_kv is not None
        success_list = []
        failed_list = []
        for local_file in local_files:
            start = time.time()
            try:
                # OCR callback is passed in so image-bearing files can be parsed.
                local_file.split_file_to_docs(self.get_ocr_result)
                content_length = sum([len(doc.page_content) for doc in local_file.docs])
            except Exception as e:
                error_info = f'split error: {traceback.format_exc()}'
                debug_logger.error(error_info)
                self.milvus_summary.update_file_status(local_file.file_id, status='red')
                failed_list.append(local_file)
                continue
            end = time.time()
            self.milvus_summary.update_content_length(local_file.file_id, content_length)
            debug_logger.info(f'split time: {end - start} {len(local_file.docs)}')
            start = time.time()
            try:
                local_file.create_embedding()
            except Exception as e:
                error_info = f'embedding error: {traceback.format_exc()}'
                debug_logger.error(error_info)
                self.milvus_summary.update_file_status(local_file.file_id, status='red')
                failed_list.append(local_file)
                continue
            end = time.time()
            debug_logger.info(f'embedding time: {end - start} {len(local_file.embs)}')
            self.milvus_summary.update_chunk_size(local_file.file_id, len(local_file.docs))
            ret = await milvus_kv.insert_files(local_file.file_id, local_file.file_name, local_file.file_path,
                                               local_file.docs, local_file.embs)
            insert_time = time.time()
            debug_logger.info(f'insert time: {insert_time - end}')
            if ret:
                self.milvus_summary.update_file_status(local_file.file_id, status='green')
                success_list.append(local_file)
            else:
                self.milvus_summary.update_file_status(local_file.file_id, status='yellow')
                failed_list.append(local_file)
        debug_logger.info(
            f"insert_to_milvus: success num: {len(success_list)}, failed num: {len(failed_list)}")

    def deduplicate_documents(self, source_docs):
        """Drop documents whose page_content duplicates an earlier one (order kept)."""
        unique_docs = set()
        deduplicated_docs = []
        for doc in source_docs:
            if doc.page_content not in unique_docs:
                unique_docs.add(doc.page_content)
                deduplicated_docs.append(doc)
        return deduplicated_docs

    def get_source_documents(self, queries, milvus_kb, cosine_thresh=None, top_k=None):
        """Embed *queries* and search Milvus; return the matched documents.

        Each document is tagged with the query that retrieved it and the
        embedding model version. When *cosine_thresh* is given, documents
        scoring at or below it are filtered out.
        """
        milvus_kb: MilvusClient
        if not top_k:
            top_k = self.top_k
        source_documents = []
        embs = self.embeddings._get_len_safe_embeddings(queries)
        t1 = time.time()
        batch_result = milvus_kb.search_emb_async(embs=embs, top_k=top_k)
        t2 = time.time()
        debug_logger.info(f"milvus search time: {t2 - t1}")
        for query, query_docs in zip(queries, batch_result):
            for doc in query_docs:
                doc.metadata['retrieval_query'] = query  # record which query retrieved this document
                doc.metadata['embed_version'] = self.embeddings.embed_version
                source_documents.append(doc)
        if cosine_thresh:
            source_documents = [item for item in source_documents if float(item.metadata['score']) > cosine_thresh]
        return source_documents

    def reprocess_source_documents(self, query: str,
                                   source_docs: List[Document],
                                   history: List[str],
                                   prompt_template: str) -> List[Document]:
        # Assemble the prompt within the LLM's max_token budget: keep whole
        # docs while they fit, then truncate the first doc that overflows.
        query_token_num = self.llm.num_tokens_from_messages([query])
        history_token_num = self.llm.num_tokens_from_messages([x for sublist in history for x in sublist])
        template_token_num = self.llm.num_tokens_from_messages([prompt_template])
        # logging.info(f"<self.llm.token_window, self.llm.max_token, self.llm.offcut_token, query_token_num, history_token_num, template_token_num>, types = {type(self.llm.token_window), type(self.llm.max_token), type(self.llm.offcut_token), type(query_token_num), type(history_token_num), type(template_token_num)}, values = {query_token_num, history_token_num, template_token_num}")
        limited_token_nums = self.llm.token_window - self.llm.max_token - self.llm.offcut_token - query_token_num - history_token_num - template_token_num
        new_source_docs = []
        total_token_num = 0
        for doc in source_docs:
            doc_token_num = self.llm.num_tokens_from_docs([doc])
            if total_token_num + doc_token_num <= limited_token_nums:
                new_source_docs.append(doc)
                total_token_num += doc_token_num
            else:
                remaining_token_num = limited_token_nums - total_token_num
                doc_content = doc.page_content
                doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
                while doc_content_token_num > remaining_token_num:
                    # Truncate the doc content to fit the remaining tokens
                    if len(doc_content) > 2 * self.llm.truncate_len:
                        doc_content = doc_content[self.llm.truncate_len: -self.llm.truncate_len]
                    else:  # less than 2*truncate_len remains, so it cannot be trimmed further; drop the content entirely
                        doc_content = ""
                        break
                    doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
                doc.page_content = doc_content
                new_source_docs.append(doc)
                break
        debug_logger.info(f"limited token nums: {limited_token_nums}")
        debug_logger.info(f"template token nums: {template_token_num}")
        debug_logger.info(f"query token nums: {query_token_num}")
        debug_logger.info(f"history token nums: {history_token_num}")
        debug_logger.info(f"new_source_docs token nums: {self.llm.num_tokens_from_docs(new_source_docs)}")
        return new_source_docs

    def generate_prompt(self, query, source_docs, prompt_template):
        """Fill *prompt_template*'s {question}/{context} slots from query and docs."""
        context = "\n".join([doc.page_content for doc in source_docs])
        prompt = prompt_template.replace("{question}", query).replace("{context}", context)
        return prompt

    def rerank_documents(self, query, source_documents):
        """Rerank retrieved documents; currently delegates to the local service."""
        return self.rerank_documents_for_local(query, source_documents)

    def rerank_documents_for_local(self, query, source_documents):
        """Rescore documents via the local rerank HTTP service and sort by score.

        Falls back to the original ordering on any error.
        """
        if len(query) > 300:  # skip local rerank beyond 300 (comment said "tokens", but this checks character length)
            return source_documents
        try:
            response = requests.post(f"{self.local_rerank_service_url}/rerank",
                                     json={"passages": [doc.page_content for doc in source_documents], "query": query})
            scores = response.json()
            for idx, score in enumerate(scores):
                source_documents[idx].metadata['score'] = score
            source_documents = sorted(source_documents, key=lambda x: x.metadata['score'], reverse=True)
        except Exception as e:
            debug_logger.error("rerank error: %s", traceback.format_exc())
            debug_logger.warning("rerank error, use origin retrieval docs")
        return source_documents

    def get_knowledge_based_answer(self, query, milvus_kb, chat_history=None, streaming: bool = STREAMING,
                                   rerank: bool = False):
        """Retrieve relevant docs for *query*, build a prompt, and stream LLM answers.

        Generator: yields ``(response_dict, history)`` once per LLM chunk.
        """
        if chat_history is None:
            chat_history = []
        retrieval_queries = [query]
        source_documents = self.get_source_documents(retrieval_queries, milvus_kb)
        deduplicated_docs = self.deduplicate_documents(source_documents)
        retrieval_documents = sorted(deduplicated_docs, key=lambda x: x.metadata['score'], reverse=True)
        if rerank and len(retrieval_documents) > 1:
            debug_logger.info(f"use rerank, rerank docs num: {len(retrieval_documents)}")
            retrieval_documents = self.rerank_documents(query, retrieval_documents)
        # Trim the doc list so prompt + history + docs fit the LLM token window.
        source_documents = self.reprocess_source_documents(query=query,
                                                           source_docs=retrieval_documents,
                                                           history=chat_history,
                                                           prompt_template=PROMPT_TEMPLATE)
        prompt = self.generate_prompt(query=query,
                                      source_docs=source_documents,
                                      prompt_template=PROMPT_TEMPLATE)
        t1 = time.time()
        for answer_result in self.llm.generatorAnswer(prompt=prompt,
                                                      history=chat_history,
                                                      streaming=streaming):
            resp = answer_result.llm_output["answer"]
            prompt = answer_result.prompt
            history = answer_result.history
            # logging.info(f"[debug] get_knowledge_based_answer history = {history}")
            # The last history turn holds the full prompt; restore the raw query.
            history[-1][0] = query
            response = {"query": query,
                        "prompt": prompt,
                        "result": resp,
                        "retrieval_documents": retrieval_documents,
                        "source_documents": source_documents}
            yield response, history
        t2 = time.time()
        debug_logger.info(f"LLM time: {t2 - t1}")
debug_logger = logging.getLogger('debug_logger')  # NOTE(review): this line was garbled during extraction (fragments of a setLevel call and an os.environ conditional were fused); reconstructed minimally — verify against the upstream source.
async def clean_files_by_status(req: request):
    """Delete every file whose parse status matches (default 'gray') from the
    given knowledge bases, removing it from both Milvus and the summary DB."""
    local_doc_qa: LocalDocQA = req.app.ctx.local_doc_qa
    user_id = safe_get(req, 'user_id')
    if user_id is None:
        return sanic_json({"code": 2002, "msg": f'输入非法!request.json:{req.json},请检查!'})
    if not validate_user_id(user_id):
        return sanic_json({"code": 2005, "msg": get_invalid_user_id_msg(user_id=user_id)})
    debug_logger.info('clean_files_by_status %s', user_id)
    status = safe_get(req, 'status', default='gray')
    kb_ids = safe_get(req, 'kb_ids')
    if kb_ids:
        missing_kb_ids = local_doc_qa.milvus_summary.check_kb_exist(user_id, kb_ids)
        if missing_kb_ids:
            return sanic_json({"code": 2003, "msg": "fail, knowledge Base {} not found".format(missing_kb_ids)})
    else:
        # No explicit kb_ids: operate on all of the user's knowledge bases.
        kb_ids = [kb[0] for kb in local_doc_qa.milvus_summary.get_knowledge_bases(user_id)]
    matched_infos = local_doc_qa.milvus_summary.get_file_by_status(kb_ids, status)
    matched_ids = [info[0] for info in matched_infos]
    matched_names = [info[1] for info in matched_infos]
    debug_logger.info(f'{status} files number: {len(matched_names)}')
    # Delete the matched files from Milvus, then from the summary store.
    if matched_ids:
        milvus_kb = local_doc_qa.match_milvus_kb(user_id, kb_ids)
        milvus_kb.delete_files(matched_ids)
        for kb_id in kb_ids:
            local_doc_qa.milvus_summary.delete_files(kb_id, matched_ids)
    return sanic_json({"code": 200, "msg": f"delete {status} files success", "data": matched_names})
20,484 | from qanything_kernel.core.local_file import LocalFile
from qanything_kernel.core.local_doc_qa import LocalDocQA
from qanything_kernel.utils.general_utils import *
from qanything_kernel.utils.custom_log import debug_logger, qa_logger
from sanic.response import ResponseStream
from sanic.response import json as sanic_json
from sanic.response import text as sanic_text
from sanic import request
import uuid
import json
import asyncio
import urllib.parse
import re
from datetime import datetime
import os
class LocalDocQA:
def __init__(self):
def get_ocr_result(self, image_data: dict):
def init_cfg(self, mode='local'):
def create_milvus_collection(self, user_id, kb_id, kb_name):
def match_milvus_kb(self, user_id, kb_ids):
async def insert_files_to_milvus(self, user_id, kb_id, local_files: List[LocalFile]):
def deduplicate_documents(self, source_docs):
def get_source_documents(self, queries, milvus_kb, cosine_thresh=None, top_k=None):
def reprocess_source_documents(self, query: str,
source_docs: List[Document],
history: List[str],
prompt_template: str) -> List[Document]:
def generate_prompt(self, query, source_docs, prompt_template):
def rerank_documents(self, query, source_documents):
def rerank_documents_for_local(self, query, source_documents):
def get_knowledge_based_answer(self, query, milvus_kb, chat_history=None, streaming: bool = STREAMING,
rerank: bool = False):
debug_logger = logging.getLogger('debug_logger').setLevel(logging.INFO)' not in os.environ else os.environ[
async def local_doc_chat(req: request):
    """Answer a question against the user's knowledge bases.

    Supports streaming (SSE) and non-streaming responses; optionally reranks
    retrieved documents before prompting the LLM.

    Fixes vs. previous version:
    - removed a dead loop that hand-built ``source_documents`` only to have the
      result immediately overwritten by ``format_source_documents``;
    - log record key normalized to 'user_id' (was 'user_info' in the streaming
      branch, inconsistent with the non-streaming branch).
    """
    local_doc_qa: LocalDocQA = req.app.ctx.local_doc_qa
    user_id = safe_get(req, 'user_id')
    if user_id is None:
        return sanic_json({"code": 2002, "msg": f'输入非法!request.json:{req.json},请检查!'})
    is_valid = validate_user_id(user_id)
    if not is_valid:
        return sanic_json({"code": 2005, "msg": get_invalid_user_id_msg(user_id=user_id)})
    debug_logger.info('local_doc_chat %s', user_id)
    kb_ids = safe_get(req, 'kb_ids')
    question = safe_get(req, 'question')
    rerank = safe_get(req, 'rerank', default=True)
    debug_logger.info('rerank %s', rerank)
    streaming = safe_get(req, 'streaming', False)
    history = safe_get(req, 'history', [])
    debug_logger.info("history: %s ", history)
    debug_logger.info("question: %s", question)
    debug_logger.info("kb_ids: %s", kb_ids)
    debug_logger.info("user_id: %s", user_id)
    not_exist_kb_ids = local_doc_qa.milvus_summary.check_kb_exist(user_id, kb_ids)
    if not_exist_kb_ids:
        return sanic_json({"code": 2003, "msg": "fail, knowledge Base {} not found".format(not_exist_kb_ids)})
    file_infos = []
    milvus_kb = local_doc_qa.match_milvus_kb(user_id, kb_ids)
    for kb_id in kb_ids:
        file_infos.extend(local_doc_qa.milvus_summary.get_files(user_id, kb_id))
    # Only files fully parsed and inserted ('green') can be queried.
    valid_files = [fi for fi in file_infos if fi[2] == 'green']
    if len(valid_files) == 0:
        return sanic_json({"code": 200, "msg": "当前知识库为空,请上传文件或等待文件解析完毕", "question": question,
                           "response": "All knowledge bases {} are empty or haven't green file, please upload files".format(
                               kb_ids), "history": history, "source_documents": [{}]})
    else:
        debug_logger.info("streaming: %s", streaming)
        if streaming:
            debug_logger.info("start generate answer")

            async def generate_answer(response):
                debug_logger.info("start generate...")
                for resp, next_history in local_doc_qa.get_knowledge_based_answer(
                        query=question, milvus_kb=milvus_kb, chat_history=history, streaming=True, rerank=rerank
                ):
                    chunk_data = resp["result"]
                    if not chunk_data:
                        continue
                    # Strip the "data: " SSE prefix emitted by the LLM layer.
                    chunk_str = chunk_data[6:]
                    if chunk_str.startswith("[DONE]"):
                        # Final chunk: attach the formatted retrieval/source docs.
                        retrieval_documents = format_source_documents(resp["retrieval_documents"])
                        source_documents = format_source_documents(resp["source_documents"])
                        chat_data = {'user_id': user_id, 'kb_ids': kb_ids, 'query': question, 'history': history,
                                     'prompt': resp['prompt'], 'result': next_history[-1][1],
                                     'retrieval_documents': retrieval_documents, 'source_documents': source_documents}
                        qa_logger.info("chat_data: %s", chat_data)
                        debug_logger.info("response: %s", chat_data['result'])
                        stream_res = {
                            "code": 200,
                            "msg": "success",
                            "question": question,
                            # "response":next_history[-1][1],
                            "response": "",
                            "history": next_history,
                            "source_documents": source_documents,
                        }
                    else:
                        chunk_js = json.loads(chunk_str)
                        delta_answer = chunk_js["answer"]
                        stream_res = {
                            "code": 200,
                            "msg": "success",
                            "question": "",
                            "response": delta_answer,
                            "history": [],
                            "source_documents": [],
                        }
                    await response.write(f"data: {json.dumps(stream_res, ensure_ascii=False)}\n\n")
                    if chunk_str.startswith("[DONE]"):
                        await response.eof()
                    await asyncio.sleep(0.001)

            response_stream = ResponseStream(generate_answer, content_type='text/event-stream')
            return response_stream
        else:
            # Drain the generator; only the final resp/history matter here.
            for resp, history in local_doc_qa.get_knowledge_based_answer(
                    query=question, milvus_kb=milvus_kb, chat_history=history, streaming=False, rerank=rerank
            ):
                pass
            retrieval_documents = format_source_documents(resp["retrieval_documents"])
            source_documents = format_source_documents(resp["source_documents"])
            chat_data = {'user_id': user_id, 'kb_ids': kb_ids, 'query': question, 'history': history,
                         'retrieval_documents': retrieval_documents, 'prompt': resp['prompt'], 'result': resp['result'],
                         'source_documents': source_documents}
            qa_logger.info("chat_data: %s", chat_data)
            debug_logger.info("response: %s", chat_data['result'])
            return sanic_json({"code": 200, "msg": "success chat", "question": question, "response": resp["result"],
                               "history": history, "source_documents": source_documents})
20,485 | from qanything_kernel.core.local_file import LocalFile
from qanything_kernel.core.local_doc_qa import LocalDocQA
from qanything_kernel.utils.general_utils import *
from qanything_kernel.utils.custom_log import debug_logger, qa_logger
from sanic.response import ResponseStream
from sanic.response import json as sanic_json
from sanic.response import text as sanic_text
from sanic import request
import uuid
import json
import asyncio
import urllib.parse
import re
from datetime import datetime
import os
async def document(req: request):
    """Serve a plain-text API overview / usage guide for the QAnything service."""
    description = """
# QAnything 介绍
[戳我看视频>>>>>【有道QAnything介绍视频.mp4】](https://docs.popo.netease.com/docs/7e512e48fcb645adadddcf3107c97e7c)
**QAnything** (**Q**uestion and **A**nswer based on **Anything**) 是支持任意格式的本地知识库问答系统。
您的任何格式的本地文件都可以往里扔,即可获得准确、快速、靠谱的问答体验。
**目前已支持格式:**
* PDF
* Word(doc/docx)
* PPT
* TXT
* 图片
* 网页链接
* ...更多格式,敬请期待
# API 调用指南
## API Base URL
https://qanything.youdao.com
## 鉴权
目前使用微信鉴权,步骤如下:
1. 客户端通过扫码微信二维码(首次登录需要关注公众号)
2. 获取token
3. 调用下面所有API都需要通过authorization参数传入这个token
注意:authorization参数使用Bearer auth认证方式
生成微信二维码以及获取token的示例代码下载地址:[微信鉴权示例代码](https://docs.popo.netease.com/docs/66652d1a967e4f779594aef3306f6097)
## API 接口说明
    {
        "api": "/api/local_doc_qa/upload_files"
        "name": "上传文件",
        "description": "上传文件接口,支持多个文件同时上传,需要指定知识库名称",
    },
    {
        "api": "/api/local_doc_qa/upload_weblink"
        "name": "上传网页链接",
        "description": "上传网页链接,自动爬取网页内容,需要指定知识库名称",
    },
    {
        "api": "/api/local_doc_qa/local_doc_chat"
        "name": "问答接口",
        "description": "知识库问答接口,指定知识库名称,上传用户问题,通过传入history支持多轮对话",
    },
    {
        "api": "/api/local_doc_qa/list_files"
        "name": "文件列表",
        "description": "列出指定知识库下的所有文件名,需要指定知识库名称",
    },
    {
        "api": "/api/local_doc_qa/delete_files"
        "name": "删除文件",
        "description": "删除指定知识库下的指定文件,需要指定知识库名称",
    },
"""
    return sanic_text(description)
20,486 | from sanic.request import Request
from sanic.exceptions import BadRequest
import traceback
from urllib.parse import urlparse
import time
import os
import logging
import re
import tiktoken
def get_invalid_user_id_msg(user_id):
    """Build the error message returned when *user_id* fails validation."""
    template = "fail, Invalid user_id: {}. user_id 必须只含有字母,数字和下划线且字母开头"
    return template.format(user_id)
20,487 | from sanic.request import Request
from sanic.exceptions import BadRequest
import traceback
from urllib.parse import urlparse
import time
import os
import logging
import re
import tiktoken
def write_check_file(filepath, docs):
    """Append a load report for *filepath* to ``tmp_files/load_file.txt``.

    The report records the source path, the number of parsed docs, and one
    line per doc (its ``str()`` form), next to the original file's directory.

    Fixes: removed a redundant ``fout.close()`` that ran inside the ``with``
    block (the context manager already closes the file), and replaced the
    racy ``exists``+``makedirs`` pair with ``makedirs(..., exist_ok=True)``.
    """
    folder_path = os.path.join(os.path.dirname(filepath), "tmp_files")
    os.makedirs(folder_path, exist_ok=True)  # race-free; no-op if it already exists
    fp = os.path.join(folder_path, 'load_file.txt')
    with open(fp, 'a+', encoding='utf-8') as fout:
        fout.write("filepath=%s,len=%s" % (filepath, len(docs)))
        fout.write('\n')
        for i in docs:
            fout.write(str(i))
            fout.write('\n')
20,488 | from sanic.request import Request
from sanic.exceptions import BadRequest
import traceback
from urllib.parse import urlparse
import time
import os
import logging
import re
import tiktoken
def isURL(string):
    """Return True when *string* parses as an absolute URL (non-empty scheme and host)."""
    parsed = urlparse(string)
    return bool(parsed.scheme) and bool(parsed.netloc)
20,489 | from sanic.request import Request
from sanic.exceptions import BadRequest
import traceback
from urllib.parse import urlparse
import time
import os
import logging
import re
import tiktoken
def format_source_documents(ori_source_documents):
    """Convert retrieved Document-like objects into plain serializable dicts.

    Each input must expose ``page_content`` and a ``metadata`` mapping with
    keys file_id, file_name, retrieval_query, kernel, score, embed_version.
    ``score`` is stringified so the result is JSON-safe.

    Fixes: removed dead locals (``file_id``/``file_name`` were assigned and
    never used), the unused ``enumerate`` index, and commented-out code.
    """
    source_documents = []
    for doc in ori_source_documents:
        meta = doc.metadata
        source_documents.append({'file_id': meta['file_id'],
                                 'file_name': meta['file_name'],
                                 'content': doc.page_content,
                                 'retrieval_query': meta['retrieval_query'],
                                 'kernel': meta['kernel'],
                                 'score': str(meta['score']),
                                 'embed_version': meta['embed_version']})
    return source_documents
20,490 | from sanic.request import Request
from sanic.exceptions import BadRequest
import traceback
from urllib.parse import urlparse
import time
import os
import logging
import re
import tiktoken
def get_time(func):
    """Decorator that prints the wall-clock duration of each call to *func*."""
    def inner(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - started
        print('函数 {} 执行耗时: {} 秒'.format(func.__name__, elapsed))
        return result
    return inner
20,491 | from sanic.request import Request
from sanic.exceptions import BadRequest
import traceback
from urllib.parse import urlparse
import time
import os
import logging
import re
import tiktoken
def safe_get(req: Request, attr: str, default=None):
    """Fetch *attr* from the request's form, query args, or JSON body — in that
    order — returning *default* when the attribute is absent or the body
    cannot be parsed.

    ``req.json`` is only touched after form/args miss, so a malformed body
    never masks values supplied elsewhere.
    """
    try:
        if attr in req.form:
            value = req.form.getlist(attr)[0]
        elif attr in req.args:
            value = req.args[attr]
        elif attr in req.json:
            value = req.json[attr]
        else:
            value = default
        return value
    except BadRequest:
        # Accessing req.json on a non-JSON body raises BadRequest.
        logging.warning(f"missing {attr} in request")
    except Exception:
        logging.warning(f"get {attr} from request failed:")
        logging.warning(traceback.format_exc())
    return default
20,492 | from sanic.request import Request
from sanic.exceptions import BadRequest
import traceback
from urllib.parse import urlparse
import time
import os
import logging
import re
import tiktoken
def truncate_filename(filename, max_length=200):
    """Shorten *filename* so its UTF-8 byte length fits within *max_length*.

    The extension is preserved; when truncation occurs, a Unix-timestamp
    suffix is appended to the stem to keep the name distinguishable.
    Names already within the limit are returned unchanged.
    """
    stem, ext = os.path.splitext(filename)
    byte_len = len(filename.encode('utf-8'))
    if byte_len <= max_length:
        return filename
    timestamp = str(int(time.time()))
    # Shave 4 characters off the stem per pass until the candidate fits.
    while byte_len > max_length:
        stem = stem[:-4]
        candidate = stem + "_" + timestamp + ext
        byte_len = len(candidate.encode('utf-8'))
    return candidate
20,493 | from sanic.request import Request
from sanic.exceptions import BadRequest
import traceback
from urllib.parse import urlparse
import time
import os
import logging
import re
import tiktoken
def read_files_with_extensions():
    """Yield paths of supported files found under the project's ``data`` directory.

    The project root is assumed to be the parent of this module's directory.
    """
    current_file = os.path.abspath(__file__)
    project_dir = os.path.dirname(os.path.dirname(current_file))
    directory = project_dir + '/data'
    print(f'now reading {directory}')
    supported = ('.md', '.txt', '.pdf', '.jpg', '.docx', '.xlsx', '.eml', '.csv')
    for root, dirs, files in os.walk(directory):
        for name in files:
            if name.endswith(supported):
                yield os.path.join(root, name)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.