id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
16,801
from mmcv.utils import Registry
from mmdet.models.builder import BACKBONES, HEADS, LOSSES, NECKS

# Bug fix: FUSIONMODELS was referenced below but never defined, so both
# builders raised NameError on first use. Define the registry that fusion
# models register into (named "fusion_models", as in upstream BEVFusion).
FUSIONMODELS = Registry("fusion_models")


def build_fusion_model(cfg, train_cfg=None, test_cfg=None):
    """Build a fusion model from a config dict.

    Args:
        cfg (dict): Config with a ``type`` key naming a registered model.
        train_cfg (dict, optional): Training config forwarded to the model.
        test_cfg (dict, optional): Testing config forwarded to the model.

    Returns:
        nn.Module: The instantiated fusion model.
    """
    return FUSIONMODELS.build(
        cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)
    )


def build_model(cfg, train_cfg=None, test_cfg=None):
    """Alias of :func:`build_fusion_model` kept for API compatibility."""
    return build_fusion_model(cfg, train_cfg=train_cfg, test_cfg=test_cfg)
null
16,802
import copy

import numpy as np
import torch
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_conv_layer
from mmcv.runner import force_fp32
from torch import nn

from mmdet3d.core import (
    PseudoSampler,
    circle_nms,
    draw_heatmap_gaussian,
    gaussian_radius,
    xywhr2xyxyr,
)
from mmdet3d.models.builder import HEADS, build_loss
from mmdet3d.models.utils import FFN, PositionEmbeddingLearned, TransformerDecoderLayer
from mmdet3d.ops.iou3d.iou3d_utils import nms_gpu
from mmdet.core import (
    AssignResult,
    build_assigner,
    build_bbox_coder,
    build_sampler,
    multi_apply,
)


def clip_sigmoid(x, eps=1e-4):
    """Apply sigmoid and clamp the result to ``[eps, 1 - eps]``.

    Keeping probabilities strictly inside (0, 1) avoids inf/NaN in
    log-based losses (e.g. Gaussian focal loss on heatmaps).

    Args:
        x (torch.Tensor): Input logits.
        eps (float): Clamp margin. Defaults to 1e-4.

    Returns:
        torch.Tensor: ``sigmoid(x)`` clamped to ``[eps, 1 - eps]``.
    """
    # Bug fix: the original used in-place ``x.sigmoid_()``, which silently
    # mutates the caller's tensor (a second call would compute
    # sigmoid(sigmoid(x))) and raises on leaf tensors that require grad.
    y = torch.clamp(x.sigmoid(), min=eps, max=1 - eps)
    return y
null
16,803
import copy

import torch
from mmcv.cnn import ConvModule, build_conv_layer
from mmcv.runner import BaseModule, force_fp32
from torch import nn

from mmdet3d.core import circle_nms, draw_heatmap_gaussian, gaussian_radius, xywhr2xyxyr
from mmdet3d.models import builder
from mmdet3d.models.builder import HEADS, build_loss
from mmdet3d.ops.iou3d.iou3d_utils import nms_gpu
from mmdet.core import build_bbox_coder, multi_apply


def clip_sigmoid(x: torch.Tensor, eps: float = 1e-4) -> torch.Tensor:
    """Apply sigmoid and clamp the result to ``[eps, 1 - eps]``.

    Keeping probabilities strictly inside (0, 1) avoids inf/NaN in
    log-based heatmap losses.

    Args:
        x: Input logits.
        eps: Clamp margin. Defaults to 1e-4.

    Returns:
        ``sigmoid(x)`` clamped to ``[eps, 1 - eps]``.
    """
    # Bug fix: the original used in-place ``x.sigmoid_()``, which silently
    # mutates the caller's tensor and raises on leaf tensors requiring grad.
    return torch.clamp(x.sigmoid(), min=eps, max=1 - eps)
null
16,804
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import functional as F

from mmdet3d.models.builder import HEADS


def sigmoid_xent_loss(
    inputs: torch.Tensor,
    targets: torch.Tensor,
    reduction: str = "mean",
) -> torch.Tensor:
    """Binary cross-entropy computed directly on logits.

    Both tensors are promoted to float32 before the loss is evaluated so
    that half-precision inputs do not destabilize the computation.

    Args:
        inputs: Raw (pre-sigmoid) predictions.
        targets: Ground-truth values, broadcastable to ``inputs``.
        reduction: One of ``"mean"``, ``"sum"`` or ``"none"``.

    Returns:
        The reduced (or element-wise) binary cross-entropy loss.
    """
    logits = inputs.float()
    labels = targets.float()
    return F.binary_cross_entropy_with_logits(logits, labels, reduction=reduction)
null
16,805
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import functional as F

from mmdet3d.models.builder import HEADS


def sigmoid_focal_loss(
    inputs: torch.Tensor,
    targets: torch.Tensor,
    alpha: float = -1,
    gamma: float = 2,
    reduction: str = "mean",
) -> torch.Tensor:
    """Focal loss on logits (Lin et al., "Focal Loss for Dense Object Detection").

    Args:
        inputs: Raw (pre-sigmoid) predictions.
        targets: Binary ground-truth values, same shape as ``inputs``.
        alpha: Class-balance weight in [0, 1]; a negative value disables it.
        gamma: Focusing exponent that down-weights easy examples.
        reduction: One of ``"mean"``, ``"sum"`` or ``"none"``.

    Returns:
        The reduced (or element-wise) focal loss.
    """
    inputs = inputs.float()
    targets = targets.float()

    raw_ce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    prob = torch.sigmoid(inputs)
    # Probability the model assigns to the true class of each element.
    pt = prob * targets + (1 - prob) * (1 - targets)
    loss = raw_ce * (1 - pt) ** gamma

    if alpha >= 0:
        # Standard alpha-balancing between positive and negative samples.
        balance = alpha * targets + (1 - alpha) * (1 - targets)
        loss = balance * loss

    if reduction == "mean":
        return loss.mean()
    if reduction == "sum":
        return loss.sum()
    return loss
null
16,806
from mmcv.cnn import ConvModule, build_conv_layer, kaiming_init import torch from torch import nn import torch.nn.functional as F from torch.nn.parameter import Parameter from torch.nn import Linear from torch.nn.init import xavier_uniform_, constant_ The provided code snippet includes necessary dependencies for implementing the `multi_head_attention_forward` function. Write a Python function `def multi_head_attention_forward(query, # type: Tensor key, # type: Tensor value, # type: Tensor embed_dim_to_check, # type: int num_heads, # type: int in_proj_weight, # type: Tensor in_proj_bias, # type: Tensor bias_k, # type: Optional[Tensor] bias_v, # type: Optional[Tensor] add_zero_attn, # type: bool dropout_p, # type: float out_proj_weight, # type: Tensor out_proj_bias, # type: Tensor training=True, # type: bool key_padding_mask=None, # type: Optional[Tensor] need_weights=True, # type: bool attn_mask=None, # type: Optional[Tensor] use_separate_proj_weight=False, # type: bool q_proj_weight=None, # type: Optional[Tensor] k_proj_weight=None, # type: Optional[Tensor] v_proj_weight=None, # type: Optional[Tensor] static_k=None, # type: Optional[Tensor] static_v=None, # type: Optional[Tensor] )` to solve the following problem: r""" Args: query, key, value: map a query and a set of key-value pairs to an output. See "Attention Is All You Need" for more details. embed_dim_to_check: total dimension of the model. num_heads: parallel attention heads. in_proj_weight, in_proj_bias: input projection weight and bias. bias_k, bias_v: bias of the key and value sequences to be added at dim=0. add_zero_attn: add a new batch of zeros to the key and value sequences at dim=1. dropout_p: probability of an element to be zeroed. out_proj_weight, out_proj_bias: the output projection weight and bias. training: apply dropout if is ``True``. key_padding_mask: if provided, specified padding elements in the key will be ignored by the attention. This is an binary mask. 
When the value is True, the corresponding value on the attention layer will be filled with -inf. need_weights: output attn_output_weights. attn_mask: mask that prevents attention to certain positions. This is an additive mask (i.e. the values will be added to the attention layer). use_separate_proj_weight: the function accept the proj. weights for query, key, and value in differnt forms. If false, in_proj_weight will be used, which is a combination of q_proj_weight, k_proj_weight, v_proj_weight. q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. static_k, static_v: static key and value used for attention operators. Shape: Inputs: - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is the embedding dimension. - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is the embedding dimension. - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is the embedding dimension. - key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length. - attn_mask: :math:`(L, S)` where L is the target sequence length, S is the source sequence length. - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. Outputs: - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is the embedding dimension. - attn_output_weights: :math:`(N, L, S)` where N is the batch size, L is the target sequence length, S is the source sequence length. 
def multi_head_attention_forward(query,                           # type: Tensor
                                 key,                             # type: Tensor
                                 value,                           # type: Tensor
                                 embed_dim_to_check,              # type: int
                                 num_heads,                       # type: int
                                 in_proj_weight,                  # type: Tensor
                                 in_proj_bias,                    # type: Tensor
                                 bias_k,                          # type: Optional[Tensor]
                                 bias_v,                          # type: Optional[Tensor]
                                 add_zero_attn,                   # type: bool
                                 dropout_p,                       # type: float
                                 out_proj_weight,                 # type: Tensor
                                 out_proj_bias,                   # type: Tensor
                                 training=True,                   # type: bool
                                 key_padding_mask=None,           # type: Optional[Tensor]
                                 need_weights=True,               # type: bool
                                 attn_mask=None,                  # type: Optional[Tensor]
                                 use_separate_proj_weight=False,  # type: bool
                                 q_proj_weight=None,              # type: Optional[Tensor]
                                 k_proj_weight=None,              # type: Optional[Tensor]
                                 v_proj_weight=None,              # type: Optional[Tensor]
                                 static_k=None,                   # type: Optional[Tensor]
                                 static_v=None,                   # type: Optional[Tensor]
                                 ):
    # type: (...) -> Tuple[Tensor, Optional[Tensor]]
    r"""Functional scaled dot-product multi-head attention.

    Maps a query and a set of key-value pairs to an output, as described in
    "Attention Is All You Need".

    Args:
        query, key, value: inputs of shape ``(L, N, E)`` / ``(S, N, E)`` where
            L is target length, S source length, N batch, E embedding dim.
        embed_dim_to_check: expected total model dimension ``E``.
        num_heads: number of parallel attention heads.
        in_proj_weight, in_proj_bias: packed q/k/v input projection.
        bias_k, bias_v: optional bias sequences appended to key/value at dim=0.
        add_zero_attn: append a batch of zeros to key and value at dim=1.
        dropout_p: dropout probability applied to the attention weights.
        out_proj_weight, out_proj_bias: output projection weight and bias.
        training: apply dropout if ``True``.
        key_padding_mask: ``(N, S)`` binary mask; ``True`` entries are masked
            with ``-inf`` before the softmax.
        need_weights: also return head-averaged attention weights.
        attn_mask: ``(L, S)`` additive attention mask.
        use_separate_proj_weight: use the per-tensor projection weights below
            instead of the packed ``in_proj_weight``.
        q_proj_weight, k_proj_weight, v_proj_weight: per-tensor projections.
        static_k, static_v: precomputed key/value of shape
            ``(N*num_heads, S, E/num_heads)``; incompatible with bias_k/bias_v.

    Returns:
        attn_output: ``(L, N, E)``.
        attn_output_weights: ``(N, L, S)`` averaged over heads when
            ``need_weights`` is ``True``, else ``None``.
    """
    # Tensor equality (not identity) decides self- vs cross-attention.
    qkv_same = torch.equal(query, key) and torch.equal(key, value)
    kv_same = torch.equal(key, value)

    tgt_len, bsz, embed_dim = query.size()
    assert embed_dim == embed_dim_to_check
    assert list(query.size()) == [tgt_len, bsz, embed_dim]
    assert key.size() == value.size()

    head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
    scaling = float(head_dim) ** -0.5

    if use_separate_proj_weight is not True:
        if qkv_same:
            # self-attention: one fused projection, split into q, k, v
            q, k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
        elif kv_same:
            # encoder-decoder attention
            # inline in_proj: q slice of the packed weight/bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)

            if key is None:
                assert value is None
                k = None
                v = None
            else:
                # inline in_proj: packed k/v slice (key == value here)
                _b = in_proj_bias
                _start = embed_dim
                _end = None
                _w = in_proj_weight[_start:, :]
                if _b is not None:
                    _b = _b[_start:]
                k, v = F.linear(key, _w, _b).chunk(2, dim=-1)
        else:
            # fully distinct q, k, v: project each with its own slice
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)

            _b = in_proj_bias
            _start = embed_dim
            _end = embed_dim * 2
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            k = F.linear(key, _w, _b)

            _b = in_proj_bias
            _start = embed_dim * 2
            _end = None
            _w = in_proj_weight[_start:, :]
            if _b is not None:
                _b = _b[_start:]
            v = F.linear(value, _w, _b)
    else:
        # separate projection weights supplied explicitly by the caller
        q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
        len1, len2 = q_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == query.size(-1)

        k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
        len1, len2 = k_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == key.size(-1)

        v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
        len1, len2 = v_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == value.size(-1)

        if in_proj_bias is not None:
            # bias is still the packed 3*E vector; slice per projection
            q = F.linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
            k = F.linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
            v = F.linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
        else:
            q = F.linear(query, q_proj_weight_non_opt, in_proj_bias)
            k = F.linear(key, k_proj_weight_non_opt, in_proj_bias)
            v = F.linear(value, v_proj_weight_non_opt, in_proj_bias)

    # pre-scale q so the later bmm directly yields scaled dot products
    q = q * scaling

    if bias_k is not None and bias_v is not None:
        if static_k is None and static_v is None:
            # append the learned bias token to key/value and pad both masks
            # with one always-visible column to match the new source length
            k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask,
                                       torch.zeros((attn_mask.size(0), 1),
                                                   dtype=attn_mask.dtype,
                                                   device=attn_mask.device)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, torch.zeros((key_padding_mask.size(0), 1),
                                                   dtype=key_padding_mask.dtype,
                                                   device=key_padding_mask.device)], dim=1)
        else:
            assert static_k is None, "bias cannot be added to static key."
            assert static_v is None, "bias cannot be added to static value."
    else:
        assert bias_k is None
        assert bias_v is None

    # fold heads into the batch dimension: (len, N, E) -> (N*heads, len, head_dim)
    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if k is not None:
        k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    if v is not None:
        v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)

    if static_k is not None:
        assert static_k.size(0) == bsz * num_heads
        assert static_k.size(2) == head_dim
        k = static_k

    if static_v is not None:
        assert static_v.size(0) == bsz * num_heads
        assert static_v.size(2) == head_dim
        v = static_v

    src_len = k.size(1)

    if key_padding_mask is not None:
        assert key_padding_mask.size(0) == bsz
        assert key_padding_mask.size(1) == src_len

    if add_zero_attn:
        # append an all-zero key/value column so attention can fall back to
        # "attending to nothing"; pad both masks accordingly
        src_len += 1
        k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:],
                                      dtype=k.dtype, device=k.device)], dim=1)
        v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:],
                                      dtype=v.dtype, device=v.device)], dim=1)
        if attn_mask is not None:
            attn_mask = torch.cat([attn_mask,
                                   torch.zeros((attn_mask.size(0), 1),
                                               dtype=attn_mask.dtype,
                                               device=attn_mask.device)], dim=1)
        if key_padding_mask is not None:
            key_padding_mask = torch.cat(
                [key_padding_mask, torch.zeros((key_padding_mask.size(0), 1),
                                               dtype=key_padding_mask.dtype,
                                               device=key_padding_mask.device)], dim=1)

    # raw (already scaled) attention logits: (N*heads, L, S)
    attn_output_weights = torch.bmm(q, k.transpose(1, 2))
    assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]

    if attn_mask is not None:
        # additive mask, broadcast over the batch*heads dimension
        attn_mask = attn_mask.unsqueeze(0)
        attn_output_weights += attn_mask

    if key_padding_mask is not None:
        # True entries in key_padding_mask are removed from the softmax
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        attn_output_weights = attn_output_weights.masked_fill(
            key_padding_mask.unsqueeze(1).unsqueeze(2),
            float('-inf'),
        )
        attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)

    attn_output_weights = F.softmax(
        attn_output_weights, dim=-1)
    attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training)

    attn_output = torch.bmm(attn_output_weights, v)
    assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
    # restore (L, N, E) layout and apply the output projection
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
    attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias)

    if need_weights:
        # average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        return attn_output, attn_output_weights.sum(dim=1) / num_heads
    else:
        return attn_output, None
r""" Args: query, key, value: map a query and a set of key-value pairs to an output. See "Attention Is All You Need" for more details. embed_dim_to_check: total dimension of the model. num_heads: parallel attention heads. in_proj_weight, in_proj_bias: input projection weight and bias. bias_k, bias_v: bias of the key and value sequences to be added at dim=0. add_zero_attn: add a new batch of zeros to the key and value sequences at dim=1. dropout_p: probability of an element to be zeroed. out_proj_weight, out_proj_bias: the output projection weight and bias. training: apply dropout if is ``True``. key_padding_mask: if provided, specified padding elements in the key will be ignored by the attention. This is an binary mask. When the value is True, the corresponding value on the attention layer will be filled with -inf. need_weights: output attn_output_weights. attn_mask: mask that prevents attention to certain positions. This is an additive mask (i.e. the values will be added to the attention layer). use_separate_proj_weight: the function accept the proj. weights for query, key, and value in differnt forms. If false, in_proj_weight will be used, which is a combination of q_proj_weight, k_proj_weight, v_proj_weight. q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. static_k, static_v: static key and value used for attention operators. Shape: Inputs: - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is the embedding dimension. - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is the embedding dimension. - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is the embedding dimension. - key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length. - attn_mask: :math:`(L, S)` where L is the target sequence length, S is the source sequence length. 
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. Outputs: - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is the embedding dimension. - attn_output_weights: :math:`(N, L, S)` where N is the batch size, L is the target sequence length, S is the source sequence length.
16,807
import torch
import torch.nn as nn
from mmdet.models.backbones.swin import WindowMSA, ShiftWindowMSA
from mmdet3d.ops.spconv import SparseConv3d, SubMConv3d
from mmdet3d.models.utils.transformer import MultiheadAttention
from typing import Union
from thop import profile

# NOTE(review): the code below also uses Parameter, Linear, xavier_uniform_,
# xavier_normal_, constant_, warnings and multi_head_attention_forward, none of
# which are imported in this snippet — presumably provided elsewhere in the
# module; confirm before running in isolation.


def count_window_msa(m: Union[WindowMSA, ShiftWindowMSA], x, y):
    """thop hook: accumulate MAC count of a (shifted) window attention module."""
    if isinstance(m, WindowMSA):
        embed_dims = m.embed_dims
        num_heads = m.num_heads
    else:
        # ShiftWindowMSA wraps the actual WindowMSA in ``w_msa``
        embed_dims = m.w_msa.embed_dims
        num_heads = m.w_msa.num_heads
    B, N, C = x[0].shape
    # qkv = model.qkv(x)
    m.total_ops += B * N * embed_dims * 3 * embed_dims
    # attn = (q @ k.transpose(-2, -1))
    m.total_ops += B * num_heads * N * (embed_dims // num_heads) * N
    # x = (attn @ v)
    m.total_ops += num_heads * B * N * N * (embed_dims // num_heads)
    # x = m.proj(x)
    m.total_ops += B * N * embed_dims * embed_dims


def count_sparseconv(m: Union[SparseConv3d, SubMConv3d], x, y):
    """thop hook: MACs of a sparse 3D conv from its kernel-map size."""
    indice_dict = y.indice_dict[m.indice_key]
    # number of active input/output index pairs actually computed
    kmap_size = indice_dict[-2].sum().item()
    m.total_ops += kmap_size * x[0].features.shape[1] * y.features.shape[1]


def count_mha(m: Union[MultiheadAttention, nn.MultiheadAttention], x, y):
    """thop hook: MACs of a multi-head attention forward pass."""
    flops = 0
    # forward may be called as (q, k, v), (q, k) with v == k, or (q,) self-attn
    if len(x) == 3:
        q, k, v = x
    elif len(x) == 2:
        q, k = x
        v = k
    elif len(x) == 1:
        q = x[0]
        k = v = q
    else:
        return
    # older torch MHA has no ``batch_first`` attribute (seq-first only)
    batch_first = m.batch_first \
        if hasattr(m, 'batch_first') else False
    if batch_first:
        batch_size = q.shape[0]
        len_idx = 1
    else:
        batch_size = q.shape[1]
        len_idx = 0
    dim_idx = 2
    qdim = q.shape[dim_idx]
    kdim = k.shape[dim_idx]
    vdim = v.shape[dim_idx]
    qlen = q.shape[len_idx]
    klen = k.shape[len_idx]
    vlen = v.shape[len_idx]
    num_heads = m.num_heads
    assert qdim == m.embed_dim
    if m.kdim is None:
        assert kdim == qdim
    if m.vdim is None:
        assert vdim == qdim
    flops = 0
    # Q scaling
    flops += qlen * qdim
    # Initial projections
    flops += (
        (qlen * qdim * qdim)  # QW
        + (klen * kdim * kdim)  # KW
        + (vlen * vdim * vdim)  # VW
    )
    if m.in_proj_bias is not None:
        flops += (qlen + klen + vlen) * qdim
    # attention heads: scale, matmul, softmax, matmul
    qk_head_dim = qdim // num_heads
    v_head_dim = vdim // num_heads
    head_flops = (
        (qlen * klen * qk_head_dim)  # QK^T
        + (qlen * klen)  # softmax
        + (qlen * klen * v_head_dim)  # AV
    )
    flops += num_heads * head_flops
    # final projection, bias is always enabled
    flops += qlen * vdim * (vdim + 1)
    flops *= batch_size
    m.total_ops += flops


class MultiheadAttention(nn.Module):
    r"""Multi-head attention as in "Attention Is All You Need".

    .. math::
        \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
        \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)

    Args:
        embed_dim: total dimension of the model.
        num_heads: parallel attention heads (must divide ``embed_dim``).
        dropout: dropout on attn_output_weights. Default: 0.0.
        bias: add bias as module parameter. Default: True.
        add_bias_kv: add bias to the key/value sequences at dim=0.
        add_zero_attn: add a new batch of zeros to key/value at dim=1.
        kdim: number of features in key. Default: None (uses ``embed_dim``).
        vdim: number of features in value. Default: None (uses ``embed_dim``).

    Examples::
        >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
        >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
    """

    def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
        super(MultiheadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        # when k/v dims differ from embed_dim, separate projections are used
        self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim

        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"

        # packed q/k/v projection weight (used when all embed dims match)
        self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))

        if self._qkv_same_embed_dim is False:
            self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
            self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))

        if bias:
            self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = Linear(embed_dim, embed_dim, bias=bias)

        if add_bias_kv:
            self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
            self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None

        self.add_zero_attn = add_zero_attn

        self._reset_parameters()

    def _reset_parameters(self):
        """Xavier-initialize projections; zero the biases."""
        if self._qkv_same_embed_dim:
            xavier_uniform_(self.in_proj_weight)
        else:
            xavier_uniform_(self.q_proj_weight)
            xavier_uniform_(self.k_proj_weight)
            xavier_uniform_(self.v_proj_weight)

        if self.in_proj_bias is not None:
            constant_(self.in_proj_bias, 0.)
            constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            xavier_normal_(self.bias_v)

    def forward(self, query, key, value, key_padding_mask=None, need_weights=True, attn_mask=None):
        r"""Run multi-head attention over ``(query, key, value)``.

        Args:
            query: ``(L, N, E)`` target sequence.
            key: ``(S, N, E)`` source sequence.
            value: ``(S, N, E)`` source sequence.
            key_padding_mask: ``(N, S)`` binary mask; ``True`` positions are
                filled with ``-inf`` in the attention logits.
            need_weights: also return head-averaged attention weights.
            attn_mask: ``(L, S)`` additive attention mask.

        Returns:
            attn_output ``(L, N, E)`` and attn_output_weights ``(N, L, S)``
            (or ``None`` when ``need_weights`` is ``False``).
        """
        if hasattr(self, '_qkv_same_embed_dim') and self._qkv_same_embed_dim is False:
            # distinct k/v dims: route through the separate projection weights
            return multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                self.in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attn_mask, use_separate_proj_weight=True,
                q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
                v_proj_weight=self.v_proj_weight)
        else:
            # checkpoints saved by an older module version lack the attribute
            if not hasattr(self, '_qkv_same_embed_dim'):
                warnings.warn('A new version of MultiheadAttention module has been implemented. \
                    Please re-train your model with the new module', UserWarning)
            return multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                self.in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attn_mask)


def flops_counter(model, inputs):
    """Count MACs/params with thop, using the custom hooks defined above.

    Note: the ``MultiheadAttention`` key refers to the class defined in this
    file, which shadows the one imported from mmdet3d at the top.
    """
    macs, params = profile(
        model, inputs,
        custom_ops={
            WindowMSA: count_window_msa,
            #ShiftWindowMSA: count_window_msa,
            SparseConv3d: count_sparseconv,
            SubMConv3d: count_sparseconv,
            MultiheadAttention: count_mha
        },
        verbose=False
    )
    return macs, params
null
16,808
import copy
import torch
from collections import deque


def convert_sync_batchnorm(input_model, exclude=None):
    """Convert BatchNorm layers of a model to SyncBatchNorm in place.

    Args:
        input_model (torch.nn.Module): Model whose children are converted.
        exclude (list[str] | None): Keywords; any top-level child whose name
            contains one of them is left untouched. Defaults to None (convert
            everything).

    Returns:
        torch.nn.Module: The same model instance, converted in place.
    """
    # Bug fix: the original default was a mutable ``exclude=[]`` (shared
    # across calls — a classic Python pitfall); normalize from None instead.
    if exclude is None:
        exclude = []
    for name, module in input_model._modules.items():
        # ``any`` replaces the original ``sum([...])``-as-boolean idiom and
        # short-circuits on the first match.
        # NOTE(review): only top-level child names are matched, not nested
        # module paths — confirm this is the intended exclusion scope.
        if any(ex in name for ex in exclude):
            continue
        input_model._modules[name] = torch.nn.SyncBatchNorm.convert_sync_batchnorm(
            module
        )
    return input_model
null
16,809
import copy


def recursive_eval(obj, globals=None):
    """Recursively resolve ``${expr}`` placeholders inside a config object.

    Strings of the form ``"${...}"`` are evaluated with :func:`eval` against a
    deep copy of the top-level object, so expressions may reference sibling
    keys. Dicts and lists are mutated in place; the (possibly replaced) object
    is returned.

    NOTE: this executes arbitrary expressions via ``eval`` — only use it on
    trusted configuration files.
    """
    if globals is None:
        # Snapshot the whole object once so nested expressions can see
        # every top-level key, not just their local subtree.
        globals = copy.deepcopy(obj)

    if isinstance(obj, dict):
        for key, value in obj.items():
            obj[key] = recursive_eval(value, globals)
    elif isinstance(obj, list):
        obj[:] = [recursive_eval(item, globals) for item in obj]
    elif isinstance(obj, str) and obj.startswith("${") and obj.endswith("}"):
        obj = eval(obj[2:-1], globals)
        # The evaluated result may itself contain placeholders.
        obj = recursive_eval(obj, globals)

    return obj
null
16,810
import logging

from mmcv.utils import get_logger


def get_root_logger(log_file=None, log_level=logging.INFO, name="mmdet3d"):
    """Get root logger and add a keyword filter to it.

    The logger will be initialized if it has not been initialized. By default
    a StreamHandler will be added. If `log_file` is specified, a FileHandler
    will also be added. The name of the root logger is the top-level package
    name, e.g., "mmdet3d".

    Args:
        log_file (str, optional): File path of log. Defaults to None.
        log_level (int, optional): The level of logger.
            Defaults to logging.INFO.
        name (str, optional): The name of the root logger, also used as a
            filter keyword. Defaults to 'mmdet3d'.

    Returns:
        :obj:`logging.Logger`: The obtained logger
    """
    logger = get_logger(name=name, log_file=log_file, log_level=log_level)
    # Bug fix (two issues in the original):
    #  1. the filter was created but never attached, so it had no effect;
    #  2. the predicate called ``record.find(...)`` although LogRecord has no
    #     ``find`` method — the keyword must be searched in ``record.name``.
    logging_filter = logging.Filter(name)
    logging_filter.filter = lambda record: record.name.find(name) != -1
    logger.addFilter(logging_filter)
    return logger
Get root logger and add a keyword filter to it. The logger will be initialized if it has not been initialized. By default a StreamHandler will be added. If `log_file` is specified, a FileHandler will also be added. The name of the root logger is the top-level package name, e.g., "mmdet3d". Args: log_file (str, optional): File path of log. Defaults to None. log_level (int, optional): The level of logger. Defaults to logging.INFO. name (str, optional): The name of the root logger, also used as a filter keyword. Defaults to 'mmdet3d'. Returns: :obj:`logging.Logger`: The obtained logger
16,811
import argparse
import os
import time
import warnings

import mmcv
import onnx
import torch
from mmcv import Config, DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint, wrap_fp16_model
from mmdet3d.apis import single_gpu_test
from mmdet3d.datasets import build_dataloader, build_dataset
from mmdet3d.models import build_model
from mmdet.apis import multi_gpu_test, set_random_seed
from mmdet.datasets import replace_ImageToTensor
from onnxsim import simplify
from tqdm import tqdm


def parse_args():
    """Build the CLI parser and parse ``sys.argv``.

    Returns:
        argparse.Namespace: Parsed arguments with ``config``, ``checkpoint``
        and optional ``cfg_options`` attributes.
    """
    parser = argparse.ArgumentParser(description="MMDet test (and eval) a model")
    parser.add_argument("config", help="test config file path")
    parser.add_argument("checkpoint", help="checkpoint file")
    parser.add_argument(
        "--cfg-options",
        nargs="+",
        action=DictAction,
        help=(
            "override some settings in the used config, the key-value pair "
            "in xxx=yyy format will be merged into config file. If the value to "
            'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
            'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
            "Note that the quotation marks are necessary and that no white space "
            "is allowed."
        ),
    )
    return parser.parse_args()
null
16,812
import argparse from data_converter import nuscenes_converter as nuscenes_converter from data_converter.create_gt_database import create_groundtruth_database def create_groundtruth_database( dataset_class_name, data_path, info_prefix, info_path=None, mask_anno_path=None, used_classes=None, database_save_path=None, db_info_save_path=None, relative_path=True, add_rgb=False, lidar_only=False, bev_only=False, coors_range=None, with_mask=False, load_augmented=None, ): """Given the raw data, generate the ground truth database. Args: dataset_class_name (str): Name of the input dataset. data_path (str): Path of the data. info_prefix (str): Prefix of the info file. info_path (str): Path of the info file. Default: None. mask_anno_path (str): Path of the mask_anno. Default: None. used_classes (list[str]): Classes have been used. Default: None. database_save_path (str): Path to save database. Default: None. db_info_save_path (str): Path to save db_info. Default: None. relative_path (bool): Whether to use relative path. Default: True. with_mask (bool): Whether to use mask. Default: False. 
""" print(f"Create GT Database of {dataset_class_name}") dataset_cfg = dict( type=dataset_class_name, dataset_root=data_path, ann_file=info_path ) if dataset_class_name == "KittiDataset": dataset_cfg.update( test_mode=False, split="training", modality=dict( use_lidar=True, use_depth=False, use_lidar_intensity=True, use_camera=with_mask, ), pipeline=[ dict( type="LoadPointsFromFile", coord_type="LIDAR", load_dim=4, use_dim=4, ), dict( type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True, ), ], ) elif dataset_class_name == "NuScenesDataset": if not load_augmented: dataset_cfg.update( use_valid_flag=True, pipeline=[ dict( type="LoadPointsFromFile", coord_type="LIDAR", load_dim=5, use_dim=5, ), dict( type="LoadPointsFromMultiSweeps", sweeps_num=10, use_dim=[0, 1, 2, 3, 4], pad_empty_sweeps=True, remove_close=True, ), dict( type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True ), ], ) else: dataset_cfg.update( use_valid_flag=True, pipeline=[ dict( type="LoadPointsFromFile", coord_type="LIDAR", load_dim=16, use_dim=list(range(16)), load_augmented=load_augmented, ), dict( type="LoadPointsFromMultiSweeps", sweeps_num=10, load_dim=16, use_dim=list(range(16)), pad_empty_sweeps=True, remove_close=True, load_augmented=load_augmented, ), dict( type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True ), ], ) elif dataset_class_name == "WaymoDataset": dataset_cfg.update( test_mode=False, split="training", modality=dict( use_lidar=True, use_depth=False, use_lidar_intensity=True, use_camera=False, ), pipeline=[ dict( type="LoadPointsFromFile", coord_type="LIDAR", load_dim=6, use_dim=5, ), dict( type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True, ), ], ) dataset = build_dataset(dataset_cfg) if database_save_path is None: database_save_path = osp.join(data_path, f"{info_prefix}_gt_database") if db_info_save_path is None: db_info_save_path = osp.join(data_path, f"{info_prefix}_dbinfos_train.pkl") mmcv.mkdir_or_exist(database_save_path) 
all_db_infos = dict() if with_mask: coco = COCO(osp.join(data_path, mask_anno_path)) imgIds = coco.getImgIds() file2id = dict() for i in imgIds: info = coco.loadImgs([i])[0] file2id.update({info["file_name"]: i}) group_counter = 0 for j in track_iter_progress(list(range(len(dataset)))): input_dict = dataset.get_data_info(j) dataset.pre_pipeline(input_dict) example = dataset.pipeline(input_dict) annos = example["ann_info"] image_idx = example["sample_idx"] points = example["points"].tensor.numpy() gt_boxes_3d = annos["gt_bboxes_3d"].tensor.numpy() names = annos["gt_names"] group_dict = dict() if "group_ids" in annos: group_ids = annos["group_ids"] else: group_ids = np.arange(gt_boxes_3d.shape[0], dtype=np.int64) difficulty = np.zeros(gt_boxes_3d.shape[0], dtype=np.int32) if "difficulty" in annos: difficulty = annos["difficulty"] num_obj = gt_boxes_3d.shape[0] point_indices = box_np_ops.points_in_rbbox(points, gt_boxes_3d) if with_mask: # prepare masks gt_boxes = annos["gt_bboxes"] img_path = osp.split(example["img_info"]["filename"])[-1] if img_path not in file2id.keys(): print(f"skip image {img_path} for empty mask") continue img_id = file2id[img_path] kins_annIds = coco.getAnnIds(imgIds=img_id) kins_raw_info = coco.loadAnns(kins_annIds) kins_ann_info = _parse_coco_ann_info(kins_raw_info) h, w = annos["img_shape"][:2] gt_masks = [_poly2mask(mask, h, w) for mask in kins_ann_info["masks"]] # get mask inds based on iou mapping bbox_iou = bbox_overlaps(kins_ann_info["bboxes"], gt_boxes) mask_inds = bbox_iou.argmax(axis=0) valid_inds = bbox_iou.max(axis=0) > 0.5 # mask the image # use more precise crop when it is ready # object_img_patches = np.ascontiguousarray( # np.stack(object_img_patches, axis=0).transpose(0, 3, 1, 2)) # crop image patches using roi_align # object_img_patches = crop_image_patch_v2( # torch.Tensor(gt_boxes), # torch.Tensor(mask_inds).long(), object_img_patches) object_img_patches, object_masks = crop_image_patch( gt_boxes, gt_masks, mask_inds, 
annos["img"] ) for i in range(num_obj): filename = f"{image_idx}_{names[i]}_{i}.bin" abs_filepath = osp.join(database_save_path, filename) rel_filepath = osp.join(f"{info_prefix}_gt_database", filename) # save point clouds and image patches for each object gt_points = points[point_indices[:, i]] gt_points[:, :3] -= gt_boxes_3d[i, :3] if with_mask: if object_masks[i].sum() == 0 or not valid_inds[i]: # Skip object for empty or invalid mask continue img_patch_path = abs_filepath + ".png" mask_patch_path = abs_filepath + ".mask.png" mmcv.imwrite(object_img_patches[i], img_patch_path) mmcv.imwrite(object_masks[i], mask_patch_path) with open(abs_filepath, "w") as f: gt_points.tofile(f) if (used_classes is None) or names[i] in used_classes: db_info = { "name": names[i], "path": rel_filepath, "image_idx": image_idx, "gt_idx": i, "box3d_lidar": gt_boxes_3d[i], "num_points_in_gt": gt_points.shape[0], "difficulty": difficulty[i], } local_group_id = group_ids[i] # if local_group_id >= 0: if local_group_id not in group_dict: group_dict[local_group_id] = group_counter group_counter += 1 db_info["group_id"] = group_dict[local_group_id] if "score" in annos: db_info["score"] = annos["score"][i] if with_mask: db_info.update({"box2d_camera": gt_boxes[i]}) if names[i] in all_db_infos: all_db_infos[names[i]].append(db_info) else: all_db_infos[names[i]] = [db_info] for k, v in all_db_infos.items(): print(f"load {len(v)} {k} database infos") with open(db_info_save_path, "wb") as f: pickle.dump(all_db_infos, f) The provided code snippet includes necessary dependencies for implementing the `nuscenes_data_prep` function. Write a Python function `def nuscenes_data_prep( root_path, info_prefix, version, dataset_name, out_dir, max_sweeps=10, load_augmented=None, )` to solve the following problem: Prepare data related to nuScenes dataset. Related data consists of '.pkl' files recording basic infos, 2D annotations and groundtruth database. Args: root_path (str): Path of dataset root. 
info_prefix (str): The prefix of info filenames. version (str): Dataset version. dataset_name (str): The dataset class name. out_dir (str): Output directory of the groundtruth database info. max_sweeps (int): Number of input consecutive frames. Default: 10 Here is the function: def nuscenes_data_prep( root_path, info_prefix, version, dataset_name, out_dir, max_sweeps=10, load_augmented=None, ): """Prepare data related to nuScenes dataset. Related data consists of '.pkl' files recording basic infos, 2D annotations and groundtruth database. Args: root_path (str): Path of dataset root. info_prefix (str): The prefix of info filenames. version (str): Dataset version. dataset_name (str): The dataset class name. out_dir (str): Output directory of the groundtruth database info. max_sweeps (int): Number of input consecutive frames. Default: 10 """ if load_augmented is None: # otherwise, infos must have been created, we just skip. nuscenes_converter.create_nuscenes_infos( root_path, info_prefix, version=version, max_sweeps=max_sweeps ) # if version == "v1.0-test": # info_test_path = osp.join(root_path, f"{info_prefix}_infos_test.pkl") # nuscenes_converter.export_2d_annotation(root_path, info_test_path, version=version) # return # info_train_path = osp.join(root_path, f"{info_prefix}_infos_train.pkl") # info_val_path = osp.join(root_path, f"{info_prefix}_infos_val.pkl") # nuscenes_converter.export_2d_annotation(root_path, info_train_path, version=version) # nuscenes_converter.export_2d_annotation(root_path, info_val_path, version=version) create_groundtruth_database( dataset_name, root_path, info_prefix, f"{out_dir}/{info_prefix}_infos_train.pkl", load_augmented=load_augmented, )
Prepare data related to nuScenes dataset. Related data consists of '.pkl' files recording basic infos, 2D annotations and groundtruth database. Args: root_path (str): Path of dataset root. info_prefix (str): The prefix of info filenames. version (str): Dataset version. dataset_name (str): The dataset class name. out_dir (str): Output directory of the groundtruth database info. max_sweeps (int): Number of input consecutive frames. Default: 10
16,813
import argparse import copy import os import mmcv import numpy as np import torch from mmcv import Config from mmcv.parallel import MMDistributedDataParallel from mmcv.runner import load_checkpoint from torchpack import distributed as dist from torchpack.utils.config import configs from torchpack.utils.tqdm import tqdm from mmdet3d.core import LiDARInstance3DBoxes from mmdet3d.core.utils import visualize_camera, visualize_lidar, visualize_map from mmdet3d.datasets import build_dataloader, build_dataset from mmdet3d.models import build_model def recursive_eval(obj, globals=None): if globals is None: globals = copy.deepcopy(obj) if isinstance(obj, dict): for key in obj: obj[key] = recursive_eval(obj[key], globals) elif isinstance(obj, list): for k, val in enumerate(obj): obj[k] = recursive_eval(val, globals) elif isinstance(obj, str) and obj.startswith("${") and obj.endswith("}"): obj = eval(obj[2:-1], globals) obj = recursive_eval(obj, globals) return obj
null
16,814
import mmcv import numpy as np import os from collections import OrderedDict from nuscenes.nuscenes import NuScenes from nuscenes.utils.geometry_utils import view_points from os import path as osp from pyquaternion import Quaternion from shapely.geometry import MultiPoint, box from typing import List, Tuple, Union from mmdet3d.core.bbox.box_np_ops import points_cam2img from mmdet3d.datasets import NuScenesDataset nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier') def get_2d_boxes(nusc, sample_data_token: str, visibilities: List[str], mono3d=True): """Get the 2D annotation records for a given `sample_data_token`. Args: sample_data_token (str): Sample data token belonging to a camera \ keyframe. visibilities (list[str]): Visibility filter. mono3d (bool): Whether to get boxes with mono3d annotation. Return: list[dict]: List of 2D annotation record that belongs to the input `sample_data_token`. """ # Get the sample data and the sample corresponding to that sample data. sd_rec = nusc.get('sample_data', sample_data_token) assert sd_rec[ 'sensor_modality'] == 'camera', 'Error: get_2d_boxes only works' \ ' for camera sample_data!' if not sd_rec['is_key_frame']: raise ValueError( 'The 2D re-projections are available only for keyframes.') s_rec = nusc.get('sample', sd_rec['sample_token']) # Get the calibrated sensor and ego pose # record to get the transformation matrices. cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token']) camera_intrinsic = np.array(cs_rec['camera_intrinsic']) # Get all the annotation with the specified visibilties. ann_recs = [ nusc.get('sample_annotation', token) for token in s_rec['anns'] ] ann_recs = [ ann_rec for ann_rec in ann_recs if (ann_rec['visibility_token'] in visibilities) ] repro_recs = [] for ann_rec in ann_recs: # Augment sample_annotation with token information. 
ann_rec['sample_annotation_token'] = ann_rec['token'] ann_rec['sample_data_token'] = sample_data_token # Get the box in global coordinates. box = nusc.get_box(ann_rec['token']) # Move them to the ego-pose frame. box.translate(-np.array(pose_rec['translation'])) box.rotate(Quaternion(pose_rec['rotation']).inverse) # Move them to the calibrated sensor frame. box.translate(-np.array(cs_rec['translation'])) box.rotate(Quaternion(cs_rec['rotation']).inverse) # Filter out the corners that are not in front of the calibrated # sensor. corners_3d = box.corners() in_front = np.argwhere(corners_3d[2, :] > 0).flatten() corners_3d = corners_3d[:, in_front] # Project 3d box to 2d. corner_coords = view_points(corners_3d, camera_intrinsic, True).T[:, :2].tolist() # Keep only corners that fall within the image. final_coords = post_process_coords(corner_coords) # Skip if the convex hull of the re-projected corners # does not intersect the image canvas. if final_coords is None: continue else: min_x, min_y, max_x, max_y = final_coords # Generate dictionary record to be included in the .json file. 
repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, sample_data_token, sd_rec['filename']) # If mono3d=True, add 3D annotations in camera coordinates if mono3d and (repro_rec is not None): loc = box.center.tolist() dim = box.wlh.tolist() rot = [box.orientation.yaw_pitch_roll[0]] global_velo2d = nusc.box_velocity(box.token)[:2] global_velo3d = np.array([*global_velo2d, 0.0]) e2g_r_mat = Quaternion(pose_rec['rotation']).rotation_matrix c2e_r_mat = Quaternion(cs_rec['rotation']).rotation_matrix cam_velo3d = global_velo3d @ np.linalg.inv( e2g_r_mat).T @ np.linalg.inv(c2e_r_mat).T velo = cam_velo3d[0::2].tolist() repro_rec['bbox_cam3d'] = loc + dim + rot repro_rec['velo_cam3d'] = velo center3d = np.array(loc).reshape([1, 3]) center2d = points_cam2img( center3d, camera_intrinsic, with_depth=True) repro_rec['center2d'] = center2d.squeeze().tolist() # normalized center2D + depth # if samples with depth < 0 will be removed if repro_rec['center2d'][2] <= 0: continue ann_token = nusc.get('sample_annotation', box.token)['attribute_tokens'] if len(ann_token) == 0: attr_name = 'None' else: attr_name = nusc.get('attribute', ann_token[0])['name'] attr_id = nus_attributes.index(attr_name) repro_rec['attribute_name'] = attr_name repro_rec['attribute_id'] = attr_id repro_recs.append(repro_rec) return repro_recs The provided code snippet includes necessary dependencies for implementing the `export_2d_annotation` function. Write a Python function `def export_2d_annotation(root_path, info_path, version, mono3d=True)` to solve the following problem: Export 2d annotation from the info file and raw data. Args: root_path (str): Root path of the raw data. info_path (str): Path of the info file. version (str): Dataset version. mono3d (bool): Whether to export mono3d annotation. Default: True. Here is the function: def export_2d_annotation(root_path, info_path, version, mono3d=True): """Export 2d annotation from the info file and raw data. 
Args: root_path (str): Root path of the raw data. info_path (str): Path of the info file. version (str): Dataset version. mono3d (bool): Whether to export mono3d annotation. Default: True. """ # get bbox annotations for camera camera_types = [ 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_FRONT_LEFT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT', ] nusc_infos = mmcv.load(info_path)['infos'] nusc = NuScenes(version=version, dataroot=root_path, verbose=True) # info_2d_list = [] cat2Ids = [ dict(id=nus_categories.index(cat_name), name=cat_name) for cat_name in nus_categories ] coco_ann_id = 0 coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids) for info in mmcv.track_iter_progress(nusc_infos): for cam in camera_types: cam_info = info['cams'][cam] coco_infos = get_2d_boxes( nusc, cam_info['sample_data_token'], visibilities=['', '1', '2', '3', '4'], mono3d=mono3d) (height, width, _) = mmcv.imread(cam_info['data_path']).shape coco_2d_dict['images'].append( dict( file_name=cam_info['data_path'].split('data/nuscenes/') [-1], id=cam_info['sample_data_token'], token=info['token'], cam2ego_rotation=cam_info['sensor2ego_rotation'], cam2ego_translation=cam_info['sensor2ego_translation'], ego2global_rotation=info['ego2global_rotation'], ego2global_translation=info['ego2global_translation'], cam_intrinsic=cam_info['cam_intrinsic'], width=width, height=height)) for coco_info in coco_infos: if coco_info is None: continue # add an empty key for coco format coco_info['segmentation'] = [] coco_info['id'] = coco_ann_id coco_2d_dict['annotations'].append(coco_info) coco_ann_id += 1 if mono3d: json_prefix = f'{info_path[:-4]}_mono3d' else: json_prefix = f'{info_path[:-4]}' mmcv.dump(coco_2d_dict, f'{json_prefix}.coco.json')
Export 2d annotation from the info file and raw data. Args: root_path (str): Root path of the raw data. info_path (str): Path of the info file. version (str): Dataset version. mono3d (bool): Whether to export mono3d annotation. Default: True.
16,815
import pickle from os import path as osp import mmcv import numpy as np from mmcv import track_iter_progress from mmcv.ops import roi_align from pycocotools import mask as maskUtils from pycocotools.coco import COCO from mmdet3d.core.bbox import box_np_ops as box_np_ops from mmdet3d.datasets import build_dataset from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps def crop_image_patch_v2(pos_proposals, pos_assigned_gt_inds, gt_masks): import torch from torch.nn.modules.utils import _pair device = pos_proposals.device num_pos = pos_proposals.size(0) fake_inds = torch.arange(num_pos, device=device).to(dtype=pos_proposals.dtype)[ :, None ] rois = torch.cat([fake_inds, pos_proposals], dim=1) # Nx5 mask_size = _pair(28) rois = rois.to(device=device) gt_masks_th = ( torch.from_numpy(gt_masks) .to(device) .index_select(0, pos_assigned_gt_inds) .to(dtype=rois.dtype) ) # Use RoIAlign could apparently accelerate the training (~0.1s/iter) targets = roi_align(gt_masks_th, rois, mask_size[::-1], 1.0, 0, True).squeeze(1) return targets
null
16,816
import argparse import time import torch from mmcv import Config from mmcv.parallel import MMDataParallel from mmcv.runner import load_checkpoint, wrap_fp16_model from mmdet3d.datasets import build_dataloader, build_dataset from mmdet3d.models import build_fusion_model from torchpack.utils.config import configs from mmdet3d.utils import recursive_eval def parse_args(): parser = argparse.ArgumentParser(description="MMDet benchmark a model") parser.add_argument("config", help="test config file path") parser.add_argument("checkpoint", help="checkpoint file") parser.add_argument("--samples", default=2000, help="samples to benchmark") parser.add_argument("--log-interval", default=50, help="interval of logging") parser.add_argument("--fp16", action="store_true") args = parser.parse_args() return args
null
16,817
import json from langchain.schema import OutputParserException def parse_json_markdown(json_string: str) -> dict: # Remove the triple backticks if present json_string = json_string.strip() start_index = json_string.find("```json") end_index = json_string.find("```", start_index + len("```json")) if start_index != -1 and end_index != -1: extracted_content = json_string[start_index + len("```json"):end_index].strip() # Parse the JSON string into a Python dictionary parsed = json.loads(extracted_content) elif start_index != -1 and end_index == -1 and json_string.endswith("``"): end_index = json_string.find("``", start_index + len("```json")) extracted_content = json_string[start_index + len("```json"):end_index].strip() # Parse the JSON string into a Python dictionary parsed = json.loads(extracted_content) elif json_string.startswith("{"): # Parse the JSON string into a Python dictionary parsed = json.loads(json_string) else: raise Exception("Could not find JSON block in the output.") return parsed def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict: try: json_obj = parse_json_markdown(text) except json.JSONDecodeError as e: raise OutputParserException(f"Got invalid JSON object. Error: {e}") for key in expected_keys: if key not in json_obj: raise OutputParserException( f"Got invalid return object. Expected key `{key}` " f"to be present, but got {json_obj}" ) return json_obj
null
16,818
import os from functools import wraps from flask import current_app, g, has_request_context, request from flask_login import user_logged_in from flask_login.config import EXEMPT_METHODS from werkzeug.exceptions import Unauthorized from werkzeug.local import LocalProxy from extensions.ext_database import db from models.account import Account, Tenant, TenantAccountJoin current_user = LocalProxy(lambda: _get_user()) def _get_user(): if has_request_context(): if "_login_user" not in g: current_app.login_manager._load_user() return g._login_user return None db = SQLAlchemy() class Account(UserMixin, db.Model): __tablename__ = 'accounts' __table_args__ = ( db.PrimaryKeyConstraint('id', name='account_pkey'), db.Index('account_email_idx', 'email') ) id = db.Column(UUID, server_default=db.text('uuid_generate_v4()')) name = db.Column(db.String(255), nullable=False) email = db.Column(db.String(255), nullable=False) password = db.Column(db.String(255), nullable=True) password_salt = db.Column(db.String(255), nullable=True) avatar = db.Column(db.String(255)) interface_language = db.Column(db.String(255)) interface_theme = db.Column(db.String(255)) timezone = db.Column(db.String(255)) last_login_at = db.Column(db.DateTime) last_login_ip = db.Column(db.String(255)) last_active_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) status = db.Column(db.String(16), nullable=False, server_default=db.text("'active'::character varying")) initialized_at = db.Column(db.DateTime) created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) def is_password_set(self): return self.password is not None def current_tenant(self): return self._current_tenant def current_tenant(self, value): tenant = value ta = TenantAccountJoin.query.filter_by(tenant_id=tenant.id, account_id=self.id).first() if ta: tenant.current_role = 
ta.role else: tenant = None self._current_tenant = tenant def current_tenant_id(self): return self._current_tenant.id def current_tenant_id(self, value): try: tenant_account_join = db.session.query(Tenant, TenantAccountJoin) \ .filter(Tenant.id == value) \ .filter(TenantAccountJoin.tenant_id == Tenant.id) \ .filter(TenantAccountJoin.account_id == self.id) \ .one_or_none() if tenant_account_join: tenant, ta = tenant_account_join tenant.current_role = ta.role else: tenant = None except: tenant = None self._current_tenant = tenant def get_status(self) -> AccountStatus: status_str = self.status return AccountStatus(status_str) def get_by_openid(cls, provider: str, open_id: str) -> db.Model: account_integrate = db.session.query(AccountIntegrate). \ filter(AccountIntegrate.provider == provider, AccountIntegrate.open_id == open_id). \ one_or_none() if account_integrate: return db.session.query(Account). \ filter(Account.id == account_integrate.account_id). \ one_or_none() return None def get_integrates(self) -> list[db.Model]: ai = db.Model return db.session.query(ai).filter( ai.account_id == self.id ).all() # check current_user.current_tenant.current_role in ['admin', 'owner'] def is_admin_or_owner(self): return self._current_tenant.current_role in ['admin', 'owner'] class Tenant(db.Model): __tablename__ = 'tenants' __table_args__ = ( db.PrimaryKeyConstraint('id', name='tenant_pkey'), ) id = db.Column(UUID, server_default=db.text('uuid_generate_v4()')) name = db.Column(db.String(255), nullable=False) encrypt_public_key = db.Column(db.Text) plan = db.Column(db.String(255), nullable=False, server_default=db.text("'basic'::character varying")) status = db.Column(db.String(255), nullable=False, server_default=db.text("'normal'::character varying")) custom_config = db.Column(db.Text) created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) updated_at = db.Column(db.DateTime, nullable=False, 
server_default=db.text('CURRENT_TIMESTAMP(0)')) def get_accounts(self) -> list[db.Model]: Account = db.Model return db.session.query(Account).filter( Account.id == TenantAccountJoin.account_id, TenantAccountJoin.tenant_id == self.id ).all() def custom_config_dict(self) -> dict: return json.loads(self.custom_config) if self.custom_config else {} def custom_config_dict(self, value: dict): self.custom_config = json.dumps(value) class TenantAccountJoin(db.Model): __tablename__ = 'tenant_account_joins' __table_args__ = ( db.PrimaryKeyConstraint('id', name='tenant_account_join_pkey'), db.Index('tenant_account_join_account_id_idx', 'account_id'), db.Index('tenant_account_join_tenant_id_idx', 'tenant_id'), db.UniqueConstraint('tenant_id', 'account_id', name='unique_tenant_account_join') ) id = db.Column(UUID, server_default=db.text('uuid_generate_v4()')) tenant_id = db.Column(UUID, nullable=False) account_id = db.Column(UUID, nullable=False) current = db.Column(db.Boolean, nullable=False, server_default=db.text('false')) role = db.Column(db.String(16), nullable=False, server_default='normal') invited_by = db.Column(UUID, nullable=True) created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) The provided code snippet includes necessary dependencies for implementing the `login_required` function. Write a Python function `def login_required(func)` to solve the following problem: If you decorate a view with this, it will ensure that the current user is logged in and authenticated before calling the actual view. (If they are not, it calls the :attr:`LoginManager.unauthorized` callback.) 
For example:: @app.route('/post') @login_required def post(): pass If there are only certain times you need to require that your user is logged in, you can do so with:: if not current_user.is_authenticated: return current_app.login_manager.unauthorized() ...which is essentially the code that this function adds to your views. It can be convenient to globally turn off authentication when unit testing. To enable this, if the application configuration variable `LOGIN_DISABLED` is set to `True`, this decorator will be ignored. .. Note :: Per `W3 guidelines for CORS preflight requests <http://www.w3.org/TR/cors/#cross-origin-request-with-preflight-0>`_, HTTP ``OPTIONS`` requests are exempt from login checks. :param func: The view function to decorate. :type func: function Here is the function: def login_required(func): """ If you decorate a view with this, it will ensure that the current user is logged in and authenticated before calling the actual view. (If they are not, it calls the :attr:`LoginManager.unauthorized` callback.) For example:: @app.route('/post') @login_required def post(): pass If there are only certain times you need to require that your user is logged in, you can do so with:: if not current_user.is_authenticated: return current_app.login_manager.unauthorized() ...which is essentially the code that this function adds to your views. It can be convenient to globally turn off authentication when unit testing. To enable this, if the application configuration variable `LOGIN_DISABLED` is set to `True`, this decorator will be ignored. .. Note :: Per `W3 guidelines for CORS preflight requests <http://www.w3.org/TR/cors/#cross-origin-request-with-preflight-0>`_, HTTP ``OPTIONS`` requests are exempt from login checks. :param func: The view function to decorate. 
:type func: function """ @wraps(func) def decorated_view(*args, **kwargs): auth_header = request.headers.get('Authorization') admin_api_key_enable = os.getenv('ADMIN_API_KEY_ENABLE', default='False') if admin_api_key_enable: if auth_header: if ' ' not in auth_header: raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.') auth_scheme, auth_token = auth_header.split(None, 1) auth_scheme = auth_scheme.lower() if auth_scheme != 'bearer': raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.') admin_api_key = os.getenv('ADMIN_API_KEY') if admin_api_key: if os.getenv('ADMIN_API_KEY') == auth_token: workspace_id = request.headers.get('X-WORKSPACE-ID') if workspace_id: tenant_account_join = db.session.query(Tenant, TenantAccountJoin) \ .filter(Tenant.id == workspace_id) \ .filter(TenantAccountJoin.tenant_id == Tenant.id) \ .filter(TenantAccountJoin.role == 'owner') \ .one_or_none() if tenant_account_join: tenant, ta = tenant_account_join account = Account.query.filter_by(id=ta.account_id).first() # Login admin if account: account.current_tenant = tenant current_app.login_manager._update_request_context_with_user(account) user_logged_in.send(current_app._get_current_object(), user=_get_user()) if request.method in EXEMPT_METHODS or current_app.config.get("LOGIN_DISABLED"): pass elif not current_user.is_authenticated: return current_app.login_manager.unauthorized() # flask 1.x compatibility # current_app.ensure_sync is only available in Flask >= 2.0 if callable(getattr(current_app, "ensure_sync", None)): return current_app.ensure_sync(func)(*args, **kwargs) return func(*args, **kwargs) return decorated_view
If you decorate a view with this, it will ensure that the current user is logged in and authenticated before calling the actual view. (If they are not, it calls the :attr:`LoginManager.unauthorized` callback.) For example:: @app.route('/post') @login_required def post(): pass If there are only certain times you need to require that your user is logged in, you can do so with:: if not current_user.is_authenticated: return current_app.login_manager.unauthorized() ...which is essentially the code that this function adds to your views. It can be convenient to globally turn off authentication when unit testing. To enable this, if the application configuration variable `LOGIN_DISABLED` is set to `True`, this decorator will be ignored. .. Note :: Per `W3 guidelines for CORS preflight requests <http://www.w3.org/TR/cors/#cross-origin-request-with-preflight-0>`_, HTTP ``OPTIONS`` requests are exempt from login checks. :param func: The view function to decorate. :type func: function
16,819
import random import re import string import subprocess import uuid from datetime import datetime from hashlib import sha256 from zoneinfo import available_timezones from flask_restful import fields def run(script): return subprocess.getstatusoutput('source /root/.bashrc && ' + script)
null
16,820
import random import re import string import subprocess import uuid from datetime import datetime from hashlib import sha256 from zoneinfo import available_timezones from flask_restful import fields def email(email): # Define a regex pattern for email addresses pattern = r"^[\w\.-]+@([\w-]+\.)+[\w-]{2,}$" # Check if the email matches the pattern if re.match(pattern, email) is not None: return email error = ('{email} is not a valid email.' .format(email=email)) raise ValueError(error)
null
16,821
import random import re import string import subprocess import uuid from datetime import datetime from hashlib import sha256 from zoneinfo import available_timezones from flask_restful import fields def uuid_value(value): if value == '': return str(value) try: uuid_obj = uuid.UUID(value) return str(uuid_obj) except ValueError: error = ('{value} is not a valid uuid.' .format(value=value)) raise ValueError(error)
null
16,822
def timestamp_value(timestamp):
    """Coerce *timestamp* to a non-negative int, or raise ValueError.

    Anything ``int()`` accepts is allowed (floats are truncated);
    negative values and unparsable strings are rejected.
    """
    try:
        result = int(timestamp)
        if result < 0:
            raise ValueError
    except ValueError:
        raise ValueError(
            '{timestamp} is not a valid timestamp.'.format(timestamp=timestamp))
    return result
null
16,823
import random import re import string import subprocess import uuid from datetime import datetime from hashlib import sha256 from zoneinfo import available_timezones from flask_restful import fields def _get_float(value): try: return float(value) except (TypeError, ValueError): raise ValueError('{} is not a valid float'.format(value))
null
16,824
def timezone(timezone_string):
    """Validate an IANA timezone name; return it or raise ValueError.

    Empty/None values and names not in the system's tz database are
    rejected.
    """
    is_valid = bool(timezone_string) and timezone_string in available_timezones()
    if not is_valid:
        raise ValueError(
            '{timezone_string} is not a valid timezone.'.format(
                timezone_string=timezone_string))
    return timezone_string
null
16,825
def generate_string(n):
    """Return a random alphanumeric string of length *n*.

    Replaces the original quadratic ``result += ch`` loop with the
    idiomatic (and linear) ``str.join`` over a generator.

    NOTE(review): uses ``random``, not ``secrets`` — fine for non-secret
    identifiers, but switch to ``secrets.choice`` if these strings ever
    guard anything security-sensitive.
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(n))
null
16,826
def get_remote_ip(request):
    """Best-effort client IP: Cloudflare header, then X-Forwarded-For, then peer address.

    Fix: the original checked ``'CF-Connecting-IP'`` but then fetched
    ``'Cf-Connecting-Ip'`` — that only works when the headers object is
    case-insensitive (true for werkzeug, presumably the intent) and costs
    a second lookup. Fetch once with a single casing instead.
    """
    cf_ip = request.headers.get('CF-Connecting-IP')
    if cf_ip:
        return cf_ip
    forwarded = request.headers.getlist("X-Forwarded-For")
    if forwarded:
        # NOTE(review): the first hop of X-Forwarded-For is client-controlled
        # unless a trusted proxy sets/strips it — confirm the deployment
        # topology before trusting this value.
        return forwarded[0]
    return request.remote_addr
null
16,827
def compare_password(password_str, password_hashed_base64, salt_base64):
    """Check a login password against the stored hash in constant time.

    Hashes *password_str* with the stored (base64-encoded) salt and
    compares it to the stored (base64-encoded) hash.

    Fix: a plain ``==`` on hash bytes can leak match information through
    timing; ``hmac.compare_digest`` compares in constant time.
    Assumes ``hash_password`` returns bytes (the original compared its
    result to ``base64.b64decode(...)``, which is bytes).
    """
    import hmac  # local import: only needed for the timing-safe comparison

    candidate = hash_password(password_str, base64.b64decode(salt_base64))
    expected = base64.b64decode(password_hashed_base64)
    return hmac.compare_digest(candidate, expected)
null
16,828
import os from werkzeug.exceptions import Unauthorized import json import logging import threading import time import warnings from flask import Flask, Response, request from flask_cors import CORS from commands import register_commands from config import CloudEditionConfig, Config from extensions import ( ext_celery, ext_code_based_extension, ext_compress, ext_database, ext_hosting_provider, ext_login, ext_mail, ext_migrate, ext_redis, ext_sentry, ext_storage, ) from extensions.ext_database import db from extensions.ext_login import login_manager from libs.passport import PassportService from services.account_service import AccountService from events import event_handlers from models import account, dataset, model, source, task, tool, tools, web class DifyApp(Flask): pass config_type = os.getenv('EDITION', default='SELF_HOSTED') def initialize_extensions(app): # Since the application instance is now created, pass it to each Flask # extension instance to bind it to the Flask application instance (app) ext_compress.init_app(app) ext_code_based_extension.init() ext_database.init_app(app) ext_migrate.init(app, db) ext_redis.init_app(app) ext_storage.init_app(app) ext_celery.init_app(app) ext_login.init_app(app) ext_mail.init_app(app) ext_hosting_provider.init_app(app) ext_sentry.init_app(app) def register_blueprints(app): from controllers.console import bp as console_app_bp from controllers.files import bp as files_bp from controllers.service_api import bp as service_api_bp from controllers.web import bp as web_bp CORS(service_api_bp, allow_headers=['Content-Type', 'Authorization', 'X-App-Code'], methods=['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'PATCH'] ) app.register_blueprint(service_api_bp) CORS(web_bp, resources={ r"/*": {"origins": app.config['WEB_API_CORS_ALLOW_ORIGINS']}}, supports_credentials=True, allow_headers=['Content-Type', 'Authorization', 'X-App-Code'], methods=['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'PATCH'], expose_headers=['X-Version', 
'X-Env'] ) app.register_blueprint(web_bp) CORS(console_app_bp, resources={ r"/*": {"origins": app.config['CONSOLE_CORS_ALLOW_ORIGINS']}}, supports_credentials=True, allow_headers=['Content-Type', 'Authorization'], methods=['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'PATCH'], expose_headers=['X-Version', 'X-Env'] ) app.register_blueprint(console_app_bp) CORS(files_bp, allow_headers=['Content-Type'], methods=['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'PATCH'] ) app.register_blueprint(files_bp) app = create_app() if app.config['TESTING']: print("App is running in TESTING mode") if __name__ == '__main__': app.run(host='0.0.0.0', port=5001) """ Reset the encrypted key pair of workspace for encrypt LLM credentials. After the reset, all LLM credentials will become invalid, requiring re-entry. Only support SELF_HOSTED mode. """ def: def register_commands(app): app.cli.add_command(reset_password) app.cli.add_command(reset_email) app.cli.add_command(reset_encrypt_key_pair) app.cli.add_command(vdb_migrate) class Config: """Application configuration class.""" def __init__(self): # ------------------------ # General Configurations. # ------------------------ self.CURRENT_VERSION = "0.5.9" self.COMMIT_SHA = get_env('COMMIT_SHA') self.EDITION = "SELF_HOSTED" self.DEPLOY_ENV = get_env('DEPLOY_ENV') self.TESTING = False self.LOG_LEVEL = get_env('LOG_LEVEL') # The backend URL prefix of the console API. # used to concatenate the login authorization callback or notion integration callback. self.CONSOLE_API_URL = get_env('CONSOLE_API_URL') # The front-end URL prefix of the console web. # used to concatenate some front-end addresses and for CORS configuration use. self.CONSOLE_WEB_URL = get_env('CONSOLE_WEB_URL') # WebApp Url prefix. # used to display WebAPP API Base Url to the front-end. self.APP_WEB_URL = get_env('APP_WEB_URL') # Service API Url prefix. # used to display Service API Base Url to the front-end. 
self.SERVICE_API_URL = get_env('SERVICE_API_URL') # File preview or download Url prefix. # used to display File preview or download Url to the front-end or as Multi-model inputs; # Url is signed and has expiration time. self.FILES_URL = get_env('FILES_URL') if get_env('FILES_URL') else self.CONSOLE_API_URL # Your App secret key will be used for securely signing the session cookie # Make sure you are changing this key for your deployment with a strong key. # You can generate a strong key using `openssl rand -base64 42`. # Alternatively you can set it with `SECRET_KEY` environment variable. self.SECRET_KEY = get_env('SECRET_KEY') # cors settings self.CONSOLE_CORS_ALLOW_ORIGINS = get_cors_allow_origins( 'CONSOLE_CORS_ALLOW_ORIGINS', self.CONSOLE_WEB_URL) self.WEB_API_CORS_ALLOW_ORIGINS = get_cors_allow_origins( 'WEB_API_CORS_ALLOW_ORIGINS', '*') # check update url self.CHECK_UPDATE_URL = get_env('CHECK_UPDATE_URL') # ------------------------ # Database Configurations. # ------------------------ db_credentials = { key: get_env(key) for key in ['DB_USERNAME', 'DB_PASSWORD', 'DB_HOST', 'DB_PORT', 'DB_DATABASE', 'DB_CHARSET'] } db_extras = f"?client_encoding={db_credentials['DB_CHARSET']}" if db_credentials['DB_CHARSET'] else "" self.SQLALCHEMY_DATABASE_URI = f"postgresql://{db_credentials['DB_USERNAME']}:{db_credentials['DB_PASSWORD']}@{db_credentials['DB_HOST']}:{db_credentials['DB_PORT']}/{db_credentials['DB_DATABASE']}{db_extras}" self.SQLALCHEMY_ENGINE_OPTIONS = { 'pool_size': int(get_env('SQLALCHEMY_POOL_SIZE')), 'pool_recycle': int(get_env('SQLALCHEMY_POOL_RECYCLE')) } self.SQLALCHEMY_ECHO = get_bool_env('SQLALCHEMY_ECHO') # ------------------------ # Redis Configurations. 
# ------------------------ self.REDIS_HOST = get_env('REDIS_HOST') self.REDIS_PORT = get_env('REDIS_PORT') self.REDIS_USERNAME = get_env('REDIS_USERNAME') self.REDIS_PASSWORD = get_env('REDIS_PASSWORD') self.REDIS_DB = get_env('REDIS_DB') self.REDIS_USE_SSL = get_bool_env('REDIS_USE_SSL') # ------------------------ # Celery worker Configurations. # ------------------------ self.CELERY_BROKER_URL = get_env('CELERY_BROKER_URL') self.CELERY_BACKEND = get_env('CELERY_BACKEND') self.CELERY_RESULT_BACKEND = 'db+{}'.format(self.SQLALCHEMY_DATABASE_URI) \ if self.CELERY_BACKEND == 'database' else self.CELERY_BROKER_URL self.BROKER_USE_SSL = self.CELERY_BROKER_URL.startswith('rediss://') # ------------------------ # File Storage Configurations. # ------------------------ self.STORAGE_TYPE = get_env('STORAGE_TYPE') self.STORAGE_LOCAL_PATH = get_env('STORAGE_LOCAL_PATH') self.S3_ENDPOINT = get_env('S3_ENDPOINT') self.S3_BUCKET_NAME = get_env('S3_BUCKET_NAME') self.S3_ACCESS_KEY = get_env('S3_ACCESS_KEY') self.S3_SECRET_KEY = get_env('S3_SECRET_KEY') self.S3_REGION = get_env('S3_REGION') # ------------------------ # Vector Store Configurations. 
# Currently, only support: qdrant, milvus, zilliz, weaviate # ------------------------ self.VECTOR_STORE = get_env('VECTOR_STORE') self.KEYWORD_STORE = get_env('KEYWORD_STORE') # qdrant settings self.QDRANT_URL = get_env('QDRANT_URL') self.QDRANT_API_KEY = get_env('QDRANT_API_KEY') self.QDRANT_CLIENT_TIMEOUT = get_env('QDRANT_CLIENT_TIMEOUT') # milvus / zilliz setting self.MILVUS_HOST = get_env('MILVUS_HOST') self.MILVUS_PORT = get_env('MILVUS_PORT') self.MILVUS_USER = get_env('MILVUS_USER') self.MILVUS_PASSWORD = get_env('MILVUS_PASSWORD') self.MILVUS_SECURE = get_env('MILVUS_SECURE') # weaviate settings self.WEAVIATE_ENDPOINT = get_env('WEAVIATE_ENDPOINT') self.WEAVIATE_API_KEY = get_env('WEAVIATE_API_KEY') self.WEAVIATE_GRPC_ENABLED = get_bool_env('WEAVIATE_GRPC_ENABLED') self.WEAVIATE_BATCH_SIZE = int(get_env('WEAVIATE_BATCH_SIZE')) # ------------------------ # Mail Configurations. # ------------------------ self.MAIL_TYPE = get_env('MAIL_TYPE') self.MAIL_DEFAULT_SEND_FROM = get_env('MAIL_DEFAULT_SEND_FROM') self.RESEND_API_KEY = get_env('RESEND_API_KEY') self.RESEND_API_URL = get_env('RESEND_API_URL') # SMTP settings self.SMTP_SERVER = get_env('SMTP_SERVER') self.SMTP_PORT = get_env('SMTP_PORT') self.SMTP_USERNAME = get_env('SMTP_USERNAME') self.SMTP_PASSWORD = get_env('SMTP_PASSWORD') self.SMTP_USE_TLS = get_bool_env('SMTP_USE_TLS') # ------------------------ # Workpace Configurations. # ------------------------ self.INVITE_EXPIRY_HOURS = int(get_env('INVITE_EXPIRY_HOURS')) # ------------------------ # Sentry Configurations. # ------------------------ self.SENTRY_DSN = get_env('SENTRY_DSN') self.SENTRY_TRACES_SAMPLE_RATE = float(get_env('SENTRY_TRACES_SAMPLE_RATE')) self.SENTRY_PROFILES_SAMPLE_RATE = float(get_env('SENTRY_PROFILES_SAMPLE_RATE')) # ------------------------ # Business Configurations. 
# ------------------------ # multi model send image format, support base64, url, default is base64 self.MULTIMODAL_SEND_IMAGE_FORMAT = get_env('MULTIMODAL_SEND_IMAGE_FORMAT') # Dataset Configurations. self.CLEAN_DAY_SETTING = get_env('CLEAN_DAY_SETTING') # File upload Configurations. self.UPLOAD_FILE_SIZE_LIMIT = int(get_env('UPLOAD_FILE_SIZE_LIMIT')) self.UPLOAD_FILE_BATCH_LIMIT = int(get_env('UPLOAD_FILE_BATCH_LIMIT')) self.UPLOAD_IMAGE_FILE_SIZE_LIMIT = int(get_env('UPLOAD_IMAGE_FILE_SIZE_LIMIT')) # Moderation in app Configurations. self.OUTPUT_MODERATION_BUFFER_SIZE = int(get_env('OUTPUT_MODERATION_BUFFER_SIZE')) # Notion integration setting self.NOTION_CLIENT_ID = get_env('NOTION_CLIENT_ID') self.NOTION_CLIENT_SECRET = get_env('NOTION_CLIENT_SECRET') self.NOTION_INTEGRATION_TYPE = get_env('NOTION_INTEGRATION_TYPE') self.NOTION_INTERNAL_SECRET = get_env('NOTION_INTERNAL_SECRET') self.NOTION_INTEGRATION_TOKEN = get_env('NOTION_INTEGRATION_TOKEN') # ------------------------ # Platform Configurations. 
# ------------------------ self.HOSTED_OPENAI_API_KEY = get_env('HOSTED_OPENAI_API_KEY') self.HOSTED_OPENAI_API_BASE = get_env('HOSTED_OPENAI_API_BASE') self.HOSTED_OPENAI_API_ORGANIZATION = get_env('HOSTED_OPENAI_API_ORGANIZATION') self.HOSTED_OPENAI_TRIAL_ENABLED = get_bool_env('HOSTED_OPENAI_TRIAL_ENABLED') self.HOSTED_OPENAI_TRIAL_MODELS = get_env('HOSTED_OPENAI_TRIAL_MODELS') self.HOSTED_OPENAI_QUOTA_LIMIT = int(get_env('HOSTED_OPENAI_QUOTA_LIMIT')) self.HOSTED_OPENAI_PAID_ENABLED = get_bool_env('HOSTED_OPENAI_PAID_ENABLED') self.HOSTED_OPENAI_PAID_MODELS = get_env('HOSTED_OPENAI_PAID_MODELS') self.HOSTED_AZURE_OPENAI_ENABLED = get_bool_env('HOSTED_AZURE_OPENAI_ENABLED') self.HOSTED_AZURE_OPENAI_API_KEY = get_env('HOSTED_AZURE_OPENAI_API_KEY') self.HOSTED_AZURE_OPENAI_API_BASE = get_env('HOSTED_AZURE_OPENAI_API_BASE') self.HOSTED_AZURE_OPENAI_QUOTA_LIMIT = int(get_env('HOSTED_AZURE_OPENAI_QUOTA_LIMIT')) self.HOSTED_ANTHROPIC_API_BASE = get_env('HOSTED_ANTHROPIC_API_BASE') self.HOSTED_ANTHROPIC_API_KEY = get_env('HOSTED_ANTHROPIC_API_KEY') self.HOSTED_ANTHROPIC_TRIAL_ENABLED = get_bool_env('HOSTED_ANTHROPIC_TRIAL_ENABLED') self.HOSTED_ANTHROPIC_QUOTA_LIMIT = int(get_env('HOSTED_ANTHROPIC_QUOTA_LIMIT')) self.HOSTED_ANTHROPIC_PAID_ENABLED = get_bool_env('HOSTED_ANTHROPIC_PAID_ENABLED') self.HOSTED_MINIMAX_ENABLED = get_bool_env('HOSTED_MINIMAX_ENABLED') self.HOSTED_SPARK_ENABLED = get_bool_env('HOSTED_SPARK_ENABLED') self.HOSTED_ZHIPUAI_ENABLED = get_bool_env('HOSTED_ZHIPUAI_ENABLED') self.HOSTED_MODERATION_ENABLED = get_bool_env('HOSTED_MODERATION_ENABLED') self.HOSTED_MODERATION_PROVIDERS = get_env('HOSTED_MODERATION_PROVIDERS') self.ETL_TYPE = get_env('ETL_TYPE') self.UNSTRUCTURED_API_URL = get_env('UNSTRUCTURED_API_URL') self.BILLING_ENABLED = get_bool_env('BILLING_ENABLED') self.CAN_REPLACE_LOGO = get_bool_env('CAN_REPLACE_LOGO') self.BATCH_UPLOAD_LIMIT = get_env('BATCH_UPLOAD_LIMIT') self.API_COMPRESSION_ENABLED = get_bool_env('API_COMPRESSION_ENABLED') 
class CloudEditionConfig(Config):
    """Cloud-edition settings: everything in Config plus OAuth credentials."""

    def __init__(self):
        super().__init__()
        self.EDITION = "CLOUD"
        # OAuth provider credentials, each read from the env var of the
        # same name.
        for key in ('GITHUB_CLIENT_ID', 'GITHUB_CLIENT_SECRET',
                    'GOOGLE_CLIENT_ID', 'GOOGLE_CLIENT_SECRET',
                    'OAUTH_REDIRECT_PATH'):
            setattr(self, key, get_env(key))


def create_app(test_config=None) -> Flask:
    """Application factory: choose a config, then wire up extensions,
    blueprints and CLI commands.

    *test_config* (a config object) takes priority; otherwise the edition
    selected by the module-level ``config_type`` decides between cloud and
    self-hosted configuration.
    """
    app = DifyApp(__name__)

    if test_config:
        app.config.from_object(test_config)
    elif config_type == "CLOUD":
        app.config.from_object(CloudEditionConfig())
    else:
        app.config.from_object(Config())

    app.secret_key = app.config['SECRET_KEY']
    logging.basicConfig(level=app.config.get('LOG_LEVEL', 'INFO'))

    initialize_extensions(app)
    register_blueprints(app)
    register_commands(app)
    return app
null
16,829
import os from werkzeug.exceptions import Unauthorized import json import logging import threading import time import warnings from flask import Flask, Response, request from flask_cors import CORS from commands import register_commands from config import CloudEditionConfig, Config from extensions import ( ext_celery, ext_code_based_extension, ext_compress, ext_database, ext_hosting_provider, ext_login, ext_mail, ext_migrate, ext_redis, ext_sentry, ext_storage, ) from extensions.ext_database import db from extensions.ext_login import login_manager from libs.passport import PassportService from services.account_service import AccountService from events import event_handlers from models import account, dataset, model, source, task, tool, tools, web class PassportService: def __init__(self): self.sk = current_app.config.get('SECRET_KEY') def issue(self, payload): return jwt.encode(payload, self.sk, algorithm='HS256') def verify(self, token): try: return jwt.decode(token, self.sk, algorithms=['HS256']) except jwt.exceptions.InvalidSignatureError: raise Unauthorized('Invalid token signature.') except jwt.exceptions.DecodeError: raise Unauthorized('Invalid token.') except jwt.exceptions.ExpiredSignatureError: raise Unauthorized('Token has expired.') class AccountService: def load_user(user_id: str) -> Account: account = Account.query.filter_by(id=user_id).first() if not account: return None if account.status in [AccountStatus.BANNED.value, AccountStatus.CLOSED.value]: raise Forbidden('Account is banned or closed.') current_tenant = TenantAccountJoin.query.filter_by(account_id=account.id, current=True).first() if current_tenant: account.current_tenant_id = current_tenant.tenant_id else: available_ta = TenantAccountJoin.query.filter_by(account_id=account.id) \ .order_by(TenantAccountJoin.id.asc()).first() if not available_ta: return None account.current_tenant_id = available_ta.tenant_id available_ta.current = True db.session.commit() if datetime.utcnow() - 
account.last_active_at > timedelta(minutes=10): account.last_active_at = datetime.utcnow() db.session.commit() return account def get_account_jwt_token(account): payload = { "user_id": account.id, "exp": datetime.utcnow() + timedelta(days=30), "iss": current_app.config['EDITION'], "sub": 'Console API Passport', } token = PassportService().issue(payload) return token def authenticate(email: str, password: str) -> Account: """authenticate account with email and password""" account = Account.query.filter_by(email=email).first() if not account: raise AccountLoginError('Invalid email or password.') if account.status == AccountStatus.BANNED.value or account.status == AccountStatus.CLOSED.value: raise AccountLoginError('Account is banned or closed.') if account.status == AccountStatus.PENDING.value: account.status = AccountStatus.ACTIVE.value account.initialized_at = datetime.utcnow() db.session.commit() if account.password is None or not compare_password(password, account.password, account.password_salt): raise AccountLoginError('Invalid email or password.') return account def update_account_password(account, password, new_password): """update account password""" if account.password and not compare_password(password, account.password, account.password_salt): raise CurrentPasswordIncorrectError("Current password is incorrect.") # may be raised valid_password(new_password) # generate password salt salt = secrets.token_bytes(16) base64_salt = base64.b64encode(salt).decode() # encrypt password with salt password_hashed = hash_password(new_password, salt) base64_password_hashed = base64.b64encode(password_hashed).decode() account.password = base64_password_hashed account.password_salt = base64_salt db.session.commit() return account def create_account(email: str, name: str, interface_language: str, password: str = None, interface_theme: str = 'light', timezone: str = 'America/New_York', ) -> Account: """create account""" account = Account() account.email = email account.name 
= name if password: # generate password salt salt = secrets.token_bytes(16) base64_salt = base64.b64encode(salt).decode() # encrypt password with salt password_hashed = hash_password(password, salt) base64_password_hashed = base64.b64encode(password_hashed).decode() account.password = base64_password_hashed account.password_salt = base64_salt account.interface_language = interface_language account.interface_theme = interface_theme # Set timezone based on language account.timezone = language_timezone_mapping.get(interface_language, 'UTC') db.session.add(account) db.session.commit() return account def link_account_integrate(provider: str, open_id: str, account: Account) -> None: """Link account integrate""" try: # Query whether there is an existing binding record for the same provider account_integrate: Optional[AccountIntegrate] = AccountIntegrate.query.filter_by(account_id=account.id, provider=provider).first() if account_integrate: # If it exists, update the record account_integrate.open_id = open_id account_integrate.encrypted_token = "" # todo account_integrate.updated_at = datetime.utcnow() else: # If it does not exist, create a new record account_integrate = AccountIntegrate(account_id=account.id, provider=provider, open_id=open_id, encrypted_token="") db.session.add(account_integrate) db.session.commit() logging.info(f'Account {account.id} linked {provider} account {open_id}.') except Exception as e: logging.exception(f'Failed to link {provider} account {open_id} to Account {account.id}') raise LinkAccountIntegrateError('Failed to link account.') from e def close_account(account: Account) -> None: """todo: Close account""" account.status = AccountStatus.CLOSED.value db.session.commit() def update_account(account, **kwargs): """Update account fields""" for field, value in kwargs.items(): if hasattr(account, field): setattr(account, field, value) else: raise AttributeError(f"Invalid field: {field}") db.session.commit() return account def 
def load_user_from_request(request_from_flask_login):
    """Load user based on the request.

    Console-blueprint requests authenticate via a ``Bearer`` token in the
    Authorization header, falling back to a ``_token`` query parameter.
    Returns the Account for the token's user_id, or None for non-console
    blueprints. Raises Unauthorized for missing/malformed credentials.
    """
    if request.blueprint != 'console':
        return None

    auth_header = request.headers.get('Authorization', '')
    if not auth_header:
        # No header — fall back to a token passed in the query string.
        auth_token = request.args.get('_token')
        if not auth_token:
            raise Unauthorized('Invalid Authorization token.')
    else:
        if ' ' not in auth_header:
            raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')
        auth_scheme, auth_token = auth_header.split(None, 1)
        if auth_scheme.lower() != 'bearer':
            raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')

    decoded = PassportService().verify(auth_token)
    return AccountService.load_user(decoded.get('user_id'))
Load user based on the request.
16,830
def unauthorized_handler():
    """Handle unauthorized requests with a JSON 401 response."""
    body = json.dumps({
        'code': 'unauthorized',
        'message': "Unauthorized."
    })
    return Response(body, status=401, content_type="application/json")
Handle unauthorized requests.
16,831
def after_request(response):
    """Add Version headers to every response and clear the remember cookie."""
    # Expire any flask-login remember_token so it is never persisted.
    response.set_cookie('remember_token', '', expires=0)
    for header, config_key in (('X-Version', 'CURRENT_VERSION'),
                               ('X-Env', 'DEPLOY_ENV')):
        response.headers.add(header, app.config[config_key])
    return response
Add Version headers to the response.
16,832
def health():
    """Liveness endpoint: report status and the running version as JSON."""
    payload = {'status': 'ok', 'version': app.config['CURRENT_VERSION']}
    return Response(json.dumps(payload), status=200,
                    content_type="application/json")
null
16,833
def threads():
    """Debug snapshot of live Python threads.

    Returns a dict with the active thread count and, per thread, its
    name, ident and liveness flag.
    """
    total = threading.active_count()
    details = [
        {'name': t.name, 'id': t.ident, 'is_alive': t.is_alive()}
        for t in threading.enumerate()
    ]
    return {'thread_num': total, 'threads': details}
null
16,834
def pool_stat():
    """Expose SQLAlchemy connection-pool statistics for debugging."""
    pool = db.engine.pool
    return {
        'pool_size': pool.size(),
        'checked_in_connections': pool.checkedin(),
        'checked_out_connections': pool.checkedout(),
        'overflow_connections': pool.overflow(),
        'connection_timeout': pool.timeout(),
        # _recycle has no public accessor; private attribute by necessity
        'recycle_time': pool._recycle,
    }
null
16,835
def remove_notgiven_indict(obj):
    """Return *obj* with NotGiven-valued entries removed (mappings only).

    Non-mapping inputs — including None — are returned untouched.
    """
    if not isinstance(obj, Mapping):
        return obj
    return {k: v for k, v in obj.items() if not isinstance(v, NotGiven)}
null
16,836
_T = TypeVar("_T")


def flatten(t: Iterable[Iterable[_T]]) -> list[_T]:
    """Collapse one level of nesting into a single list."""
    result: list[_T] = []
    for inner in t:
        result.extend(inner)
    return result
null
16,837
from __future__ import annotations import inspect from collections.abc import Mapping from typing import Any, Union, cast import httpx import pydantic from httpx import URL, Timeout from . import _errors from ._base_type import NOT_GIVEN, Body, Data, Headers, NotGiven, Query, RequestFiles, ResponseT from ._errors import APIResponseValidationError, APIStatusError, APITimeoutError from ._files import make_httpx_files from ._request_opt import ClientRequestParam, UserRequestInput from ._response import HttpResponse from ._sse_client import StreamResponse from ._utils import flatten from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT def _merge_map(map1: Mapping, map2: Mapping) -> Mapping: merged = {**map1, **map2} return {key: val for key, val in merged.items() if val is not None}
null
16,838
from __future__ import annotations import inspect from collections.abc import Mapping from typing import Any, Union, cast import httpx import pydantic from httpx import URL, Timeout from . import _errors from ._base_type import NOT_GIVEN, Body, Data, Headers, NotGiven, Query, RequestFiles, ResponseT from ._errors import APIResponseValidationError, APIStatusError, APITimeoutError from ._files import make_httpx_files from ._request_opt import ClientRequestParam, UserRequestInput from ._response import HttpResponse from ._sse_client import StreamResponse from ._utils import flatten from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT Query = Mapping[str, object] class NotGiven(pydantic.BaseModel): """ A sentinel singleton class used to distinguish omitted keyword arguments from those passed in with the value None (which may have different behavior). For example: ```py def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ... get(timeout=1) # 1s timeout get(timeout=None) # No timeout get() # Default timeout behavior, which may not be statically known at the method definition. ``` """ def __bool__(self) -> Literal[False]: return False def __repr__(self) -> str: return "NOT_GIVEN" NOT_GIVEN = NotGiven() Headers = Mapping[str, Union[str, Omit]] class UserRequestInput(TypedDict, total=False): max_retries: int timeout: float | Timeout | None headers: Headers params: Query | None def make_user_request_input( max_retries: int | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, extra_headers: Headers = None, query: Query | None = None, ) -> UserRequestInput: options: UserRequestInput = {} if extra_headers is not None: options["headers"] = extra_headers if max_retries is not None: options["max_retries"] = max_retries if not isinstance(timeout, NotGiven): options['timeout'] = timeout if query is not None: options["params"] = query return options
null
16,839
from __future__ import annotations

import io
import os
from collections.abc import Mapping, Sequence
from pathlib import Path
from typing import Union  # fix: Union is used below but was never imported (NameError)

from ._base_type import FileTypes, HttpxFileTypes, HttpxRequestFiles, RequestFiles


def _transform_file(file: FileTypes) -> HttpxFileTypes:
    """Normalize a single user-supplied file spec into httpx's file form.

    Bare path-likes are read eagerly into ``(name, bytes)``; tuples keep
    their field name and optional extras, with path-like contents read into
    bytes.

    :raises TypeError: if *file* is neither file content nor a tuple.
    """
    # NOTE(review): is_file_content is not defined in this snippet —
    # presumably imported from ._base_type in the full module; confirm.
    if is_file_content(file):
        if isinstance(file, os.PathLike):
            path = Path(file)
            return path.name, path.read_bytes()
        else:
            return file
    if isinstance(file, tuple):
        if isinstance(file[1], os.PathLike):
            return (file[0], Path(file[1]).read_bytes(), *file[2:])
        else:
            return (file[0], file[1], *file[2:])
    else:
        # fix: message previously read "…{type},Expected FileContent…"
        # (missing space, stray capital).
        raise TypeError(f"Unexpected input file with type {type(file)}, expected FileContent type or tuple type")


RequestFiles = Union[Mapping[str, FileTypes], Sequence[tuple[str, FileTypes]]]
HttpxRequestFiles = Union[Mapping[str, HttpxFileTypes], Sequence[tuple[str, HttpxFileTypes]]]


def make_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
    """Convert a user-facing files argument into httpx's expected shape.

    Accepts a mapping of ``name -> file`` or a sequence of ``(name, file)``
    pairs; each file is normalized via :func:`_transform_file`. ``None``
    passes through.

    :raises TypeError: if *files* is neither a Mapping nor a Sequence.
    """
    if files is None:
        return None
    if isinstance(files, Mapping):
        files = {key: _transform_file(file) for key, file in files.items()}
    elif isinstance(files, Sequence):
        files = [(key, _transform_file(file)) for key, file in files]
    else:
        # fix: "excepted" -> "expected" in the error message.
        raise TypeError(f"Unexpected input file with type {type(files)}, expected Mapping or Sequence")
    return files
null
16,840
import time

import jwt

# Dropped `import cachetools.func`: it was never used in this snippet.

# How long an issued token stays valid, in seconds.
API_TOKEN_TTL_SECONDS = 3 * 60


def generate_token(apikey: str) -> str:
    """Create a short-lived signed JWT from a ``"<api_key>.<secret>"`` string.

    The api-key id goes into the payload; the secret signs the token with
    HS256. ``exp`` and ``timestamp`` are in milliseconds.

    :param apikey: credential of the form ``"<api_key>.<secret>"``.
    :raises Exception: if *apikey* does not split into exactly two parts.
    :return: the encoded JWT string.
    """
    try:
        api_key, secret = apikey.split(".")
    except Exception as e:
        # Chain the original error so the root cause stays visible.
        raise Exception("invalid api_key", e) from e

    # Sample the clock once so `exp` and `timestamp` are exactly
    # TTL apart (the original called time.time() twice, allowing skew).
    now_ms = int(round(time.time() * 1000))
    payload = {
        "api_key": api_key,
        "exp": now_ms + API_TOKEN_TTL_SECONDS * 1000,
        "timestamp": now_ms,
    }

    return jwt.encode(
        payload,
        secret,
        algorithm="HS256",
        headers={"alg": "HS256", "sign_type": "SIGN"},
    )
null
16,841
from pydantic import BaseModel from core.model_runtime.entities.defaults import PARAMETER_RULE_TEMPLATE from core.model_runtime.entities.llm_entities import LLMMode from core.model_runtime.entities.model_entities import ( AIModelEntity, DefaultParameterName, FetchFrom, I18nObject, ModelFeature, ModelPropertyKey, ModelType, ParameterRule, PriceConfig, ) PARAMETER_RULE_TEMPLATE: dict[DefaultParameterName, dict] = { DefaultParameterName.TEMPERATURE: { 'label': { 'en_US': 'Temperature', 'zh_Hans': '温度', }, 'type': 'float', 'help': { 'en_US': 'Controls randomness. Lower temperature results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive. Higher temperature results in more random completions.', 'zh_Hans': '温度控制随机性。较低的温度会导致较少的随机完成。随着温度接近零,模型将变得确定性和重复性。较高的温度会导致更多的随机完成。', }, 'required': False, 'default': 0.0, 'min': 0.0, 'max': 1.0, 'precision': 2, }, DefaultParameterName.TOP_P: { 'label': { 'en_US': 'Top P', 'zh_Hans': 'Top P', }, 'type': 'float', 'help': { 'en_US': 'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered.', 'zh_Hans': '通过核心采样控制多样性:0.5表示考虑了一半的所有可能性加权选项。', }, 'required': False, 'default': 1.0, 'min': 0.0, 'max': 1.0, 'precision': 2, }, DefaultParameterName.PRESENCE_PENALTY: { 'label': { 'en_US': 'Presence Penalty', 'zh_Hans': '存在惩罚', }, 'type': 'float', 'help': { 'en_US': 'Applies a penalty to the log-probability of tokens already in the text.', 'zh_Hans': '对文本中已有的标记的对数概率施加惩罚。', }, 'required': False, 'default': 0.0, 'min': 0.0, 'max': 1.0, 'precision': 2, }, DefaultParameterName.FREQUENCY_PENALTY: { 'label': { 'en_US': 'Frequency Penalty', 'zh_Hans': '频率惩罚', }, 'type': 'float', 'help': { 'en_US': 'Applies a penalty to the log-probability of tokens that appear in the text.', 'zh_Hans': '对文本中出现的标记的对数概率施加惩罚。', }, 'required': False, 'default': 0.0, 'min': 0.0, 'max': 1.0, 'precision': 2, }, DefaultParameterName.MAX_TOKENS: { 'label': { 'en_US': 
'Max Tokens', 'zh_Hans': '最大标记', }, 'type': 'int', 'help': { 'en_US': 'The maximum number of tokens to generate. Requests can use up to 2048 tokens shared between prompt and completion.', 'zh_Hans': '要生成的标记的最大数量。请求可以使用最多2048个标记,这些标记在提示和完成之间共享。', }, 'required': False, 'default': 64, 'min': 1, 'max': 2048, 'precision': 0, }, DefaultParameterName.RESPONSE_FORMAT: { 'label': { 'en_US': 'Response Format', 'zh_Hans': '回复格式', }, 'type': 'string', 'help': { 'en_US': 'Set a response format, ensure the output from llm is a valid code block as possible, such as JSON, XML, etc.', 'zh_Hans': '设置一个返回格式,确保llm的输出尽可能是有效的代码块,如JSON、XML等', }, 'required': False, 'options': ['JSON', 'XML'], } } class DefaultParameterName(Enum): def value_of(cls, value: Any) -> 'DefaultParameterName': class ParameterRule(BaseModel): def _get_max_tokens(default: int, min_val: int, max_val: int) -> ParameterRule: rule = ParameterRule( name='max_tokens', **PARAMETER_RULE_TEMPLATE[DefaultParameterName.MAX_TOKENS], ) rule.default = default rule.min = min_val rule.max = max_val return rule
null
16,842
# NOTE(review): this import header is shared by the whole encoder module;
# most names are unused by the single helper below.
import dataclasses
import datetime
from collections import defaultdict, deque
from collections.abc import Callable
from decimal import Decimal
from enum import Enum
from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network
from pathlib import Path, PurePath
from re import Pattern
from types import GeneratorType
from typing import Any, Optional, Union
from uuid import UUID

from pydantic import BaseModel
from pydantic.color import Color
from pydantic.networks import AnyUrl, NameEmail
from pydantic.types import SecretBytes, SecretStr

from ._compat import PYDANTIC_V2, Url, _model_dump


def isoformat(o: Union[datetime.date, datetime.time]) -> str:
    """Serialize a date, time, or datetime to its ISO-8601 string form."""
    return o.isoformat()
null
16,843
import dataclasses import datetime from collections import defaultdict, deque from collections.abc import Callable from decimal import Decimal from enum import Enum from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network from pathlib import Path, PurePath from re import Pattern from types import GeneratorType from typing import Any, Optional, Union from uuid import UUID from pydantic import BaseModel from pydantic.color import Color from pydantic.networks import AnyUrl, NameEmail from pydantic.types import SecretBytes, SecretStr from ._compat import PYDANTIC_V2, Url, _model_dump The provided code snippet includes necessary dependencies for implementing the `decimal_encoder` function. Write a Python function `def decimal_encoder(dec_value: Decimal) -> Union[int, float]` to solve the following problem: Encodes a Decimal as int of there's no exponent, otherwise float This is useful when we use ConstrainedDecimal to represent Numeric(x,0) where a integer (but not int typed) is used. Encoding this as a float results in failed round-tripping between encode and parse. Our Id type is a prime example of this. >>> decimal_encoder(Decimal("1.0")) 1.0 >>> decimal_encoder(Decimal("1")) 1 Here is the function: def decimal_encoder(dec_value: Decimal) -> Union[int, float]: """ Encodes a Decimal as int of there's no exponent, otherwise float This is useful when we use ConstrainedDecimal to represent Numeric(x,0) where a integer (but not int typed) is used. Encoding this as a float results in failed round-tripping between encode and parse. Our Id type is a prime example of this. >>> decimal_encoder(Decimal("1.0")) 1.0 >>> decimal_encoder(Decimal("1")) 1 """ if dec_value.as_tuple().exponent >= 0: # type: ignore[operator] return int(dec_value) else: return float(dec_value)
Encodes a Decimal as int if there's no exponent, otherwise float This is useful when we use ConstrainedDecimal to represent Numeric(x,0) where an integer (but not int typed) is used. Encoding this as a float results in failed round-tripping between encode and parse. Our Id type is a prime example of this. >>> decimal_encoder(Decimal("1.0")) 1.0 >>> decimal_encoder(Decimal("1")) 1
16,844
import dataclasses
import datetime
from collections import defaultdict, deque
from collections.abc import Callable
from decimal import Decimal
from enum import Enum
from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network
from pathlib import Path, PurePath
from re import Pattern
from types import GeneratorType
from typing import Any, Optional, Union
from uuid import UUID

from pydantic import BaseModel
from pydantic.color import Color
from pydantic.networks import AnyUrl, NameEmail
from pydantic.types import SecretBytes, SecretStr

from ._compat import PYDANTIC_V2, Url, _model_dump

# NOTE(review): in this snippet the call below precedes the function
# definition and references ENCODERS_BY_TYPE, which is not defined here —
# in the full module both presumably appear earlier; confirm ordering.
encoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE)


def generate_encoders_by_class_tuples(
    type_encoder_map: dict[Any, Callable[[Any], Any]]
) -> dict[Callable[[Any], Any], tuple[Any, ...]]:
    """Invert a ``type -> encoder`` map into ``encoder -> (types, ...)``.

    The resulting tuples are suitable for ``isinstance(obj, classes_tuple)``
    dispatch: every type sharing an encoder is grouped under that encoder.
    """
    encoders_by_class_tuples: dict[Callable[[Any], Any], tuple[Any, ...]] = defaultdict(
        tuple
    )
    for type_, encoder in type_encoder_map.items():
        # Grow the tuple keyed by the encoder; defaultdict starts it empty.
        encoders_by_class_tuples[encoder] += (type_,)
    return encoders_by_class_tuples
null
16,845
import dataclasses import datetime from collections import defaultdict, deque from collections.abc import Callable from decimal import Decimal from enum import Enum from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network from pathlib import Path, PurePath from re import Pattern from types import GeneratorType from typing import Any, Optional, Union from uuid import UUID from pydantic import BaseModel from pydantic.color import Color from pydantic.networks import AnyUrl, NameEmail from pydantic.types import SecretBytes, SecretStr from ._compat import PYDANTIC_V2, Url, _model_dump ENCODERS_BY_TYPE: dict[type[Any], Callable[[Any], Any]] = { bytes: lambda o: o.decode(), Color: str, datetime.date: isoformat, datetime.datetime: isoformat, datetime.time: isoformat, datetime.timedelta: lambda td: td.total_seconds(), Decimal: decimal_encoder, Enum: lambda o: o.value, frozenset: list, deque: list, GeneratorType: list, IPv4Address: str, IPv4Interface: str, IPv4Network: str, IPv6Address: str, IPv6Interface: str, IPv6Network: str, NameEmail: str, Path: str, Pattern: lambda o: o.pattern, SecretBytes: str, SecretStr: str, set: list, UUID: str, Url: str, AnyUrl: str, } encoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE) PYDANTIC_V2 = PYDANTIC_VERSION.startswith("2.") if PYDANTIC_V2: from pydantic_core import Url as Url def _model_dump( model: BaseModel, mode: Literal["json", "python"] = "json", **kwargs: Any ) -> Any: else: from pydantic import AnyUrl as Url # noqa: F401 def _model_dump( model: BaseModel, mode: Literal["json", "python"] = "json", **kwargs: Any ) -> Any: def jsonable_encoder( obj: Any, by_alias: bool = True, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, custom_encoder: Optional[dict[Any, Callable[[Any], Any]]] = None, sqlalchemy_safe: bool = True, ) -> Any: custom_encoder = custom_encoder or {} if custom_encoder: if type(obj) in custom_encoder: 
return custom_encoder[type(obj)](obj) else: for encoder_type, encoder_instance in custom_encoder.items(): if isinstance(obj, encoder_type): return encoder_instance(obj) if isinstance(obj, BaseModel): # TODO: remove when deprecating Pydantic v1 encoders: dict[Any, Any] = {} if not PYDANTIC_V2: encoders = getattr(obj.__config__, "json_encoders", {}) # type: ignore[attr-defined] if custom_encoder: encoders.update(custom_encoder) obj_dict = _model_dump( obj, mode="json", include=None, exclude=None, by_alias=by_alias, exclude_unset=exclude_unset, exclude_none=exclude_none, exclude_defaults=exclude_defaults, ) if "__root__" in obj_dict: obj_dict = obj_dict["__root__"] return jsonable_encoder( obj_dict, exclude_none=exclude_none, exclude_defaults=exclude_defaults, # TODO: remove when deprecating Pydantic v1 custom_encoder=encoders, sqlalchemy_safe=sqlalchemy_safe, ) if dataclasses.is_dataclass(obj): obj_dict = dataclasses.asdict(obj) return jsonable_encoder( obj_dict, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, custom_encoder=custom_encoder, sqlalchemy_safe=sqlalchemy_safe, ) if isinstance(obj, Enum): return obj.value if isinstance(obj, PurePath): return str(obj) if isinstance(obj, str | int | float | type(None)): return obj if isinstance(obj, Decimal): return format(obj, 'f') if isinstance(obj, dict): encoded_dict = {} allowed_keys = set(obj.keys()) for key, value in obj.items(): if ( ( not sqlalchemy_safe or (not isinstance(key, str)) or (not key.startswith("_sa")) ) and (value is not None or not exclude_none) and key in allowed_keys ): encoded_key = jsonable_encoder( key, by_alias=by_alias, exclude_unset=exclude_unset, exclude_none=exclude_none, custom_encoder=custom_encoder, sqlalchemy_safe=sqlalchemy_safe, ) encoded_value = jsonable_encoder( value, by_alias=by_alias, exclude_unset=exclude_unset, exclude_none=exclude_none, custom_encoder=custom_encoder, sqlalchemy_safe=sqlalchemy_safe, ) 
encoded_dict[encoded_key] = encoded_value return encoded_dict if isinstance(obj, list | set | frozenset | GeneratorType | tuple | deque): encoded_list = [] for item in obj: encoded_list.append( jsonable_encoder( item, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, custom_encoder=custom_encoder, sqlalchemy_safe=sqlalchemy_safe, ) ) return encoded_list if type(obj) in ENCODERS_BY_TYPE: return ENCODERS_BY_TYPE[type(obj)](obj) for encoder, classes_tuple in encoders_by_class_tuples.items(): if isinstance(obj, classes_tuple): return encoder(obj) try: data = dict(obj) except Exception as e: errors: list[Exception] = [] errors.append(e) try: data = vars(obj) except Exception as e: errors.append(e) raise ValueError(errors) from e return jsonable_encoder( data, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, custom_encoder=custom_encoder, sqlalchemy_safe=sqlalchemy_safe, )
null
16,846
from pydantic import BaseModel


def dump_model(model: BaseModel) -> dict:
    """Serialize a pydantic model to a dict, compatible with pydantic v1 and v2.

    Pydantic v2 renamed ``BaseModel.dict()`` to ``BaseModel.model_dump()``.

    :param model: the model instance to serialize.
    :return: the model's fields as a plain dict.
    """
    # Fix: the original tested hasattr(pydantic, 'model_dump') and called
    # pydantic.model_dump(model) — but `model_dump` is a *method on the model
    # instance* in pydantic v2, not a module-level function, so that branch
    # never worked. Detect v2 via the instance instead.
    if hasattr(model, "model_dump"):
        return model.model_dump()
    # Pydantic v1 fallback.
    return model.dict()
null
16,847
import enum from typing import Any, cast from langchain.schema import AIMessage, BaseMessage, FunctionMessage, HumanMessage, SystemMessage from pydantic import BaseModel from core.model_runtime.entities.message_entities import ( AssistantPromptMessage, ImagePromptMessageContent, PromptMessage, SystemPromptMessage, TextPromptMessageContent, ToolPromptMessage, UserPromptMessage, ) class PromptMessageFileType(enum.Enum): IMAGE = 'image' def value_of(value): for member in PromptMessageFileType: if member.value == value: return member raise ValueError(f"No matching enum found for value '{value}'") class ImagePromptMessageFile(PromptMessageFile): class DETAIL(enum.Enum): LOW = 'low' HIGH = 'high' type: PromptMessageFileType = PromptMessageFileType.IMAGE detail: DETAIL = DETAIL.LOW class LCHumanMessageWithFiles(HumanMessage): # content: Union[str, list[Union[str, Dict]]] content: str files: list[PromptMessageFile] class TextPromptMessageContent(PromptMessageContent): """ Model class for text prompt message content. """ type: PromptMessageContentType = PromptMessageContentType.TEXT class ImagePromptMessageContent(PromptMessageContent): """ Model class for image prompt message content. """ class DETAIL(Enum): LOW = 'low' HIGH = 'high' type: PromptMessageContentType = PromptMessageContentType.IMAGE detail: DETAIL = DETAIL.LOW class PromptMessage(ABC, BaseModel): """ Model class for prompt message. """ role: PromptMessageRole content: Optional[str | list[PromptMessageContent]] = None name: Optional[str] = None class UserPromptMessage(PromptMessage): """ Model class for user prompt message. """ role: PromptMessageRole = PromptMessageRole.USER class AssistantPromptMessage(PromptMessage): """ Model class for assistant prompt message. """ class ToolCall(BaseModel): """ Model class for assistant prompt message tool call. """ class ToolCallFunction(BaseModel): """ Model class for assistant prompt message tool call function. 
""" name: str arguments: str id: str type: str function: ToolCallFunction role: PromptMessageRole = PromptMessageRole.ASSISTANT tool_calls: list[ToolCall] = [] class SystemPromptMessage(PromptMessage): """ Model class for system prompt message. """ role: PromptMessageRole = PromptMessageRole.SYSTEM class ToolPromptMessage(PromptMessage): """ Model class for tool prompt message. """ role: PromptMessageRole = PromptMessageRole.TOOL tool_call_id: str def lc_messages_to_prompt_messages(messages: list[BaseMessage]) -> list[PromptMessage]: prompt_messages = [] for message in messages: if isinstance(message, HumanMessage): if isinstance(message, LCHumanMessageWithFiles): file_prompt_message_contents = [] for file in message.files: if file.type == PromptMessageFileType.IMAGE: file = cast(ImagePromptMessageFile, file) file_prompt_message_contents.append(ImagePromptMessageContent( data=file.data, detail=ImagePromptMessageContent.DETAIL.HIGH if file.detail.value == "high" else ImagePromptMessageContent.DETAIL.LOW )) prompt_message_contents = [TextPromptMessageContent(data=message.content)] prompt_message_contents.extend(file_prompt_message_contents) prompt_messages.append(UserPromptMessage(content=prompt_message_contents)) else: prompt_messages.append(UserPromptMessage(content=message.content)) elif isinstance(message, AIMessage): message_kwargs = { 'content': message.content } if 'function_call' in message.additional_kwargs: message_kwargs['tool_calls'] = [ AssistantPromptMessage.ToolCall( id=message.additional_kwargs['function_call']['id'], type='function', function=AssistantPromptMessage.ToolCall.ToolCallFunction( name=message.additional_kwargs['function_call']['name'], arguments=message.additional_kwargs['function_call']['arguments'] ) ) ] prompt_messages.append(AssistantPromptMessage(**message_kwargs)) elif isinstance(message, SystemMessage): prompt_messages.append(SystemPromptMessage(content=message.content)) elif isinstance(message, FunctionMessage): 
prompt_messages.append(ToolPromptMessage(content=message.content, tool_call_id=message.name)) return prompt_messages
null
16,848
import enum from typing import Any, cast from langchain.schema import AIMessage, BaseMessage, FunctionMessage, HumanMessage, SystemMessage from pydantic import BaseModel from core.model_runtime.entities.message_entities import ( AssistantPromptMessage, ImagePromptMessageContent, PromptMessage, SystemPromptMessage, TextPromptMessageContent, ToolPromptMessage, UserPromptMessage, ) class TextPromptMessageContent(PromptMessageContent): """ Model class for text prompt message content. """ type: PromptMessageContentType = PromptMessageContentType.TEXT class ImagePromptMessageContent(PromptMessageContent): """ Model class for image prompt message content. """ class DETAIL(Enum): LOW = 'low' HIGH = 'high' type: PromptMessageContentType = PromptMessageContentType.IMAGE detail: DETAIL = DETAIL.LOW class PromptMessage(ABC, BaseModel): """ Model class for prompt message. """ role: PromptMessageRole content: Optional[str | list[PromptMessageContent]] = None name: Optional[str] = None class UserPromptMessage(PromptMessage): """ Model class for user prompt message. """ role: PromptMessageRole = PromptMessageRole.USER class AssistantPromptMessage(PromptMessage): """ Model class for assistant prompt message. """ class ToolCall(BaseModel): """ Model class for assistant prompt message tool call. """ class ToolCallFunction(BaseModel): """ Model class for assistant prompt message tool call function. """ name: str arguments: str id: str type: str function: ToolCallFunction role: PromptMessageRole = PromptMessageRole.ASSISTANT tool_calls: list[ToolCall] = [] class SystemPromptMessage(PromptMessage): """ Model class for system prompt message. """ role: PromptMessageRole = PromptMessageRole.SYSTEM class ToolPromptMessage(PromptMessage): """ Model class for tool prompt message. 
""" role: PromptMessageRole = PromptMessageRole.TOOL tool_call_id: str def prompt_messages_to_lc_messages(prompt_messages: list[PromptMessage]) -> list[BaseMessage]: messages = [] for prompt_message in prompt_messages: if isinstance(prompt_message, UserPromptMessage): if isinstance(prompt_message.content, str): messages.append(HumanMessage(content=prompt_message.content)) else: message_contents = [] for content in prompt_message.content: if isinstance(content, TextPromptMessageContent): message_contents.append(content.data) elif isinstance(content, ImagePromptMessageContent): message_contents.append({ 'type': 'image', 'data': content.data, 'detail': content.detail.value }) messages.append(HumanMessage(content=message_contents)) elif isinstance(prompt_message, AssistantPromptMessage): message_kwargs = { 'content': prompt_message.content } if prompt_message.tool_calls: message_kwargs['additional_kwargs'] = { 'function_call': { 'id': prompt_message.tool_calls[0].id, 'name': prompt_message.tool_calls[0].function.name, 'arguments': prompt_message.tool_calls[0].function.arguments } } messages.append(AIMessage(**message_kwargs)) elif isinstance(prompt_message, SystemPromptMessage): messages.append(SystemMessage(content=prompt_message.content)) elif isinstance(prompt_message, ToolPromptMessage): messages.append(FunctionMessage(name=prompt_message.tool_call_id, content=prompt_message.content)) return messages
null
16,849
# Dropped the unused import header (base64, extensions.ext_database.db,
# libs.rsa, models.account.Tenant): none of those names are referenced by
# the single definition in this snippet.


def obfuscated_token(token: str) -> str:
    """Mask a secret token for display, keeping the first 6 and last 2 chars.

    Fix: for tokens of length <= 8 the original expression
    ``token[:6] + '*' * (len(token) - 8) + token[-2:]`` leaked the whole
    token (the prefix and suffix overlap and the '*' repeat count is
    non-positive) — e.g. an 8-char token came back fully visible. Short
    tokens are now fully masked instead.

    :param token: the secret to obfuscate.
    :return: a same-length string with the middle replaced by '*'.
    """
    if len(token) <= 8:
        return "*" * len(token)
    return token[:6] + "*" * (len(token) - 8) + token[-2:]
null
16,850
import base64 from extensions.ext_database import db from libs import rsa from models.account import Tenant db = SQLAlchemy() class Tenant(db.Model): __tablename__ = 'tenants' __table_args__ = ( db.PrimaryKeyConstraint('id', name='tenant_pkey'), ) id = db.Column(UUID, server_default=db.text('uuid_generate_v4()')) name = db.Column(db.String(255), nullable=False) encrypt_public_key = db.Column(db.Text) plan = db.Column(db.String(255), nullable=False, server_default=db.text("'basic'::character varying")) status = db.Column(db.String(255), nullable=False, server_default=db.text("'normal'::character varying")) custom_config = db.Column(db.Text) created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) def get_accounts(self) -> list[db.Model]: Account = db.Model return db.session.query(Account).filter( Account.id == TenantAccountJoin.account_id, TenantAccountJoin.tenant_id == self.id ).all() def custom_config_dict(self) -> dict: return json.loads(self.custom_config) if self.custom_config else {} def custom_config_dict(self, value: dict): self.custom_config = json.dumps(value) def encrypt_token(tenant_id: str, token: str): tenant = db.session.query(Tenant).filter(Tenant.id == tenant_id).first() encrypted_token = rsa.encrypt(token, tenant.encrypt_public_key) return base64.b64encode(encrypted_token).decode()
null
16,851
import base64

# NOTE(review): db and Tenant are imported but unused by this snippet's
# single definition — presumably used elsewhere in the full module.
from extensions.ext_database import db
from libs import rsa
from models.account import Tenant


def decrypt_token(tenant_id: str, token: str):
    """Decrypt a base64-encoded encrypted token for a tenant.

    Thin wrapper over the project's ``libs.rsa`` helper; *tenant_id* is
    presumably used by rsa.decrypt to locate the tenant's private key —
    TODO confirm against libs.rsa.
    """
    return rsa.decrypt(base64.b64decode(token), tenant_id)
null
16,852
import base64

# NOTE(review): db and Tenant are imported but unused by this snippet's
# definitions — presumably used elsewhere in the full module.
from extensions.ext_database import db
from libs import rsa
from models.account import Tenant


def get_decrypt_decoding(tenant_id: str):
    """Fetch the RSA key material for a tenant (thin wrapper over libs.rsa)."""
    return rsa.get_decrypt_decoding(tenant_id)


def decrypt_token_with_decoding(token: str, rsa_key, cipher_rsa):
    """Decrypt one base64-encoded token using pre-fetched key material."""
    return rsa.decrypt_token_with_decoding(base64.b64decode(token), rsa_key, cipher_rsa)


def batch_decrypt_token(tenant_id: str, tokens: list[str]):
    """Decrypt many tokens, fetching the tenant's key material only once.

    Avoids one key lookup per token by reusing (rsa_key, cipher_rsa)
    across the whole batch.
    """
    rsa_key, cipher_rsa = rsa.get_decrypt_decoding(tenant_id)

    return [rsa.decrypt_token_with_decoding(base64.b64decode(token), rsa_key, cipher_rsa) for token in tokens]
null
16,853
import logging import random from core.entities.application_entities import ModelConfigEntity from core.model_runtime.errors.invoke import InvokeBadRequestError from core.model_runtime.model_providers.openai.moderation.moderation import OpenAIModerationModel from extensions.ext_hosting_provider import hosting_configuration from models.provider import ProviderType logger = logging.getLogger(__name__) class ModelConfigEntity(BaseModel): """ Model Config Entity. """ provider: str model: str model_schema: AIModelEntity mode: str provider_model_bundle: ProviderModelBundle credentials: dict[str, Any] = {} parameters: dict[str, Any] = {} stop: list[str] = [] class InvokeBadRequestError(InvokeError): """Raised when the Invoke returns bad request.""" description = "Bad Request Error" class OpenAIModerationModel(_CommonOpenAI, ModerationModel): """ Model class for OpenAI text moderation model. """ def _invoke(self, model: str, credentials: dict, text: str, user: Optional[str] = None) \ -> bool: """ Invoke moderation model :param model: model name :param credentials: model credentials :param text: text to moderate :param user: unique user id :return: false if text is safe, true otherwise """ # transform credentials to kwargs for model instance credentials_kwargs = self._to_credential_kwargs(credentials) # init model client client = OpenAI(**credentials_kwargs) # chars per chunk length = self._get_max_characters_per_chunk(model, credentials) text_chunks = [text[i:i + length] for i in range(0, len(text), length)] max_text_chunks = self._get_max_chunks(model, credentials) chunks = [text_chunks[i:i + max_text_chunks] for i in range(0, len(text_chunks), max_text_chunks)] for text_chunk in chunks: moderation_result = self._moderation_invoke(model=model, client=client, texts=text_chunk) for result in moderation_result.results: if result.flagged is True: return True return False def validate_credentials(self, model: str, credentials: dict) -> None: """ Validate model credentials 
:param model: model name :param credentials: model credentials :return: """ try: # transform credentials to kwargs for model instance credentials_kwargs = self._to_credential_kwargs(credentials) client = OpenAI(**credentials_kwargs) # call moderation model self._moderation_invoke( model=model, client=client, texts=['ping'], ) except Exception as ex: raise CredentialsValidateFailedError(str(ex)) def _moderation_invoke(self, model: str, client: OpenAI, texts: list[str]) -> ModerationCreateResponse: """ Invoke moderation model :param model: model name :param client: model client :param texts: texts to moderate :return: false if text is safe, true otherwise """ # call moderation model moderation_result = client.moderations.create(model=model, input=texts) return moderation_result def _get_max_characters_per_chunk(self, model: str, credentials: dict) -> int: """ Get max characters per chunk :param model: model name :param credentials: model credentials :return: max characters per chunk """ model_schema = self.get_model_schema(model, credentials) if model_schema and ModelPropertyKey.MAX_CHARACTERS_PER_CHUNK in model_schema.model_properties: return model_schema.model_properties[ModelPropertyKey.MAX_CHARACTERS_PER_CHUNK] return 2000 def _get_max_chunks(self, model: str, credentials: dict) -> int: """ Get max chunks for given embedding model :param model: model name :param credentials: model credentials :return: max chunks """ model_schema = self.get_model_schema(model, credentials) if model_schema and ModelPropertyKey.MAX_CHUNKS in model_schema.model_properties: return model_schema.model_properties[ModelPropertyKey.MAX_CHUNKS] return 1 hosting_configuration = HostingConfiguration() class ProviderType(Enum): CUSTOM = 'custom' SYSTEM = 'system' def value_of(value): for member in ProviderType: if member.value == value: return member raise ValueError(f"No matching enum found for value '{value}'") def check_moderation(model_config: ModelConfigEntity, text: str) -> bool: 
moderation_config = hosting_configuration.moderation_config if (moderation_config and moderation_config.enabled is True and 'openai' in hosting_configuration.provider_map and hosting_configuration.provider_map['openai'].enabled is True ): using_provider_type = model_config.provider_model_bundle.configuration.using_provider_type provider_name = model_config.provider if using_provider_type == ProviderType.SYSTEM \ and provider_name in moderation_config.providers: hosting_openai_config = hosting_configuration.provider_map['openai'] # 2000 text per chunk length = 2000 text_chunks = [text[i:i + length] for i in range(0, len(text), length)] if len(text_chunks) == 0: return True text_chunk = random.choice(text_chunks) try: model_type_instance = OpenAIModerationModel() moderation_result = model_type_instance.invoke( model='text-moderation-stable', credentials=hosting_openai_config.credentials, text=text_chunk ) if moderation_result is True: return True except Exception as ex: logger.exception(ex) raise InvokeBadRequestError('Rate limit exceeded, please try again later.') return False
null
16,854
import os

from httpx import get as _get
from httpx import head as _head
from httpx import options as _options
from httpx import patch as _patch
from httpx import post as _post
from httpx import put as _put
from requests import delete as _delete

# NOTE(review): SSRF_PROXY_HTTP_URL / SSRF_PROXY_HTTPS_URL are not defined in
# this snippet — presumably read from the environment in the full module;
# confirm. Proxying is only enabled when both are set.
httpx_proxies = {
    'http://': SSRF_PROXY_HTTP_URL,
    'https://': SSRF_PROXY_HTTPS_URL
} if SSRF_PROXY_HTTP_URL and SSRF_PROXY_HTTPS_URL else None


def get(url, *args, **kwargs):
    """HTTP GET routed through the SSRF proxy when one is configured.

    Fix: the original ``_get(url=url, *args, ...)`` raised
    ``TypeError: got multiple values for argument 'url'`` whenever a
    positional argument was supplied; pass *url* positionally instead.
    """
    return _get(url, *args, proxies=httpx_proxies, **kwargs)
null
16,855
import os

from httpx import get as _get
from httpx import head as _head
from httpx import options as _options
from httpx import patch as _patch
from httpx import post as _post
from httpx import put as _put
from requests import delete as _delete

# NOTE(review): SSRF_PROXY_HTTP_URL / SSRF_PROXY_HTTPS_URL are not defined in
# this snippet — presumably read from the environment in the full module;
# confirm. Proxying is only enabled when both are set.
httpx_proxies = {
    'http://': SSRF_PROXY_HTTP_URL,
    'https://': SSRF_PROXY_HTTPS_URL
} if SSRF_PROXY_HTTP_URL and SSRF_PROXY_HTTPS_URL else None


def post(url, *args, **kwargs):
    """HTTP POST routed through the SSRF proxy when one is configured.

    Fix: the original ``_post(url=url, *args, ...)`` raised
    ``TypeError: got multiple values for argument 'url'`` whenever a
    positional argument was supplied; pass *url* positionally instead.
    """
    return _post(url, *args, proxies=httpx_proxies, **kwargs)
null
16,856
import os

from httpx import get as _get
from httpx import head as _head
from httpx import options as _options
from httpx import patch as _patch
from httpx import post as _post
from httpx import put as _put
from requests import delete as _delete

# SSRF-filtering forward-proxy endpoints, read from the environment.
# The original snippet referenced these names without ever defining them.
SSRF_PROXY_HTTP_URL = os.getenv('SSRF_PROXY_HTTP_URL', '')
SSRF_PROXY_HTTPS_URL = os.getenv('SSRF_PROXY_HTTPS_URL', '')

# httpx expects scheme keys with a trailing "://"; proxying is enabled only
# when both endpoints are configured.
httpx_proxies = {
    'http://': SSRF_PROXY_HTTP_URL,
    'https://': SSRF_PROXY_HTTPS_URL
} if SSRF_PROXY_HTTP_URL and SSRF_PROXY_HTTPS_URL else None


def put(url, *args, **kwargs):
    """Proxy-aware drop-in replacement for ``httpx.put``.

    :param url: target URL; all other args/kwargs are forwarded to httpx.
    :return: the ``httpx.Response``.
    """
    # Pass ``url`` positionally so a stray positional argument cannot collide
    # with the ``url`` keyword and raise a TypeError.
    return _put(url, *args, proxies=httpx_proxies, **kwargs)
null
16,857
import os

from httpx import get as _get
from httpx import head as _head
from httpx import options as _options
from httpx import patch as _patch
from httpx import post as _post
from httpx import put as _put
from requests import delete as _delete

# SSRF-filtering forward-proxy endpoints, read from the environment.
# The original snippet referenced these names without ever defining them.
SSRF_PROXY_HTTP_URL = os.getenv('SSRF_PROXY_HTTP_URL', '')
SSRF_PROXY_HTTPS_URL = os.getenv('SSRF_PROXY_HTTPS_URL', '')

# httpx expects scheme keys with a trailing "://"; proxying is enabled only
# when both endpoints are configured.
httpx_proxies = {
    'http://': SSRF_PROXY_HTTP_URL,
    'https://': SSRF_PROXY_HTTPS_URL
} if SSRF_PROXY_HTTP_URL and SSRF_PROXY_HTTPS_URL else None


def patch(url, *args, **kwargs):
    """Proxy-aware drop-in replacement for ``httpx.patch``.

    :param url: target URL; all other args/kwargs are forwarded to httpx.
    :return: the ``httpx.Response``.
    """
    # Pass ``url`` positionally so a stray positional argument cannot collide
    # with the ``url`` keyword and raise a TypeError.
    return _patch(url, *args, proxies=httpx_proxies, **kwargs)
null
16,858
import os

from httpx import get as _get
from httpx import head as _head
from httpx import options as _options
from httpx import patch as _patch
from httpx import post as _post
from httpx import put as _put
from requests import delete as _delete

# SSRF-filtering forward-proxy endpoints, read from the environment.
# The original snippet referenced these names without ever defining them.
SSRF_PROXY_HTTP_URL = os.getenv('SSRF_PROXY_HTTP_URL', '')
SSRF_PROXY_HTTPS_URL = os.getenv('SSRF_PROXY_HTTPS_URL', '')

# requests (unlike httpx) expects plain scheme keys without "://"; proxying
# is enabled only when both endpoints are configured.
requests_proxies = {
    'http': SSRF_PROXY_HTTP_URL,
    'https': SSRF_PROXY_HTTPS_URL
} if SSRF_PROXY_HTTP_URL and SSRF_PROXY_HTTPS_URL else None


def delete(url, *args, **kwargs):
    """Proxy-aware drop-in replacement for ``requests.delete``.

    :param url: target URL; all other args/kwargs are forwarded to requests.
    :return: the ``requests.Response``.
    """
    # Pass ``url`` positionally so a stray positional argument cannot collide
    # with the ``url`` keyword and raise a TypeError.
    return _delete(url, *args, proxies=requests_proxies, **kwargs)
null
16,859
import os

from httpx import get as _get
from httpx import head as _head
from httpx import options as _options
from httpx import patch as _patch
from httpx import post as _post
from httpx import put as _put
from requests import delete as _delete

# SSRF-filtering forward-proxy endpoints, read from the environment.
# The original snippet referenced these names without ever defining them.
SSRF_PROXY_HTTP_URL = os.getenv('SSRF_PROXY_HTTP_URL', '')
SSRF_PROXY_HTTPS_URL = os.getenv('SSRF_PROXY_HTTPS_URL', '')

# httpx expects scheme keys with a trailing "://"; proxying is enabled only
# when both endpoints are configured.
httpx_proxies = {
    'http://': SSRF_PROXY_HTTP_URL,
    'https://': SSRF_PROXY_HTTPS_URL
} if SSRF_PROXY_HTTP_URL and SSRF_PROXY_HTTPS_URL else None


def head(url, *args, **kwargs):
    """Proxy-aware drop-in replacement for ``httpx.head``.

    :param url: target URL; all other args/kwargs are forwarded to httpx.
    :return: the ``httpx.Response``.
    """
    # Pass ``url`` positionally so a stray positional argument cannot collide
    # with the ``url`` keyword and raise a TypeError.
    return _head(url, *args, proxies=httpx_proxies, **kwargs)
null
16,860
import os

from httpx import get as _get
from httpx import head as _head
from httpx import options as _options
from httpx import patch as _patch
from httpx import post as _post
from httpx import put as _put
from requests import delete as _delete

# SSRF-filtering forward-proxy endpoints, read from the environment.
# The original snippet referenced these names without ever defining them.
SSRF_PROXY_HTTP_URL = os.getenv('SSRF_PROXY_HTTP_URL', '')
SSRF_PROXY_HTTPS_URL = os.getenv('SSRF_PROXY_HTTPS_URL', '')

# httpx expects scheme keys with a trailing "://"; proxying is enabled only
# when both endpoints are configured.
httpx_proxies = {
    'http://': SSRF_PROXY_HTTP_URL,
    'https://': SSRF_PROXY_HTTPS_URL
} if SSRF_PROXY_HTTP_URL and SSRF_PROXY_HTTPS_URL else None


def options(url, *args, **kwargs):
    """Proxy-aware drop-in replacement for ``httpx.options``.

    :param url: target URL; all other args/kwargs are forwarded to httpx.
    :return: the ``httpx.Response``.
    """
    # Pass ``url`` positionally so a stray positional argument cannot collide
    # with the ``url`` keyword and raise a TypeError.
    return _options(url, *args, proxies=httpx_proxies, **kwargs)
null
16,861
import concurrent.futures
from typing import NamedTuple, Optional, cast


class FileEncoding(NamedTuple):
    """A file encoding as the NamedTuple."""

    encoding: Optional[str]  # codec name, or None when undetectable
    confidence: float        # detector confidence score
    language: Optional[str]  # detected language, if any


def detect_file_encodings(file_path: str, timeout: int = 5) -> list[FileEncoding]:
    """Try to detect the file encoding.

    Returns a list of `FileEncoding` tuples with the detected encodings ordered
    by confidence.

    Args:
        file_path: The path to the file to detect the encoding for.
        timeout: The timeout in seconds for the encoding detection.
    """
    import chardet

    def _read_and_detect() -> list[dict]:
        # Read the raw bytes and let chardet enumerate every candidate codec.
        with open(file_path, "rb") as handle:
            raw = handle.read()
        return cast(list[dict], chardet.detect_all(raw))

    # Run detection in a worker thread so a pathological file cannot hang the
    # caller past the requested timeout.
    with concurrent.futures.ThreadPoolExecutor() as pool:
        pending = pool.submit(_read_and_detect)
        try:
            candidates = pending.result(timeout=timeout)
        except concurrent.futures.TimeoutError:
            raise TimeoutError(
                f"Timeout reached while detecting encoding for {file_path}"
            )

    if all(candidate["encoding"] is None for candidate in candidates):
        raise RuntimeError(f"Could not detect encoding for {file_path}")
    return [
        FileEncoding(**candidate)
        for candidate in candidates
        if candidate["encoding"] is not None
    ]
Try to detect the file encoding. Returns a list of `FileEncoding` tuples with the detected encodings ordered by confidence. Args: file_path: The path to the file to detect the encoding for. timeout: The timeout in seconds for the encoding detection.
16,862
from __future__ import annotations import copy import logging import re from abc import ABC, abstractmethod from collections.abc import Callable, Collection, Iterable, Sequence, Set from dataclasses import dataclass from enum import Enum from typing import ( Any, Literal, Optional, TypedDict, TypeVar, Union, ) from core.rag.models.document import BaseDocumentTransformer, Document def _split_text_with_regex( text: str, separator: str, keep_separator: bool ) -> list[str]: # Now that we have the separator, split the text if separator: if keep_separator: # The parentheses in the pattern keep the delimiters in the result. _splits = re.split(f"({re.escape(separator)})", text) splits = [_splits[i] + _splits[i + 1] for i in range(1, len(_splits), 2)] if len(_splits) % 2 == 0: splits += _splits[-1:] splits = [_splits[0]] + splits else: splits = re.split(separator, text) else: splits = list(text) return [s for s in splits if s != ""]
null
16,863
from dataclasses import dataclass
from collections.abc import Callable


@dataclass
class Tokenizer:
    """Chunking parameters plus the encode/decode callbacks of a tokenizer."""

    chunk_overlap: int                  # tokens shared by consecutive chunks
    tokens_per_chunk: int               # maximum tokens in one chunk
    decode: Callable[[list[int]], str]  # token ids -> text
    encode: Callable[[str], list[int]]  # text -> token ids


def split_text_on_tokens(*, text: str, tokenizer: Tokenizer) -> list[str]:
    """Split incoming text and return chunks using tokenizer.

    Args:
        text: the text to split.
        tokenizer: chunk size/overlap settings and encode/decode callbacks.

    Returns:
        Decoded chunks of at most ``tokenizer.tokens_per_chunk`` tokens;
        consecutive chunks overlap by ``tokenizer.chunk_overlap`` tokens.

    Raises:
        ValueError: if ``chunk_overlap >= tokens_per_chunk`` — the window
            could never advance (the previous implementation looped forever).
    """
    step = tokenizer.tokens_per_chunk - tokenizer.chunk_overlap
    if step <= 0:
        raise ValueError("chunk_overlap must be smaller than tokens_per_chunk")
    input_ids = tokenizer.encode(text)
    splits: list[str] = []
    start_idx = 0
    while start_idx < len(input_ids):
        chunk_ids = input_ids[start_idx:start_idx + tokenizer.tokens_per_chunk]
        splits.append(tokenizer.decode(chunk_ids))
        start_idx += step
    return splits
Split incoming text and return chunks using tokenizer.
16,864
import uuid


def is_valid_uuid(uuid_str: str) -> bool:
    """Return True if *uuid_str* parses as a UUID.

    Accepts any form ``uuid.UUID`` accepts (with or without hyphens, braces,
    or a ``urn:uuid:`` prefix).  Malformed or non-string input returns False.
    """
    try:
        uuid.UUID(uuid_str)
    except (ValueError, TypeError, AttributeError):
        # ValueError: malformed string; TypeError/AttributeError: not a str.
        # Narrowed from a bare ``except Exception`` so unrelated bugs surface.
        return False
    return True
null
16,865
def page_result(text: str, cursor: int, max_length: int) -> str:
    """Page through `text` and return a substring of `max_length` characters
    starting from `cursor`.

    Slicing never raises: a cursor at or past the end yields an empty string.
    """
    window_end = cursor + max_length
    return text[cursor:window_end]
Page through `text` and return a substring of `max_length` characters starting from `cursor`.
16,866
import hashlib import json import os import re import site import subprocess import tempfile import unicodedata from contextlib import contextmanager import requests from bs4 import BeautifulSoup, CData, Comment, NavigableString from newspaper import Article from regex import regex from core.rag.extractor import extract_processor from core.rag.extractor.extract_processor import ExtractProcessor FULL_TEMPLATE = """ TITLE: {title} AUTHORS: {authors} PUBLISH DATE: {publish_date} TOP_IMAGE_URL: {top_image} TEXT: {text} """ def get_url_from_newspaper3k(url: str) -> str: a = Article(url) a.download() a.parse() res = FULL_TEMPLATE.format( title=a.title, authors=a.authors, publish_date=a.publish_date, top_image=a.top_image, text=a.text, ) return res def extract_using_readabilipy(html): with tempfile.NamedTemporaryFile(delete=False, mode='w+') as f_html: f_html.write(html) f_html.close() html_path = f_html.name # Call Mozilla's Readability.js Readability.parse() function via node, writing output to a temporary file article_json_path = html_path + ".json" jsdir = os.path.join(find_module_path('readabilipy'), 'javascript') with chdir(jsdir): subprocess.check_call(["node", "ExtractArticle.js", "-i", html_path, "-o", article_json_path]) # Read output of call to Readability.parse() from JSON file and return as Python dictionary with open(article_json_path, encoding="utf-8") as json_file: input_json = json.loads(json_file.read()) # Deleting files after processing os.unlink(article_json_path) os.unlink(html_path) article_json = { "title": None, "byline": None, "date": None, "content": None, "plain_content": None, "plain_text": None } # Populate article fields from readability fields where present if input_json: if "title" in input_json and input_json["title"]: article_json["title"] = input_json["title"] if "byline" in input_json and input_json["byline"]: article_json["byline"] = input_json["byline"] if "date" in input_json and input_json["date"]: article_json["date"] = 
input_json["date"] if "content" in input_json and input_json["content"]: article_json["content"] = input_json["content"] article_json["plain_content"] = plain_content(article_json["content"], False, False) article_json["plain_text"] = extract_text_blocks_as_plain_text(article_json["plain_content"]) if "textContent" in input_json and input_json["textContent"]: article_json["plain_text"] = input_json["textContent"] article_json["plain_text"] = re.sub(r'\n\s*\n', '\n', article_json["plain_text"]) return article_json class ExtractProcessor: def load_from_upload_file(cls, upload_file: UploadFile, return_text: bool = False, is_automatic: bool = False) \ -> Union[list[Document], str]: extract_setting = ExtractSetting( datasource_type="upload_file", upload_file=upload_file, document_model='text_model' ) if return_text: delimiter = '\n' return delimiter.join([document.page_content for document in cls.extract(extract_setting, is_automatic)]) else: return cls.extract(extract_setting, is_automatic) def load_from_url(cls, url: str, return_text: bool = False) -> Union[list[Document], str]: response = requests.get(url, headers={ "User-Agent": USER_AGENT }) with tempfile.TemporaryDirectory() as temp_dir: suffix = Path(url).suffix file_path = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}" with open(file_path, 'wb') as file: file.write(response.content) extract_setting = ExtractSetting( datasource_type="upload_file", document_model='text_model' ) if return_text: delimiter = '\n' return delimiter.join([document.page_content for document in cls.extract( extract_setting=extract_setting, file_path=file_path)]) else: return cls.extract(extract_setting=extract_setting, file_path=file_path) def extract(cls, extract_setting: ExtractSetting, is_automatic: bool = False, file_path: str = None) -> list[Document]: if extract_setting.datasource_type == DatasourceType.FILE.value: with tempfile.TemporaryDirectory() as temp_dir: if not file_path: upload_file: UploadFile = 
extract_setting.upload_file suffix = Path(upload_file.key).suffix file_path = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}" storage.download(upload_file.key, file_path) input_file = Path(file_path) file_extension = input_file.suffix.lower() etl_type = current_app.config['ETL_TYPE'] unstructured_api_url = current_app.config['UNSTRUCTURED_API_URL'] if etl_type == 'Unstructured': if file_extension == '.xlsx': extractor = ExcelExtractor(file_path) elif file_extension == '.pdf': extractor = PdfExtractor(file_path) elif file_extension in ['.md', '.markdown']: extractor = UnstructuredMarkdownExtractor(file_path, unstructured_api_url) if is_automatic \ else MarkdownExtractor(file_path, autodetect_encoding=True) elif file_extension in ['.htm', '.html']: extractor = HtmlExtractor(file_path) elif file_extension in ['.docx']: extractor = UnstructuredWordExtractor(file_path, unstructured_api_url) elif file_extension == '.csv': extractor = CSVExtractor(file_path, autodetect_encoding=True) elif file_extension == '.msg': extractor = UnstructuredMsgExtractor(file_path, unstructured_api_url) elif file_extension == '.eml': extractor = UnstructuredEmailExtractor(file_path, unstructured_api_url) elif file_extension == '.ppt': extractor = UnstructuredPPTExtractor(file_path, unstructured_api_url) elif file_extension == '.pptx': extractor = UnstructuredPPTXExtractor(file_path, unstructured_api_url) elif file_extension == '.xml': extractor = UnstructuredXmlExtractor(file_path, unstructured_api_url) else: # txt extractor = UnstructuredTextExtractor(file_path, unstructured_api_url) if is_automatic \ else TextExtractor(file_path, autodetect_encoding=True) else: if file_extension == '.xlsx': extractor = ExcelExtractor(file_path) elif file_extension == '.pdf': extractor = PdfExtractor(file_path) elif file_extension in ['.md', '.markdown']: extractor = MarkdownExtractor(file_path, autodetect_encoding=True) elif file_extension in ['.htm', '.html']: extractor = 
HtmlExtractor(file_path) elif file_extension in ['.docx']: extractor = WordExtractor(file_path) elif file_extension == '.csv': extractor = CSVExtractor(file_path, autodetect_encoding=True) else: # txt extractor = TextExtractor(file_path, autodetect_encoding=True) return extractor.extract() elif extract_setting.datasource_type == DatasourceType.NOTION.value: extractor = NotionExtractor( notion_workspace_id=extract_setting.notion_info.notion_workspace_id, notion_obj_id=extract_setting.notion_info.notion_obj_id, notion_page_type=extract_setting.notion_info.notion_page_type, document_model=extract_setting.notion_info.document, tenant_id=extract_setting.notion_info.tenant_id, ) return extractor.extract() else: raise ValueError(f"Unsupported datasource type: {extract_setting.datasource_type}") The provided code snippet includes necessary dependencies for implementing the `get_url` function. Write a Python function `def get_url(url: str, user_agent: str = None) -> str` to solve the following problem: Fetch URL and return the contents as a string. 
def get_url(url: str, user_agent: str = None) -> str:
    """Fetch URL and return the contents as a string.

    Relies on module-level ``requests``, ``extract_processor``,
    ``ExtractProcessor``, ``FULL_TEMPLATE``, ``extract_using_readabilipy``
    and ``get_url_from_newspaper3k``.

    :param url: the URL to fetch.
    :param user_agent: optional User-Agent override for both requests.
    :return: extracted readable text, or a human-readable error message.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }
    if user_agent:
        headers["User-Agent"] = user_agent

    supported_content_types = extract_processor.SUPPORT_URL_CONTENT_TYPES + ["text/html"]

    # Cheap HEAD first: validates reachability and content type without
    # downloading the body.
    head_response = requests.head(url, headers=headers, allow_redirects=True, timeout=(5, 10))

    if head_response.status_code != 200:
        return "URL returned status code {}.".format(head_response.status_code)

    # Some servers omit Content-Type entirely; default to "" instead of
    # crashing on None.split() as the previous version did.
    main_content_type = head_response.headers.get('Content-Type', '').split(';')[0].strip()
    if main_content_type not in supported_content_types:
        return "Unsupported content-type [{}] of URL.".format(main_content_type)

    # Document-like content types go through the file extraction pipeline.
    if main_content_type in extract_processor.SUPPORT_URL_CONTENT_TYPES:
        return ExtractProcessor.load_from_url(url, return_text=True)

    response = requests.get(url, headers=headers, allow_redirects=True, timeout=(5, 30))

    # Prefer Readability.js extraction; fall back to newspaper3k when it
    # yields no usable text.
    a = extract_using_readabilipy(response.text)
    if not a['plain_text'] or not a['plain_text'].strip():
        return get_url_from_newspaper3k(url)

    res = FULL_TEMPLATE.format(
        title=a['title'],
        authors=a['byline'],
        publish_date=a['date'],
        top_image="",
        text=a['plain_text'] if a['plain_text'] else "",
    )
    return res
Fetch URL and return the contents as a string.
16,867
from pydantic import BaseModel


def serialize_base_model_array(l: list[BaseModel]) -> str:
    """Serialize a list of pydantic models to the JSON string
    ``{"__root__": [BaseModel, BaseModel, ...]}``.
    """
    # A custom-root wrapper model lets pydantic serialize a bare list.
    class _BaseModel(BaseModel):
        __root__: list[BaseModel]

    return _BaseModel(__root__=l).json()
{"__root__": [BaseModel, BaseModel, ...]}
16,868
from pydantic import BaseModel


def serialize_base_model_dict(b: dict) -> str:
    """Serialize a dict to the JSON string ``{"__root__": {...}}``."""
    # A custom-root wrapper model lets pydantic serialize a bare dict.
    class _BaseModel(BaseModel):
        __root__: dict

    return _BaseModel(__root__=b).json()
{"__root__": {BaseModel}}
16,869
import sqlalchemy as sa
from alembic import op


def upgrade():
    """Add nullable ``external_data_tools`` text column to app_model_configs."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('app_model_configs', schema=None) as batch_op:
        batch_op.add_column(sa.Column('external_data_tools', sa.Text(), nullable=True))

    # ### end Alembic commands ###
null
16,870
import sqlalchemy as sa
from alembic import op


def downgrade():
    """Drop the ``external_data_tools`` column from app_model_configs."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('app_model_configs', schema=None) as batch_op:
        batch_op.drop_column('external_data_tools')

    # ### end Alembic commands ###
null
16,871
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql


def upgrade():
    """Create ``tool_providers`` and add ``sensitive_word_avoidance`` to app_model_configs."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('tool_providers',
        sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('tenant_id', postgresql.UUID(), nullable=False),
        sa.Column('tool_name', sa.String(length=40), nullable=False),
        sa.Column('encrypted_credentials', sa.Text(), nullable=True),
        sa.Column('is_enabled', sa.Boolean(), server_default=sa.text('false'), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False),
        sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False),
        sa.PrimaryKeyConstraint('id', name='tool_provider_pkey'),
        sa.UniqueConstraint('tenant_id', 'tool_name', name='unique_tool_provider_tool_name')
    )
    with op.batch_alter_table('app_model_configs', schema=None) as batch_op:
        batch_op.add_column(sa.Column('sensitive_word_avoidance', sa.Text(), nullable=True))

    # ### end Alembic commands ###
null
16,872
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql


def downgrade():
    """Drop ``sensitive_word_avoidance`` column and the ``tool_providers`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('app_model_configs', schema=None) as batch_op:
        batch_op.drop_column('sensitive_word_avoidance')

    op.drop_table('tool_providers')
    # ### end Alembic commands ###
null
16,873
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql


def upgrade():
    """Create ``dataset_collection_bindings`` and link datasets via ``collection_binding_id``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('dataset_collection_bindings',
        sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('provider_name', sa.String(length=40), nullable=False),
        sa.Column('model_name', sa.String(length=40), nullable=False),
        sa.Column('collection_name', sa.String(length=64), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False),
        sa.PrimaryKeyConstraint('id', name='dataset_collection_bindings_pkey')
    )
    with op.batch_alter_table('dataset_collection_bindings', schema=None) as batch_op:
        batch_op.create_index('provider_model_name_idx', ['provider_name', 'model_name'], unique=False)

    with op.batch_alter_table('datasets', schema=None) as batch_op:
        batch_op.add_column(sa.Column('collection_binding_id', postgresql.UUID(), nullable=True))

    # ### end Alembic commands ###
null
16,874
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql


def downgrade():
    """Drop ``collection_binding_id`` from datasets and remove ``dataset_collection_bindings``."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('datasets', schema=None) as batch_op:
        batch_op.drop_column('collection_binding_id')

    with op.batch_alter_table('dataset_collection_bindings', schema=None) as batch_op:
        batch_op.drop_index('provider_model_name_idx')

    op.drop_table('dataset_collection_bindings')
    # ### end Alembic commands ###
null
16,875
import sqlalchemy as sa
from alembic import op


def upgrade():
    """Add ``last_active_at`` timestamp (defaults to now) to accounts."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('accounts', schema=None) as batch_op:
        batch_op.add_column(sa.Column('last_active_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False))

    # ### end Alembic commands ###
null
16,876
import sqlalchemy as sa
from alembic import op


def downgrade():
    """Drop the ``last_active_at`` column from accounts."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('accounts', schema=None) as batch_op:
        batch_op.drop_column('last_active_at')

    # ### end Alembic commands ###
null
16,877
import sqlalchemy as sa
from alembic import op


def upgrade():
    """Widen providers quota columns from INTEGER to BIGINT."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('providers', schema=None) as batch_op:
        batch_op.alter_column('quota_limit',
               existing_type=sa.INTEGER(),
               type_=sa.BigInteger(),
               existing_nullable=True)
        batch_op.alter_column('quota_used',
               existing_type=sa.INTEGER(),
               type_=sa.BigInteger(),
               existing_nullable=True)

    # ### end Alembic commands ###
null
16,878
import sqlalchemy as sa
from alembic import op


def downgrade():
    """Narrow providers quota columns back from BIGINT to INTEGER."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('providers', schema=None) as batch_op:
        batch_op.alter_column('quota_used',
               existing_type=sa.BigInteger(),
               type_=sa.INTEGER(),
               existing_nullable=True)
        batch_op.alter_column('quota_limit',
               existing_type=sa.BigInteger(),
               type_=sa.INTEGER(),
               existing_nullable=True)

    # ### end Alembic commands ###
null
16,879
import sqlalchemy as sa
from alembic import op


def upgrade():
    """Widen provider_models.model_name from VARCHAR(40) to VARCHAR(255)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('provider_models', schema=None) as batch_op:
        batch_op.alter_column('model_name',
               existing_type=sa.VARCHAR(length=40),
               type_=sa.String(length=255),
               existing_nullable=False)

    # ### end Alembic commands ###
null
16,880
import sqlalchemy as sa
from alembic import op


def downgrade():
    """Narrow provider_models.model_name back from VARCHAR(255) to VARCHAR(40)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('provider_models', schema=None) as batch_op:
        batch_op.alter_column('model_name',
               existing_type=sa.String(length=255),
               type_=sa.VARCHAR(length=40),
               existing_nullable=False)

    # ### end Alembic commands ###
null
16,881
import sqlalchemy as sa
from alembic import op


def upgrade():
    """Add advanced-prompt columns (prompt_type, chat/completion prompt and dataset configs)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('app_model_configs', schema=None) as batch_op:
        # prompt_type defaults to 'simple' so existing rows stay valid.
        batch_op.add_column(sa.Column('prompt_type', sa.String(length=255), nullable=False, server_default='simple'))
        batch_op.add_column(sa.Column('chat_prompt_config', sa.Text(), nullable=True))
        batch_op.add_column(sa.Column('completion_prompt_config', sa.Text(), nullable=True))
        batch_op.add_column(sa.Column('dataset_configs', sa.Text(), nullable=True))

    # ### end Alembic commands ###
null
16,882
import sqlalchemy as sa
from alembic import op


def downgrade():
    """Drop the advanced-prompt columns (reverse order of the upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('app_model_configs', schema=None) as batch_op:
        batch_op.drop_column('dataset_configs')
        batch_op.drop_column('completion_prompt_config')
        batch_op.drop_column('chat_prompt_config')
        batch_op.drop_column('prompt_type')

    # ### end Alembic commands ###
null
16,883
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql


def upgrade():
    """Create the per-tenant model tables: provider_models, tenant_default_models,
    tenant_preferred_model_providers (each with its lookup index)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('provider_models',
        sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('tenant_id', postgresql.UUID(), nullable=False),
        sa.Column('provider_name', sa.String(length=40), nullable=False),
        sa.Column('model_name', sa.String(length=40), nullable=False),
        sa.Column('model_type', sa.String(length=40), nullable=False),
        sa.Column('encrypted_config', sa.Text(), nullable=True),
        sa.Column('is_valid', sa.Boolean(), server_default=sa.text('false'), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False),
        sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False),
        sa.PrimaryKeyConstraint('id', name='provider_model_pkey'),
        sa.UniqueConstraint('tenant_id', 'provider_name', 'model_name', 'model_type', name='unique_provider_model_name')
    )
    with op.batch_alter_table('provider_models', schema=None) as batch_op:
        batch_op.create_index('provider_model_tenant_id_provider_idx', ['tenant_id', 'provider_name'], unique=False)

    op.create_table('tenant_default_models',
        sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('tenant_id', postgresql.UUID(), nullable=False),
        sa.Column('provider_name', sa.String(length=40), nullable=False),
        sa.Column('model_name', sa.String(length=40), nullable=False),
        sa.Column('model_type', sa.String(length=40), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False),
        sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False),
        sa.PrimaryKeyConstraint('id', name='tenant_default_model_pkey')
    )
    with op.batch_alter_table('tenant_default_models', schema=None) as batch_op:
        batch_op.create_index('tenant_default_model_tenant_id_provider_type_idx', ['tenant_id', 'provider_name', 'model_type'], unique=False)

    op.create_table('tenant_preferred_model_providers',
        sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('tenant_id', postgresql.UUID(), nullable=False),
        sa.Column('provider_name', sa.String(length=40), nullable=False),
        sa.Column('preferred_provider_type', sa.String(length=40), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False),
        sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False),
        sa.PrimaryKeyConstraint('id', name='tenant_preferred_model_provider_pkey')
    )
    with op.batch_alter_table('tenant_preferred_model_providers', schema=None) as batch_op:
        batch_op.create_index('tenant_preferred_model_provider_tenant_provider_idx', ['tenant_id', 'provider_name'], unique=False)

    # ### end Alembic commands ###
null
16,884
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql


def downgrade():
    """Drop the per-tenant model tables and their indexes (reverse of the upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('tenant_preferred_model_providers', schema=None) as batch_op:
        batch_op.drop_index('tenant_preferred_model_provider_tenant_provider_idx')

    op.drop_table('tenant_preferred_model_providers')
    with op.batch_alter_table('tenant_default_models', schema=None) as batch_op:
        batch_op.drop_index('tenant_default_model_tenant_id_provider_type_idx')

    op.drop_table('tenant_default_models')
    with op.batch_alter_table('provider_models', schema=None) as batch_op:
        batch_op.drop_index('provider_model_tenant_id_provider_idx')

    op.drop_table('provider_models')
    # ### end Alembic commands ###
null
16,885
import sqlalchemy as sa
from alembic import op


def upgrade():
    """Widen sites.description from VARCHAR(255) to unbounded TEXT."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('sites', schema=None) as batch_op:
        batch_op.alter_column('description',
               existing_type=sa.VARCHAR(length=255),
               type_=sa.Text(),
               existing_nullable=True)

    # ### end Alembic commands ###
null
16,886
import sqlalchemy as sa
from alembic import op


def downgrade():
    """Narrow sites.description back from TEXT to VARCHAR(255)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('sites', schema=None) as batch_op:
        batch_op.alter_column('description',
               existing_type=sa.Text(),
               type_=sa.VARCHAR(length=255),
               existing_nullable=True)

    # ### end Alembic commands ###
null
16,887
import sqlalchemy as sa from alembic import op from sqlalchemy.dialects import postgresql def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.execute('CREATE EXTENSION IF NOT EXISTS "uuid-ossp";') op.create_table('account_integrates', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('account_id', postgresql.UUID(), nullable=False), sa.Column('provider', sa.String(length=16), nullable=False), sa.Column('open_id', sa.String(length=255), nullable=False), sa.Column('encrypted_token', sa.String(length=255), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='account_integrate_pkey'), sa.UniqueConstraint('account_id', 'provider', name='unique_account_provider'), sa.UniqueConstraint('provider', 'open_id', name='unique_provider_open_id') ) op.create_table('accounts', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('email', sa.String(length=255), nullable=False), sa.Column('password', sa.String(length=255), nullable=True), sa.Column('password_salt', sa.String(length=255), nullable=True), sa.Column('avatar', sa.String(length=255), nullable=True), sa.Column('interface_language', sa.String(length=255), nullable=True), sa.Column('interface_theme', sa.String(length=255), nullable=True), sa.Column('timezone', sa.String(length=255), nullable=True), sa.Column('last_login_at', sa.DateTime(), nullable=True), sa.Column('last_login_ip', sa.String(length=255), nullable=True), sa.Column('status', sa.String(length=16), server_default=sa.text("'active'::character varying"), nullable=False), sa.Column('initialized_at', sa.DateTime(), nullable=True), sa.Column('created_at', 
sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='account_pkey') ) with op.batch_alter_table('accounts', schema=None) as batch_op: batch_op.create_index('account_email_idx', ['email'], unique=False) op.create_table('api_requests', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('tenant_id', postgresql.UUID(), nullable=False), sa.Column('api_token_id', postgresql.UUID(), nullable=False), sa.Column('path', sa.String(length=255), nullable=False), sa.Column('request', sa.Text(), nullable=True), sa.Column('response', sa.Text(), nullable=True), sa.Column('ip', sa.String(length=255), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='api_request_pkey') ) with op.batch_alter_table('api_requests', schema=None) as batch_op: batch_op.create_index('api_request_token_idx', ['tenant_id', 'api_token_id'], unique=False) op.create_table('api_tokens', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('app_id', postgresql.UUID(), nullable=True), sa.Column('dataset_id', postgresql.UUID(), nullable=True), sa.Column('type', sa.String(length=16), nullable=False), sa.Column('token', sa.String(length=255), nullable=False), sa.Column('last_used_at', sa.DateTime(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='api_token_pkey') ) with op.batch_alter_table('api_tokens', schema=None) as batch_op: batch_op.create_index('api_token_app_id_type_idx', ['app_id', 'type'], unique=False) batch_op.create_index('api_token_token_idx', ['token', 'type'], unique=False) op.create_table('app_dataset_joins', 
sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('app_id', postgresql.UUID(), nullable=False), sa.Column('dataset_id', postgresql.UUID(), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), sa.PrimaryKeyConstraint('id', name='app_dataset_join_pkey') ) with op.batch_alter_table('app_dataset_joins', schema=None) as batch_op: batch_op.create_index('app_dataset_join_app_dataset_idx', ['dataset_id', 'app_id'], unique=False) op.create_table('app_model_configs', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('app_id', postgresql.UUID(), nullable=False), sa.Column('provider', sa.String(length=255), nullable=False), sa.Column('model_id', sa.String(length=255), nullable=False), sa.Column('configs', sa.JSON(), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('opening_statement', sa.Text(), nullable=True), sa.Column('suggested_questions', sa.Text(), nullable=True), sa.Column('suggested_questions_after_answer', sa.Text(), nullable=True), sa.Column('more_like_this', sa.Text(), nullable=True), sa.Column('model', sa.Text(), nullable=True), sa.Column('user_input_form', sa.Text(), nullable=True), sa.Column('pre_prompt', sa.Text(), nullable=True), sa.Column('agent_mode', sa.Text(), nullable=True), sa.PrimaryKeyConstraint('id', name='app_model_config_pkey') ) with op.batch_alter_table('app_model_configs', schema=None) as batch_op: batch_op.create_index('app_app_id_idx', ['app_id'], unique=False) op.create_table('apps', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('tenant_id', postgresql.UUID(), nullable=False), sa.Column('name', sa.String(length=255), 
nullable=False), sa.Column('mode', sa.String(length=255), nullable=False), sa.Column('icon', sa.String(length=255), nullable=True), sa.Column('icon_background', sa.String(length=255), nullable=True), sa.Column('app_model_config_id', postgresql.UUID(), nullable=True), sa.Column('status', sa.String(length=255), server_default=sa.text("'normal'::character varying"), nullable=False), sa.Column('enable_site', sa.Boolean(), nullable=False), sa.Column('enable_api', sa.Boolean(), nullable=False), sa.Column('api_rpm', sa.Integer(), nullable=False), sa.Column('api_rph', sa.Integer(), nullable=False), sa.Column('is_demo', sa.Boolean(), server_default=sa.text('false'), nullable=False), sa.Column('is_public', sa.Boolean(), server_default=sa.text('false'), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='app_pkey') ) with op.batch_alter_table('apps', schema=None) as batch_op: batch_op.create_index('app_tenant_id_idx', ['tenant_id'], unique=False) op.execute('CREATE SEQUENCE task_id_sequence;') op.execute('CREATE SEQUENCE taskset_id_sequence;') op.create_table('celery_taskmeta', sa.Column('id', sa.Integer(), nullable=False, server_default=sa.text('nextval(\'task_id_sequence\')')), sa.Column('task_id', sa.String(length=155), nullable=True), sa.Column('status', sa.String(length=50), nullable=True), sa.Column('result', sa.PickleType(), nullable=True), sa.Column('date_done', sa.DateTime(), nullable=True), sa.Column('traceback', sa.Text(), nullable=True), sa.Column('name', sa.String(length=155), nullable=True), sa.Column('args', sa.LargeBinary(), nullable=True), sa.Column('kwargs', sa.LargeBinary(), nullable=True), sa.Column('worker', sa.String(length=155), nullable=True), sa.Column('retries', sa.Integer(), nullable=True), sa.Column('queue', sa.String(length=155), 
nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('task_id') ) op.create_table('celery_tasksetmeta', sa.Column('id', sa.Integer(), nullable=False, server_default=sa.text('nextval(\'taskset_id_sequence\')')), sa.Column('taskset_id', sa.String(length=155), nullable=True), sa.Column('result', sa.PickleType(), nullable=True), sa.Column('date_done', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('taskset_id') ) op.create_table('conversations', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('app_id', postgresql.UUID(), nullable=False), sa.Column('app_model_config_id', postgresql.UUID(), nullable=False), sa.Column('model_provider', sa.String(length=255), nullable=False), sa.Column('override_model_configs', sa.Text(), nullable=True), sa.Column('model_id', sa.String(length=255), nullable=False), sa.Column('mode', sa.String(length=255), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('summary', sa.Text(), nullable=True), sa.Column('inputs', sa.JSON(), nullable=True), sa.Column('introduction', sa.Text(), nullable=True), sa.Column('system_instruction', sa.Text(), nullable=True), sa.Column('system_instruction_tokens', sa.Integer(), server_default=sa.text('0'), nullable=False), sa.Column('status', sa.String(length=255), nullable=False), sa.Column('from_source', sa.String(length=255), nullable=False), sa.Column('from_end_user_id', postgresql.UUID(), nullable=True), sa.Column('from_account_id', postgresql.UUID(), nullable=True), sa.Column('read_at', sa.DateTime(), nullable=True), sa.Column('read_account_id', postgresql.UUID(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='conversation_pkey') ) with 
op.batch_alter_table('conversations', schema=None) as batch_op: batch_op.create_index('conversation_app_from_user_idx', ['app_id', 'from_source', 'from_end_user_id'], unique=False) op.create_table('dataset_keyword_tables', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('dataset_id', postgresql.UUID(), nullable=False), sa.Column('keyword_table', sa.Text(), nullable=False), sa.PrimaryKeyConstraint('id', name='dataset_keyword_table_pkey'), sa.UniqueConstraint('dataset_id') ) with op.batch_alter_table('dataset_keyword_tables', schema=None) as batch_op: batch_op.create_index('dataset_keyword_table_dataset_id_idx', ['dataset_id'], unique=False) op.create_table('dataset_process_rules', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('dataset_id', postgresql.UUID(), nullable=False), sa.Column('mode', sa.String(length=255), server_default=sa.text("'automatic'::character varying"), nullable=False), sa.Column('rules', sa.Text(), nullable=True), sa.Column('created_by', postgresql.UUID(), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='dataset_process_rule_pkey') ) with op.batch_alter_table('dataset_process_rules', schema=None) as batch_op: batch_op.create_index('dataset_process_rule_dataset_id_idx', ['dataset_id'], unique=False) op.create_table('dataset_queries', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('dataset_id', postgresql.UUID(), nullable=False), sa.Column('content', sa.Text(), nullable=False), sa.Column('source', sa.String(length=255), nullable=False), sa.Column('source_app_id', postgresql.UUID(), nullable=True), sa.Column('created_by_role', sa.String(), nullable=False), sa.Column('created_by', postgresql.UUID(), nullable=False), sa.Column('created_at', sa.DateTime(), 
server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), sa.PrimaryKeyConstraint('id', name='dataset_query_pkey') ) with op.batch_alter_table('dataset_queries', schema=None) as batch_op: batch_op.create_index('dataset_query_dataset_id_idx', ['dataset_id'], unique=False) op.create_table('datasets', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('tenant_id', postgresql.UUID(), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('description', sa.Text(), nullable=True), sa.Column('provider', sa.String(length=255), server_default=sa.text("'vendor'::character varying"), nullable=False), sa.Column('permission', sa.String(length=255), server_default=sa.text("'only_me'::character varying"), nullable=False), sa.Column('data_source_type', sa.String(length=255), nullable=True), sa.Column('indexing_technique', sa.String(length=255), nullable=True), sa.Column('index_struct', sa.Text(), nullable=True), sa.Column('created_by', postgresql.UUID(), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_by', postgresql.UUID(), nullable=True), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='dataset_pkey') ) with op.batch_alter_table('datasets', schema=None) as batch_op: batch_op.create_index('dataset_tenant_idx', ['tenant_id'], unique=False) op.create_table('dify_setups', sa.Column('version', sa.String(length=255), nullable=False), sa.Column('setup_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('version', name='dify_setup_pkey') ) op.create_table('document_segments', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('tenant_id', postgresql.UUID(), nullable=False), sa.Column('dataset_id', 
postgresql.UUID(), nullable=False), sa.Column('document_id', postgresql.UUID(), nullable=False), sa.Column('position', sa.Integer(), nullable=False), sa.Column('content', sa.Text(), nullable=False), sa.Column('word_count', sa.Integer(), nullable=False), sa.Column('tokens', sa.Integer(), nullable=False), sa.Column('keywords', sa.JSON(), nullable=True), sa.Column('index_node_id', sa.String(length=255), nullable=True), sa.Column('index_node_hash', sa.String(length=255), nullable=True), sa.Column('hit_count', sa.Integer(), nullable=False), sa.Column('enabled', sa.Boolean(), server_default=sa.text('true'), nullable=False), sa.Column('disabled_at', sa.DateTime(), nullable=True), sa.Column('disabled_by', postgresql.UUID(), nullable=True), sa.Column('status', sa.String(length=255), server_default=sa.text("'waiting'::character varying"), nullable=False), sa.Column('created_by', postgresql.UUID(), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('indexing_at', sa.DateTime(), nullable=True), sa.Column('completed_at', sa.DateTime(), nullable=True), sa.Column('error', sa.Text(), nullable=True), sa.Column('stopped_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('id', name='document_segment_pkey') ) with op.batch_alter_table('document_segments', schema=None) as batch_op: batch_op.create_index('document_segment_dataset_id_idx', ['dataset_id'], unique=False) batch_op.create_index('document_segment_dataset_node_idx', ['dataset_id', 'index_node_id'], unique=False) batch_op.create_index('document_segment_document_id_idx', ['document_id'], unique=False) batch_op.create_index('document_segment_tenant_dataset_idx', ['dataset_id', 'tenant_id'], unique=False) batch_op.create_index('document_segment_tenant_document_idx', ['document_id', 'tenant_id'], unique=False) op.create_table('documents', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), 
sa.Column('tenant_id', postgresql.UUID(), nullable=False), sa.Column('dataset_id', postgresql.UUID(), nullable=False), sa.Column('position', sa.Integer(), nullable=False), sa.Column('data_source_type', sa.String(length=255), nullable=False), sa.Column('data_source_info', sa.Text(), nullable=True), sa.Column('dataset_process_rule_id', postgresql.UUID(), nullable=True), sa.Column('batch', sa.String(length=255), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('created_from', sa.String(length=255), nullable=False), sa.Column('created_by', postgresql.UUID(), nullable=False), sa.Column('created_api_request_id', postgresql.UUID(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('processing_started_at', sa.DateTime(), nullable=True), sa.Column('file_id', sa.Text(), nullable=True), sa.Column('word_count', sa.Integer(), nullable=True), sa.Column('parsing_completed_at', sa.DateTime(), nullable=True), sa.Column('cleaning_completed_at', sa.DateTime(), nullable=True), sa.Column('splitting_completed_at', sa.DateTime(), nullable=True), sa.Column('tokens', sa.Integer(), nullable=True), sa.Column('indexing_latency', sa.Float(), nullable=True), sa.Column('completed_at', sa.DateTime(), nullable=True), sa.Column('is_paused', sa.Boolean(), server_default=sa.text('false'), nullable=True), sa.Column('paused_by', postgresql.UUID(), nullable=True), sa.Column('paused_at', sa.DateTime(), nullable=True), sa.Column('error', sa.Text(), nullable=True), sa.Column('stopped_at', sa.DateTime(), nullable=True), sa.Column('indexing_status', sa.String(length=255), server_default=sa.text("'waiting'::character varying"), nullable=False), sa.Column('enabled', sa.Boolean(), server_default=sa.text('true'), nullable=False), sa.Column('disabled_at', sa.DateTime(), nullable=True), sa.Column('disabled_by', postgresql.UUID(), nullable=True), sa.Column('archived', sa.Boolean(), 
server_default=sa.text('false'), nullable=False), sa.Column('archived_reason', sa.String(length=255), nullable=True), sa.Column('archived_by', postgresql.UUID(), nullable=True), sa.Column('archived_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('doc_type', sa.String(length=40), nullable=True), sa.Column('doc_metadata', sa.JSON(), nullable=True), sa.PrimaryKeyConstraint('id', name='document_pkey') ) with op.batch_alter_table('documents', schema=None) as batch_op: batch_op.create_index('document_dataset_id_idx', ['dataset_id'], unique=False) batch_op.create_index('document_is_paused_idx', ['is_paused'], unique=False) op.create_table('embeddings', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('hash', sa.String(length=64), nullable=False), sa.Column('embedding', sa.LargeBinary(), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='embedding_pkey'), sa.UniqueConstraint('hash', name='embedding_hash_idx') ) op.create_table('end_users', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('tenant_id', postgresql.UUID(), nullable=False), sa.Column('app_id', postgresql.UUID(), nullable=True), sa.Column('type', sa.String(length=255), nullable=False), sa.Column('external_user_id', sa.String(length=255), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('is_anonymous', sa.Boolean(), server_default=sa.text('true'), nullable=False), sa.Column('session_id', sa.String(length=255), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', 
name='end_user_pkey') ) with op.batch_alter_table('end_users', schema=None) as batch_op: batch_op.create_index('end_user_session_id_idx', ['session_id', 'type'], unique=False) batch_op.create_index('end_user_tenant_session_id_idx', ['tenant_id', 'session_id', 'type'], unique=False) op.create_table('installed_apps', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('tenant_id', postgresql.UUID(), nullable=False), sa.Column('app_id', postgresql.UUID(), nullable=False), sa.Column('app_owner_tenant_id', postgresql.UUID(), nullable=False), sa.Column('position', sa.Integer(), nullable=False), sa.Column('is_pinned', sa.Boolean(), server_default=sa.text('false'), nullable=False), sa.Column('last_used_at', sa.DateTime(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='installed_app_pkey'), sa.UniqueConstraint('tenant_id', 'app_id', name='unique_tenant_app') ) with op.batch_alter_table('installed_apps', schema=None) as batch_op: batch_op.create_index('installed_app_app_id_idx', ['app_id'], unique=False) batch_op.create_index('installed_app_tenant_id_idx', ['tenant_id'], unique=False) op.create_table('invitation_codes', sa.Column('id', sa.Integer(), nullable=False), sa.Column('batch', sa.String(length=255), nullable=False), sa.Column('code', sa.String(length=32), nullable=False), sa.Column('status', sa.String(length=16), server_default=sa.text("'unused'::character varying"), nullable=False), sa.Column('used_at', sa.DateTime(), nullable=True), sa.Column('used_by_tenant_id', postgresql.UUID(), nullable=True), sa.Column('used_by_account_id', postgresql.UUID(), nullable=True), sa.Column('deprecated_at', sa.DateTime(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='invitation_code_pkey') ) with 
op.batch_alter_table('invitation_codes', schema=None) as batch_op: batch_op.create_index('invitation_codes_batch_idx', ['batch'], unique=False) batch_op.create_index('invitation_codes_code_idx', ['code', 'status'], unique=False) op.create_table('message_agent_thoughts', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('message_id', postgresql.UUID(), nullable=False), sa.Column('message_chain_id', postgresql.UUID(), nullable=False), sa.Column('position', sa.Integer(), nullable=False), sa.Column('thought', sa.Text(), nullable=True), sa.Column('tool', sa.Text(), nullable=True), sa.Column('tool_input', sa.Text(), nullable=True), sa.Column('observation', sa.Text(), nullable=True), sa.Column('tool_process_data', sa.Text(), nullable=True), sa.Column('message', sa.Text(), nullable=True), sa.Column('message_token', sa.Integer(), nullable=True), sa.Column('message_unit_price', sa.Numeric(), nullable=True), sa.Column('answer', sa.Text(), nullable=True), sa.Column('answer_token', sa.Integer(), nullable=True), sa.Column('answer_unit_price', sa.Numeric(), nullable=True), sa.Column('tokens', sa.Integer(), nullable=True), sa.Column('total_price', sa.Numeric(), nullable=True), sa.Column('currency', sa.String(), nullable=True), sa.Column('latency', sa.Float(), nullable=True), sa.Column('created_by_role', sa.String(), nullable=False), sa.Column('created_by', postgresql.UUID(), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), sa.PrimaryKeyConstraint('id', name='message_agent_thought_pkey') ) with op.batch_alter_table('message_agent_thoughts', schema=None) as batch_op: batch_op.create_index('message_agent_thought_message_chain_id_idx', ['message_chain_id'], unique=False) batch_op.create_index('message_agent_thought_message_id_idx', ['message_id'], unique=False) op.create_table('message_chains', sa.Column('id', postgresql.UUID(), 
server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('message_id', postgresql.UUID(), nullable=False), sa.Column('type', sa.String(length=255), nullable=False), sa.Column('input', sa.Text(), nullable=True), sa.Column('output', sa.Text(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), sa.PrimaryKeyConstraint('id', name='message_chain_pkey') ) with op.batch_alter_table('message_chains', schema=None) as batch_op: batch_op.create_index('message_chain_message_id_idx', ['message_id'], unique=False) op.create_table('message_feedbacks', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('app_id', postgresql.UUID(), nullable=False), sa.Column('conversation_id', postgresql.UUID(), nullable=False), sa.Column('message_id', postgresql.UUID(), nullable=False), sa.Column('rating', sa.String(length=255), nullable=False), sa.Column('content', sa.Text(), nullable=True), sa.Column('from_source', sa.String(length=255), nullable=False), sa.Column('from_end_user_id', postgresql.UUID(), nullable=True), sa.Column('from_account_id', postgresql.UUID(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='message_feedback_pkey') ) with op.batch_alter_table('message_feedbacks', schema=None) as batch_op: batch_op.create_index('message_feedback_app_idx', ['app_id'], unique=False) batch_op.create_index('message_feedback_conversation_idx', ['conversation_id', 'from_source', 'rating'], unique=False) batch_op.create_index('message_feedback_message_idx', ['message_id', 'from_source'], unique=False) op.create_table('operation_logs', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('tenant_id', 
postgresql.UUID(), nullable=False), sa.Column('account_id', postgresql.UUID(), nullable=False), sa.Column('action', sa.String(length=255), nullable=False), sa.Column('content', sa.JSON(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('created_ip', sa.String(length=255), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='operation_log_pkey') ) with op.batch_alter_table('operation_logs', schema=None) as batch_op: batch_op.create_index('operation_log_account_action_idx', ['tenant_id', 'account_id', 'action'], unique=False) op.create_table('pinned_conversations', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('app_id', postgresql.UUID(), nullable=False), sa.Column('conversation_id', postgresql.UUID(), nullable=False), sa.Column('created_by', postgresql.UUID(), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='pinned_conversation_pkey') ) with op.batch_alter_table('pinned_conversations', schema=None) as batch_op: batch_op.create_index('pinned_conversation_conversation_idx', ['app_id', 'conversation_id', 'created_by'], unique=False) op.create_table('providers', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('tenant_id', postgresql.UUID(), nullable=False), sa.Column('provider_name', sa.String(length=40), nullable=False), sa.Column('provider_type', sa.String(length=40), nullable=False, server_default=sa.text("'custom'::character varying")), sa.Column('encrypted_config', sa.Text(), nullable=True), sa.Column('is_valid', sa.Boolean(), server_default=sa.text('false'), nullable=False), sa.Column('last_used', sa.DateTime(), nullable=True), sa.Column('quota_type', 
sa.String(length=40), nullable=True, server_default=sa.text("''::character varying")), sa.Column('quota_limit', sa.Integer(), nullable=True), sa.Column('quota_used', sa.Integer(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='provider_pkey'), sa.UniqueConstraint('tenant_id', 'provider_name', 'provider_type', 'quota_type', name='unique_provider_name_type_quota') ) with op.batch_alter_table('providers', schema=None) as batch_op: batch_op.create_index('provider_tenant_id_provider_idx', ['tenant_id', 'provider_name'], unique=False) op.create_table('recommended_apps', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('app_id', postgresql.UUID(), nullable=False), sa.Column('description', sa.JSON(), nullable=False), sa.Column('copyright', sa.String(length=255), nullable=False), sa.Column('privacy_policy', sa.String(length=255), nullable=False), sa.Column('category', sa.String(length=255), nullable=False), sa.Column('position', sa.Integer(), nullable=False), sa.Column('is_listed', sa.Boolean(), nullable=False), sa.Column('install_count', sa.Integer(), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='recommended_app_pkey') ) with op.batch_alter_table('recommended_apps', schema=None) as batch_op: batch_op.create_index('recommended_app_app_id_idx', ['app_id'], unique=False) batch_op.create_index('recommended_app_is_listed_idx', ['is_listed'], unique=False) op.create_table('saved_messages', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), 
sa.Column('app_id', postgresql.UUID(), nullable=False), sa.Column('message_id', postgresql.UUID(), nullable=False), sa.Column('created_by', postgresql.UUID(), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='saved_message_pkey') ) with op.batch_alter_table('saved_messages', schema=None) as batch_op: batch_op.create_index('saved_message_message_idx', ['app_id', 'message_id', 'created_by'], unique=False) op.create_table('sessions', sa.Column('id', sa.Integer(), nullable=False), sa.Column('session_id', sa.String(length=255), nullable=True), sa.Column('data', sa.LargeBinary(), nullable=True), sa.Column('expiry', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('session_id') ) op.create_table('sites', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('app_id', postgresql.UUID(), nullable=False), sa.Column('title', sa.String(length=255), nullable=False), sa.Column('icon', sa.String(length=255), nullable=True), sa.Column('icon_background', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('default_language', sa.String(length=255), nullable=False), sa.Column('copyright', sa.String(length=255), nullable=True), sa.Column('privacy_policy', sa.String(length=255), nullable=True), sa.Column('customize_domain', sa.String(length=255), nullable=True), sa.Column('customize_token_strategy', sa.String(length=255), nullable=False), sa.Column('prompt_public', sa.Boolean(), server_default=sa.text('false'), nullable=False), sa.Column('status', sa.String(length=255), server_default=sa.text("'normal'::character varying"), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), 
nullable=False), sa.Column('code', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id', name='site_pkey') ) with op.batch_alter_table('sites', schema=None) as batch_op: batch_op.create_index('site_app_id_idx', ['app_id'], unique=False) batch_op.create_index('site_code_idx', ['code', 'status'], unique=False) op.create_table('tenant_account_joins', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('tenant_id', postgresql.UUID(), nullable=False), sa.Column('account_id', postgresql.UUID(), nullable=False), sa.Column('role', sa.String(length=16), server_default='normal', nullable=False), sa.Column('invited_by', postgresql.UUID(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='tenant_account_join_pkey'), sa.UniqueConstraint('tenant_id', 'account_id', name='unique_tenant_account_join') ) with op.batch_alter_table('tenant_account_joins', schema=None) as batch_op: batch_op.create_index('tenant_account_join_account_id_idx', ['account_id'], unique=False) batch_op.create_index('tenant_account_join_tenant_id_idx', ['tenant_id'], unique=False) op.create_table('tenants', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('encrypt_public_key', sa.Text(), nullable=True), sa.Column('plan', sa.String(length=255), server_default=sa.text("'basic'::character varying"), nullable=False), sa.Column('status', sa.String(length=255), server_default=sa.text("'normal'::character varying"), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), 
nullable=False), sa.PrimaryKeyConstraint('id', name='tenant_pkey') ) op.create_table('upload_files', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('tenant_id', postgresql.UUID(), nullable=False), sa.Column('storage_type', sa.String(length=255), nullable=False), sa.Column('key', sa.String(length=255), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('size', sa.Integer(), nullable=False), sa.Column('extension', sa.String(length=255), nullable=False), sa.Column('mime_type', sa.String(length=255), nullable=True), sa.Column('created_by', postgresql.UUID(), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('used', sa.Boolean(), server_default=sa.text('false'), nullable=False), sa.Column('used_by', postgresql.UUID(), nullable=True), sa.Column('used_at', sa.DateTime(), nullable=True), sa.Column('hash', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id', name='upload_file_pkey') ) with op.batch_alter_table('upload_files', schema=None) as batch_op: batch_op.create_index('upload_file_tenant_idx', ['tenant_id'], unique=False) op.create_table('message_annotations', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('app_id', postgresql.UUID(), nullable=False), sa.Column('conversation_id', postgresql.UUID(), nullable=False), sa.Column('message_id', postgresql.UUID(), nullable=False), sa.Column('content', sa.Text(), nullable=False), sa.Column('account_id', postgresql.UUID(), nullable=False), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.PrimaryKeyConstraint('id', name='message_annotation_pkey') ) with op.batch_alter_table('message_annotations', schema=None) as batch_op: 
batch_op.create_index('message_annotation_app_idx', ['app_id'], unique=False) batch_op.create_index('message_annotation_conversation_idx', ['conversation_id'], unique=False) batch_op.create_index('message_annotation_message_idx', ['message_id'], unique=False) op.create_table('messages', sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), sa.Column('app_id', postgresql.UUID(), nullable=False), sa.Column('model_provider', sa.String(length=255), nullable=False), sa.Column('model_id', sa.String(length=255), nullable=False), sa.Column('override_model_configs', sa.Text(), nullable=True), sa.Column('conversation_id', postgresql.UUID(), nullable=False), sa.Column('inputs', sa.JSON(), nullable=True), sa.Column('query', sa.Text(), nullable=False), sa.Column('message', sa.JSON(), nullable=False), sa.Column('message_tokens', sa.Integer(), server_default=sa.text('0'), nullable=False), sa.Column('message_unit_price', sa.Numeric(precision=10, scale=4), nullable=False), sa.Column('answer', sa.Text(), nullable=False), sa.Column('answer_tokens', sa.Integer(), server_default=sa.text('0'), nullable=False), sa.Column('answer_unit_price', sa.Numeric(precision=10, scale=4), nullable=False), sa.Column('provider_response_latency', sa.Float(), server_default=sa.text('0'), nullable=False), sa.Column('total_price', sa.Numeric(precision=10, scale=7), nullable=True), sa.Column('currency', sa.String(length=255), nullable=False), sa.Column('from_source', sa.String(length=255), nullable=False), sa.Column('from_end_user_id', postgresql.UUID(), nullable=True), sa.Column('from_account_id', postgresql.UUID(), nullable=True), sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), sa.Column('agent_based', sa.Boolean(), server_default=sa.text('false'), nullable=False), sa.PrimaryKeyConstraint('id', 
name='message_pkey') ) with op.batch_alter_table('messages', schema=None) as batch_op: batch_op.create_index('message_account_idx', ['app_id', 'from_source', 'from_account_id'], unique=False) batch_op.create_index('message_app_id_idx', ['app_id', 'created_at'], unique=False) batch_op.create_index('message_conversation_id_idx', ['conversation_id'], unique=False) batch_op.create_index('message_end_user_idx', ['app_id', 'from_source', 'from_end_user_id'], unique=False) # ### end Alembic commands ###
null
16,888
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql


def _drop_indexed_table(table, *indexes):
    """Drop *indexes* from *table* (in the order given), then drop the table.

    When no indexes are supplied the table is dropped directly, without
    opening a batch-alter context.
    """
    if indexes:
        with op.batch_alter_table(table, schema=None) as batch:
            for index_name in indexes:
                batch.drop_index(index_name)
    op.drop_table(table)


def downgrade():
    """Revert the initial schema.

    Removes every table created by the corresponding ``upgrade`` (indexes
    first, then the table), drops the Celery id sequences, and finally
    removes the ``uuid-ossp`` extension.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    _drop_indexed_table(
        'messages',
        'message_end_user_idx',
        'message_conversation_id_idx',
        'message_app_id_idx',
        'message_account_idx',
    )
    _drop_indexed_table(
        'message_annotations',
        'message_annotation_message_idx',
        'message_annotation_conversation_idx',
        'message_annotation_app_idx',
    )
    _drop_indexed_table('upload_files', 'upload_file_tenant_idx')
    _drop_indexed_table('tenants')
    _drop_indexed_table(
        'tenant_account_joins',
        'tenant_account_join_tenant_id_idx',
        'tenant_account_join_account_id_idx',
    )
    _drop_indexed_table('sites', 'site_code_idx', 'site_app_id_idx')
    _drop_indexed_table('sessions')
    _drop_indexed_table('saved_messages', 'saved_message_message_idx')
    _drop_indexed_table(
        'recommended_apps',
        'recommended_app_is_listed_idx',
        'recommended_app_app_id_idx',
    )
    _drop_indexed_table('providers', 'provider_tenant_id_provider_idx')
    _drop_indexed_table('pinned_conversations', 'pinned_conversation_conversation_idx')
    _drop_indexed_table('operation_logs', 'operation_log_account_action_idx')
    _drop_indexed_table(
        'message_feedbacks',
        'message_feedback_message_idx',
        'message_feedback_conversation_idx',
        'message_feedback_app_idx',
    )
    _drop_indexed_table('message_chains', 'message_chain_message_id_idx')
    _drop_indexed_table(
        'message_agent_thoughts',
        'message_agent_thought_message_id_idx',
        'message_agent_thought_message_chain_id_idx',
    )
    _drop_indexed_table(
        'invitation_codes',
        'invitation_codes_code_idx',
        'invitation_codes_batch_idx',
    )
    _drop_indexed_table(
        'installed_apps',
        'installed_app_tenant_id_idx',
        'installed_app_app_id_idx',
    )
    _drop_indexed_table(
        'end_users',
        'end_user_tenant_session_id_idx',
        'end_user_session_id_idx',
    )
    _drop_indexed_table('embeddings')
    _drop_indexed_table('documents', 'document_is_paused_idx', 'document_dataset_id_idx')
    _drop_indexed_table(
        'document_segments',
        'document_segment_tenant_document_idx',
        'document_segment_tenant_dataset_idx',
        'document_segment_document_id_idx',
        'document_segment_dataset_node_idx',
        'document_segment_dataset_id_idx',
    )
    _drop_indexed_table('dify_setups')
    _drop_indexed_table('datasets', 'dataset_tenant_idx')
    _drop_indexed_table('dataset_queries', 'dataset_query_dataset_id_idx')
    _drop_indexed_table('dataset_process_rules', 'dataset_process_rule_dataset_id_idx')
    _drop_indexed_table('dataset_keyword_tables', 'dataset_keyword_table_dataset_id_idx')
    _drop_indexed_table('conversations', 'conversation_app_from_user_idx')
    _drop_indexed_table('celery_tasksetmeta')
    _drop_indexed_table('celery_taskmeta')
    # The Celery sequences were created with raw SQL, so they are removed
    # the same way.
    op.execute('DROP SEQUENCE taskset_id_sequence;')
    op.execute('DROP SEQUENCE task_id_sequence;')
    _drop_indexed_table('apps', 'app_tenant_id_idx')
    _drop_indexed_table('app_model_configs', 'app_app_id_idx')
    _drop_indexed_table('app_dataset_joins', 'app_dataset_join_app_dataset_idx')
    _drop_indexed_table(
        'api_tokens',
        'api_token_token_idx',
        'api_token_app_id_type_idx',
    )
    _drop_indexed_table('api_requests', 'api_request_token_idx')
    _drop_indexed_table('accounts', 'account_email_idx')
    _drop_indexed_table('account_integrates')
    op.execute('DROP EXTENSION IF EXISTS "uuid-ossp";')
    # ### end Alembic commands ###
null
16,889
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql


def upgrade():
    """Create the ``data_source_bindings`` table and its two indexes.

    ``source_info`` is a JSONB column and receives a GIN index so its
    contents can be searched efficiently; ``tenant_id`` gets a plain
    b-tree index.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'data_source_bindings',
        sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('tenant_id', postgresql.UUID(), nullable=False),
        sa.Column('access_token', sa.String(length=255), nullable=False),
        sa.Column('provider', sa.String(length=255), nullable=False),
        sa.Column('source_info', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False),
        sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False),
        sa.Column('disabled', sa.Boolean(), server_default=sa.text('false'), nullable=True),
        sa.PrimaryKeyConstraint('id', name='source_binding_pkey'),
    )
    with op.batch_alter_table('data_source_bindings', schema=None) as batch:
        batch.create_index('source_binding_tenant_id_idx', ['tenant_id'], unique=False)
        batch.create_index('source_info_idx', ['source_info'], unique=False, postgresql_using='gin')
    # ### end Alembic commands ###
null
16,890
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql


def downgrade():
    """Drop the ``data_source_bindings`` table.

    The indexes are removed first (GIN index, then the tenant index),
    followed by the table itself.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('data_source_bindings', schema=None) as batch:
        batch.drop_index('source_info_idx', postgresql_using='gin')
        batch.drop_index('source_binding_tenant_id_idx')
    op.drop_table('data_source_bindings')
    # ### end Alembic commands ###
null
16,891
from alembic import op


def upgrade():
    """Add a uniqueness guarantee and an index for the tool tables.

    * ``tool_api_providers`` gains a unique constraint on
      ``(name, tenant_id)``.
    * ``tool_files`` gains a non-unique index on ``conversation_id``.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('tool_api_providers', schema=None) as batch:
        batch.create_unique_constraint('unique_api_tool_provider', ['name', 'tenant_id'])
    with op.batch_alter_table('tool_files', schema=None) as batch:
        batch.create_index('tool_file_conversation_id_idx', ['conversation_id'], unique=False)
    # ### end Alembic commands ###
null
16,892
from alembic import op


def downgrade():
    """Remove the ``tool_files`` conversation index and the
    ``tool_api_providers`` ``(name, tenant_id)`` unique constraint."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('tool_files', schema=None) as batch:
        batch.drop_index('tool_file_conversation_id_idx')
    with op.batch_alter_table('tool_api_providers', schema=None) as batch:
        batch.drop_constraint('unique_api_tool_provider', type_='unique')
    # ### end Alembic commands ###
null
16,893
import sqlalchemy as sa
from alembic import op


def upgrade():
    """Allow NULL in the ``datasets`` embedding configuration columns.

    Both ``embedding_model`` and ``embedding_model_provider`` become
    nullable; their existing server defaults are left untouched.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('datasets', schema=None) as batch:
        batch.alter_column(
            'embedding_model',
            existing_type=sa.VARCHAR(length=255),
            nullable=True,
            existing_server_default=sa.text("'text-embedding-ada-002'::character varying"),
        )
        batch.alter_column(
            'embedding_model_provider',
            existing_type=sa.VARCHAR(length=255),
            nullable=True,
            existing_server_default=sa.text("'openai'::character varying"),
        )
    # ### end Alembic commands ###
null
16,894
import sqlalchemy as sa
from alembic import op


def downgrade():
    """Restore NOT NULL on the ``datasets`` embedding configuration columns.

    Reverses the columns in the opposite order to ``upgrade``; server
    defaults remain unchanged.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('datasets', schema=None) as batch:
        batch.alter_column(
            'embedding_model_provider',
            existing_type=sa.VARCHAR(length=255),
            nullable=False,
            existing_server_default=sa.text("'openai'::character varying"),
        )
        batch.alter_column(
            'embedding_model',
            existing_type=sa.VARCHAR(length=255),
            nullable=False,
            existing_server_default=sa.text("'text-embedding-ada-002'::character varying"),
        )
    # ### end Alembic commands ###
null
16,895
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql


def upgrade():
    """Create ``tool_conversation_variables`` and extend ``tool_api_providers``.

    The new table stores serialized per-conversation tool variables
    (``variables_str``). ``tool_api_providers`` gains a nullable
    ``privacy_policy`` column and its ``icon`` column is narrowed from
    VARCHAR(256) to VARCHAR(255).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'tool_conversation_variables',
        sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('user_id', postgresql.UUID(), nullable=False),
        sa.Column('tenant_id', postgresql.UUID(), nullable=False),
        sa.Column('conversation_id', postgresql.UUID(), nullable=False),
        sa.Column('variables_str', sa.Text(), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False),
        sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False),
        sa.PrimaryKeyConstraint('id', name='tool_conversation_variables_pkey'),
    )
    with op.batch_alter_table('tool_api_providers', schema=None) as batch:
        batch.add_column(sa.Column('privacy_policy', sa.String(length=255), nullable=True))
        batch.alter_column(
            'icon',
            existing_type=sa.VARCHAR(length=256),
            type_=sa.String(length=255),
            existing_nullable=False,
        )
    # ### end Alembic commands ###
null
16,896
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql


def downgrade():
    """Undo the tool-variables revision.

    Restores ``tool_api_providers.icon`` to VARCHAR(256), removes the
    ``privacy_policy`` column, and drops ``tool_conversation_variables``.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('tool_api_providers', schema=None) as batch:
        batch.alter_column(
            'icon',
            existing_type=sa.String(length=255),
            type_=sa.VARCHAR(length=256),
            existing_nullable=False,
        )
        batch.drop_column('privacy_policy')
    op.drop_table('tool_conversation_variables')
    # ### end Alembic commands ###
null
16,897
import sqlalchemy as sa
from alembic import op


def upgrade():
    """Add ``created_at`` / ``updated_at`` timestamps to ``tool_api_providers``.

    Both columns are NOT NULL and default to ``CURRENT_TIMESTAMP(0)`` so
    existing rows are backfilled by the server default.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('tool_api_providers', schema=None) as batch:
        batch.add_column(
            sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False)
        )
        batch.add_column(
            sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False)
        )
    # ### end Alembic commands ###
null
16,898
import sqlalchemy as sa
from alembic import op


def downgrade():
    """Remove the ``updated_at`` and ``created_at`` columns from
    ``tool_api_providers`` (in reverse of the order they were added)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('tool_api_providers', schema=None) as batch:
        batch.drop_column('updated_at')
        batch.drop_column('created_at')
    # ### end Alembic commands ###
null
16,899
import sqlalchemy as sa
from alembic import op


def upgrade():
    """Make embedding cache entries model-aware.

    Adds a ``model_name`` column to ``embeddings`` (server-defaulted to
    ``text-embedding-ada-002`` so existing rows stay valid) and rebuilds
    the ``embedding_hash_idx`` unique constraint to cover
    ``(model_name, hash)`` instead of ``hash`` alone.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('embeddings', schema=None) as batch:
        batch.add_column(
            sa.Column(
                'model_name',
                sa.String(length=40),
                server_default=sa.text("'text-embedding-ada-002'::character varying"),
                nullable=False,
            )
        )
        batch.drop_constraint('embedding_hash_idx', type_='unique')
        batch.create_unique_constraint('embedding_hash_idx', ['model_name', 'hash'])
    # ### end Alembic commands ###
null
16,900
import sqlalchemy as sa
from alembic import op


def downgrade():
    """Revert ``embeddings`` to a hash-only unique constraint.

    Rebuilds ``embedding_hash_idx`` over ``hash`` alone and drops the
    ``model_name`` column.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('embeddings', schema=None) as batch:
        batch.drop_constraint('embedding_hash_idx', type_='unique')
        batch.create_unique_constraint('embedding_hash_idx', ['hash'])
        batch.drop_column('model_name')
    # ### end Alembic commands ###
null