id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
9,736 | import cv2
import torch
import torch.nn as nn
from torchvision.transforms import Compose
from ldm.modules.midas.midas.dpt_depth import DPTDepthModel
from ldm.modules.midas.midas.midas_net import MidasNet
from ldm.modules.midas.midas.midas_net_custom import MidasNet_small
from ldm.modules.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet
# Local checkpoint paths for the supported MiDaS/ISL depth models, keyed by
# the model_type names accepted by load_model below.
# NOTE(review): the midas_v21 entries are empty strings; MidasNet only calls
# self.load(path) when path is truthy, so presumably those variants then rely
# on pretrained encoder weights instead of a full checkpoint -- confirm.
ISL_PATHS = {
    "dpt_large": "midas_models/dpt_large-midas-2f21e586.pt",
    "dpt_hybrid": "midas_models/dpt_hybrid-midas-501f0c75.pt",
    "midas_v21": "",
    "midas_v21_small": "",
}
class DPTDepthModel(DPT):
    """DPT-based monocular depth model.

    Attaches a convolutional regression head to the DPT backbone,
    optionally loads weights from ``path``, and returns a depth map with
    the singleton channel dimension removed.
    """

    def __init__(self, path=None, non_negative=True, **kwargs):
        # "features" stays in kwargs on purpose: DPT's __init__ reads it too.
        feat = kwargs.get("features", 256)

        depth_head = nn.Sequential(
            nn.Conv2d(feat, feat // 2, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
            nn.Conv2d(feat // 2, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            # Optionally clamp predictions at zero.
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )
        super().__init__(depth_head, **kwargs)

        if path is not None:
            self.load(path)

    def forward(self, x):
        # (B, 1, H, W) -> (B, H, W)
        return super().forward(x).squeeze(dim=1)
class MidasNet(BaseModel):
    """Network for monocular depth estimation.
    """

    def __init__(self, path=None, features=256, non_negative=True):
        """Init.

        Args:
            path (str, optional): Path to saved model. Defaults to None.
            features (int, optional): Number of features. Defaults to 256.
            non_negative (bool, optional): Clamp the predicted depth at zero
                with a final ReLU. Defaults to True.
        """
        print("Loading weights: ", path)

        super(MidasNet, self).__init__()

        use_pretrained = path is not None

        self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained)

        # One fusion block per encoder stage (4 = deepest stage).
        for stage in (4, 3, 2, 1):
            setattr(self.scratch, f"refinenet{stage}", FeatureFusionBlock(features))

        self.scratch.output_conv = nn.Sequential(
            nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear"),
            nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
        )

        if path:
            self.load(path)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input data (image)

        Returns:
            tensor: depth
        """
        # Encoder features at four resolutions.
        enc1 = self.pretrained.layer1(x)
        enc2 = self.pretrained.layer2(enc1)
        enc3 = self.pretrained.layer3(enc2)
        enc4 = self.pretrained.layer4(enc3)

        # Reduce each stage to the common decoder width.
        rn1 = self.scratch.layer1_rn(enc1)
        rn2 = self.scratch.layer2_rn(enc2)
        rn3 = self.scratch.layer3_rn(enc3)
        rn4 = self.scratch.layer4_rn(enc4)

        # Top-down fusion from the deepest stage back to the shallowest.
        fused = self.scratch.refinenet4(rn4)
        fused = self.scratch.refinenet3(fused, rn3)
        fused = self.scratch.refinenet2(fused, rn2)
        fused = self.scratch.refinenet1(fused, rn1)

        out = self.scratch.output_conv(fused)

        # (B, 1, H, W) -> (B, H, W)
        return out.squeeze(dim=1)
class MidasNet_small(BaseModel):
    """Network for monocular depth estimation (lightweight variant).
    """

    def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
                 blocks=None):
        """Init.

        Args:
            path (str, optional): Path to saved model. Defaults to None.
            features (int, optional): Base number of decoder features. Defaults to 64.
            backbone (str, optional): Backbone network for encoder. Defaults to efficientnet_lite3.
            non_negative (bool, optional): Clamp predicted depth at zero with a final ReLU.
            exportable (bool, optional): Build an export-friendly encoder.
            channels_last (bool, optional): Run the forward pass in channels-last memory format.
            align_corners (bool, optional): Passed to the fusion blocks' interpolation.
            blocks (dict, optional): Extra block options; {'expand': True} widens the
                deeper fusion stages to features*2/4/8. Defaults to {'expand': True}.
        """
        print("Loading weights: ", path)

        super(MidasNet_small, self).__init__()

        # Fixed mutable-default-argument anti-pattern: the old default
        # blocks={'expand': True} was a shared dict; same effective default.
        if blocks is None:
            blocks = {'expand': True}

        # NOTE(review): pretrained encoder weights are requested only when no
        # checkpoint path is given; MidasNet above uses the opposite
        # convention -- confirm which is intended.
        use_pretrained = False if path else True

        self.channels_last = channels_last
        self.blocks = blocks
        self.backbone = backbone

        self.groups = 1

        features1 = features
        features2 = features
        features3 = features
        features4 = features
        self.expand = False
        if "expand" in self.blocks and self.blocks['expand'] == True:
            self.expand = True
            features1 = features
            features2 = features * 2
            features3 = features * 4
            features4 = features * 8

        self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)

        self.scratch.activation = nn.ReLU(False)

        self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)

        self.scratch.output_conv = nn.Sequential(
            nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1, groups=self.groups),
            Interpolate(scale_factor=2, mode="bilinear"),
            nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
            self.scratch.activation,
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )

        if path:
            self.load(path)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input data (image)

        Returns:
            tensor: depth
        """
        if self.channels_last == True:
            print("self.channels_last = ", self.channels_last)
            # BUG FIX: Tensor.contiguous returns a new tensor; the original
            # discarded the result, making channels_last a silent no-op.
            x = x.contiguous(memory_format=torch.channels_last)

        layer_1 = self.pretrained.layer1(x)
        layer_2 = self.pretrained.layer2(layer_1)
        layer_3 = self.pretrained.layer3(layer_2)
        layer_4 = self.pretrained.layer4(layer_3)

        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        out = self.scratch.output_conv(path_1)

        return torch.squeeze(out, dim=1)
class Resize(object):
    """Resize sample to given size (width, height).
    """

    def __init__(
        self,
        width,
        height,
        resize_target=True,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resize_method="lower_bound",
        image_interpolation_method=cv2.INTER_AREA,
    ):
        """Init.

        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height is constrained to be multiple of this parameter.
                Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at most as large as the given size.
                "minimal": Scale as little as possible.
                Defaults to "lower_bound".
        """
        self._width = width
        self._height = height
        self._resize_target = resize_target
        self._keep_aspect_ratio = keep_aspect_ratio
        self._multiple_of = ensure_multiple_of
        self._resize_method = resize_method
        self._interpolation = image_interpolation_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        # Snap to the nearest multiple first, then nudge down to respect
        # max_val and finally up to respect min_val.
        m = self._multiple_of
        y = (np.round(x / m) * m).astype(int)

        if max_val is not None and y > max_val:
            y = (np.floor(x / m) * m).astype(int)

        if y < min_val:
            y = (np.ceil(x / m) * m).astype(int)

        return y

    def get_size(self, width, height):
        """Compute the output (width, height) for an input of the given size."""
        scale_w = self._width / width
        scale_h = self._height / height

        if self._keep_aspect_ratio:
            if self._resize_method == "lower_bound":
                # Output must cover the requested size: take the larger scale.
                scale_w = scale_h = max(scale_w, scale_h)
            elif self._resize_method == "upper_bound":
                # Output must fit inside the requested size: take the smaller scale.
                scale_w = scale_h = min(scale_w, scale_h)
            elif self._resize_method == "minimal":
                # Use whichever scale changes the image least.
                if abs(1 - scale_w) < abs(1 - scale_h):
                    scale_h = scale_w
                else:
                    scale_w = scale_h
            else:
                raise ValueError(
                    f"resize_method {self._resize_method} not implemented"
                )

        if self._resize_method == "lower_bound":
            new_height = self.constrain_to_multiple_of(
                scale_h * height, min_val=self._height
            )
            new_width = self.constrain_to_multiple_of(
                scale_w * width, min_val=self._width
            )
        elif self._resize_method == "upper_bound":
            new_height = self.constrain_to_multiple_of(
                scale_h * height, max_val=self._height
            )
            new_width = self.constrain_to_multiple_of(
                scale_w * width, max_val=self._width
            )
        elif self._resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_h * height)
            new_width = self.constrain_to_multiple_of(scale_w * width)
        else:
            raise ValueError(f"resize_method {self._resize_method} not implemented")

        return (new_width, new_height)

    def __call__(self, sample):
        new_w, new_h = self.get_size(
            sample["image"].shape[1], sample["image"].shape[0]
        )

        # resize image
        sample["image"] = cv2.resize(
            sample["image"],
            (new_w, new_h),
            interpolation=self._interpolation,
        )

        if self._resize_target:
            if "disparity" in sample:
                sample["disparity"] = cv2.resize(
                    sample["disparity"],
                    (new_w, new_h),
                    interpolation=cv2.INTER_NEAREST,
                )

            if "depth" in sample:
                sample["depth"] = cv2.resize(
                    sample["depth"], (new_w, new_h), interpolation=cv2.INTER_NEAREST
                )

            # NOTE: like the original, "mask" is resized unconditionally here
            # and must be present whenever resize_target is truthy.
            sample["mask"] = cv2.resize(
                sample["mask"].astype(np.float32),
                (new_w, new_h),
                interpolation=cv2.INTER_NEAREST,
            )
            sample["mask"] = sample["mask"].astype(bool)

        return sample
class NormalizeImage(object):
    """Normalize an image by a fixed mean and standard deviation."""

    def __init__(self, mean, std):
        # Scalars or per-channel arrays; applied with broadcasting.
        self._mean = mean
        self._std = std

    def __call__(self, sample):
        img = sample["image"]
        sample["image"] = (img - self._mean) / self._std
        return sample
class PrepareForNet(object):
    """Prepare a sample for use as network input.

    Transposes the image from HWC to CHW and converts image, mask,
    disparity and depth arrays to contiguous float32.
    """

    def __init__(self):
        pass

    def __call__(self, sample):
        # HWC -> CHW for the image.
        chw = np.transpose(sample["image"], (2, 0, 1))
        sample["image"] = np.ascontiguousarray(chw).astype(np.float32)

        # Optional auxiliary arrays: cast then make contiguous.
        for key in ("mask", "disparity", "depth"):
            if key in sample:
                sample[key] = np.ascontiguousarray(sample[key].astype(np.float32))

        return sample
def load_model(model_type):
    """Instantiate a MiDaS depth model and its matching input transform.

    Adapted from https://github.com/isl-org/MiDaS/blob/master/run.py

    Args:
        model_type (str): one of "dpt_large", "dpt_hybrid", "midas_v21",
            "midas_v21_small" (the keys of ISL_PATHS).

    Returns:
        (model, transform): the network switched to eval mode, and the
        Compose pipeline (Resize -> NormalizeImage -> PrepareForNet) that
        prepares an HWC float image sample for it.

    Raises:
        ValueError: if ``model_type`` is not a supported name.
    """
    # load network
    model_path = ISL_PATHS[model_type]
    if model_type == "dpt_large":  # DPT-Large
        model = DPTDepthModel(
            path=model_path,
            backbone="vitl16_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_hybrid":  # DPT-Hybrid
        model = DPTDepthModel(
            path=model_path,
            backbone="vitb_rn50_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "midas_v21":
        model = MidasNet(model_path, non_negative=True)
        net_w, net_h = 384, 384
        resize_mode = "upper_bound"
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )

    elif model_type == "midas_v21_small":
        model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
                               non_negative=True, blocks={'expand': True})
        net_w, net_h = 256, 256
        resize_mode = "upper_bound"
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )

    else:
        # BUG FIX: the original used print + `assert False`, which is stripped
        # under `python -O` and would fall through with unbound locals.
        raise ValueError(
            f"model_type '{model_type}' not implemented, use: --model_type large"
        )

    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                # NOTE(review): None is falsy, so only the image is resized —
                # presumably intentional at inference time; confirm.
                resize_target=None,
                keep_aspect_ratio=True,
                ensure_multiple_of=32,
                resize_method=resize_mode,
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            normalization,
            PrepareForNet(),
        ]
    )

    return model.eval(), transform
9,747 | import torch
import torch.nn as nn
from .vit import (
_make_pretrained_vitb_rn50_384,
_make_pretrained_vitl16_384,
_make_pretrained_vitb16_384,
forward_vit,
)
def _make_scratch(in_shape, out_shape, groups=1, expand=False):
def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
def _make_pretrained_resnext101_wsl(use_pretrained):
def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
def _make_pretrained_vitb_rn50_384(
pretrained, use_readout="ignore", hooks=None, use_vit_only=False
):
def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",):
    """Build the (pretrained encoder, scratch decoder stub) pair for a backbone.

    Args:
        backbone (str): one of "vitl16_384", "vitb_rn50_384", "vitb16_384",
            "resnext101_wsl", "efficientnet_lite3".
        features (int): base number of decoder features.
        use_pretrained (bool): load pretrained encoder weights.
        groups (int): forwarded to _make_scratch.
        expand (bool): forwarded to _make_scratch.
        exportable (bool): forwarded to the efficientnet builder only.
        hooks, use_vit_only, use_readout: ViT-specific options.

    Returns:
        (pretrained, scratch)

    Raises:
        ValueError: for an unknown backbone name.
    """
    if backbone == "vitl16_384":
        pretrained = _make_pretrained_vitl16_384(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [256, 512, 1024, 1024], features, groups=groups, expand=expand
        )  # ViT-L/16 - 85.0% Top1 (backbone)
    elif backbone == "vitb_rn50_384":
        pretrained = _make_pretrained_vitb_rn50_384(
            use_pretrained,
            hooks=hooks,
            use_vit_only=use_vit_only,
            use_readout=use_readout,
        )
        scratch = _make_scratch(
            [256, 512, 768, 768], features, groups=groups, expand=expand
        )  # ViT-B/16 + ResNet50 hybrid
    elif backbone == "vitb16_384":
        pretrained = _make_pretrained_vitb16_384(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [96, 192, 384, 768], features, groups=groups, expand=expand
        )  # ViT-B/16 - 84.6% Top1 (backbone)
    elif backbone == "resnext101_wsl":
        pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
        # Comment fix: this branch is resnext101_wsl, not efficientnet_lite3.
        scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand)  # resnext101_wsl
    elif backbone == "efficientnet_lite3":
        pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
        scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand)  # efficientnet_lite3
    else:
        # BUG FIX: print + `assert False` is stripped under `python -O` and
        # would fall through with unbound locals; raise instead.
        raise ValueError(f"Backbone '{backbone}' not implemented")

    return pretrained, scratch
9,750 | import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel
import open_clip
from ldm.util import default, count_params
The provided code snippet includes necessary dependencies for implementing the `disabled_train` function. Write a Python function `def disabled_train(self, mode=True)` to solve the following problem:
Overwrite model.train with this function to make sure train/eval mode does not change anymore.
Here is the function:
def disabled_train(self, mode=True):
    """Replacement for ``nn.Module.train`` that pins a module's mode.

    Assign this to ``model.train`` so later train/eval calls become no-ops;
    the ``mode`` argument is accepted for signature compatibility and
    deliberately ignored. Returns the module itself, like ``train`` does.
    """
    return self
9,751 | import importlib
import torch
from torch import optim
import numpy as np
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
def log_txt_as_img(wh, xc, size=10):
    """Render a batch of captions onto white images for logging.

    Args:
        wh: (width, height) tuple for each rendered image.
        xc: list of caption strings, one per batch element.
        size: font size in points.

    Returns:
        Float tensor of shape (B, 3, H, W) scaled to roughly [-1, 1].
    """
    rendered = []
    for caption in xc:
        canvas = Image.new("RGB", wh, color="white")
        drawer = ImageDraw.Draw(canvas)
        font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)
        # ~40 chars per line at width 256, scaled with the actual width.
        chars_per_line = int(40 * (wh[0] / 256))
        wrapped = "\n".join(
            caption[i:i + chars_per_line]
            for i in range(0, len(caption), chars_per_line)
        )

        try:
            drawer.text((0, 0), wrapped, fill="black", font=font)
        except UnicodeEncodeError:
            print("Cant encode string for logging. Skipping.")

        # HWC uint8 -> CHW float in [-1, 1].
        rendered.append(np.array(canvas).transpose(2, 0, 1) / 127.5 - 1.0)

    return torch.tensor(np.stack(rendered))
9,752 | import importlib
import torch
from torch import optim
import numpy as np
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
def ismap(x):
    """Return True for 4-D tensors whose channel dim exceeds 3 (feature maps)."""
    if not isinstance(x, torch.Tensor):
        return False
    return x.ndim == 4 and x.shape[1] > 3
9,753 | import importlib
import torch
from torch import optim
import numpy as np
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
def isimage(x):
    """Return True for 4-D tensors with 1 or 3 channels (grayscale/RGB images)."""
    if not isinstance(x, torch.Tensor):
        return False
    return x.ndim == 4 and x.shape[1] in (1, 3)
9,754 | import importlib
import torch
from torch import optim
import numpy as np
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
def exists(x):
    """Return True when ``x`` is not None."""
    return x is not None


def default(val, d):
    """Return ``val`` unless it is None, otherwise the fallback ``d``.

    ``d`` may be a zero-argument function; it is then invoked lazily only
    when the fallback is actually needed.
    """
    if val is not None:
        return val
    return d() if isfunction(d) else d
9,755 | import importlib
import torch
from torch import optim
import numpy as np
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
The provided code snippet includes necessary dependencies for implementing the `mean_flat` function. Write a Python function `def mean_flat(tensor)` to solve the following problem:
https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 Take the mean over all non-batch dimensions.
Here is the function:
def mean_flat(tensor):
    """
    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
    Take the mean over all non-batch dimensions.
    """
    # Reduce every axis except the leading batch axis.
    non_batch_dims = list(range(1, tensor.dim()))
    return tensor.mean(dim=non_batch_dims)
9,756 | import importlib
import torch
from torch import optim
import numpy as np
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
def count_params(model, verbose=False):
    """Return the total number of parameters in ``model``.

    When ``verbose`` is set, also print the count in millions.
    """
    total_params = 0
    for param in model.parameters():
        total_params += param.numel()
    if verbose:
        print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.")
    return total_params
9,757 | import torch
import torch.nn.functional as F
import math
from tqdm import tqdm
def expand_dims(v, dims):
    """
    Expand the tensor `v` to the dim `dims`.

    Args:
        `v`: a PyTorch tensor with shape [N].
        `dims`: an `int`.

    Returns:
        a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
    """
    # Index with Ellipsis plus (dims - 1) None axes to append size-1 dims.
    trailing = (None,) * (dims - 1)
    return v[(Ellipsis,) + trailing]
The provided code snippet includes necessary dependencies for implementing the `model_wrapper` function. Write a Python function `def model_wrapper( model, noise_schedule, model_type="noise", model_kwargs={}, guidance_type="uncond", condition=None, unconditional_condition=None, guidance_scale=1., classifier_fn=None, classifier_kwargs={}, )` to solve the following problem:
Create a wrapper function for the noise prediction model. DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. We support four types of the diffusion model by setting `model_type`: 1. "noise": noise prediction model. (Trained by predicting noise). 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). 3. "v": velocity prediction model. (Trained by predicting the velocity). The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2]. [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." arXiv preprint arXiv:2202.00512 (2022). [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." arXiv preprint arXiv:2210.02303 (2022). 4. "score": marginal score function. (Trained by denoising score matching). Note that the score function and the noise prediction model follows a simple relationship: ``` noise(x_t, t) = -sigma_t * score(x_t, t) ``` We support three types of guided sampling by DPMs by setting `guidance_type`: 1. "uncond": unconditional sampling by DPMs. The input `model` has the following format: `` model(x, t_input, **model_kwargs) -> noise | x_start | v | score `` 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. The input `model` has the following format: `` model(x, t_input, **model_kwargs) -> noise | x_start | v | score `` The input `classifier_fn` has the following format: `` classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) `` [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. 3. "classifier-free": classifier-free guidance sampling by conditional DPMs. 
The input `model` has the following format: `` model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score `` And if cond == `unconditional_condition`, the model output is the unconditional DPM output. [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." arXiv preprint arXiv:2207.12598 (2022). The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) or continuous-time labels (i.e. epsilon to T). We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: `` def model_fn(x, t_continuous) -> noise: t_input = get_model_input_time(t_continuous) return noise_pred(model, x, t_input, **model_kwargs) `` where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. =============================================================== Args: model: A diffusion model with the corresponding format described above. noise_schedule: A noise schedule object, such as NoiseScheduleVP. model_type: A `str`. The parameterization type of the diffusion model. "noise" or "x_start" or "v" or "score". model_kwargs: A `dict`. A dict for the other inputs of the model function. guidance_type: A `str`. The type of the guidance for sampling. "uncond" or "classifier" or "classifier-free". condition: A pytorch tensor. The condition for the guided sampling. Only used for "classifier" or "classifier-free" guidance type. unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. Only used for "classifier-free" guidance type. guidance_scale: A `float`. The scale for the guided sampling. classifier_fn: A classifier function. Only used for the classifier guidance. classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. Returns: A noise prediction model that accepts the noised data and the continuous time as the inputs.
Here is the function:
def model_wrapper(
    model,
    noise_schedule,
    model_type="noise",
    model_kwargs={},
    guidance_type="uncond",
    condition=None,
    unconditional_condition=None,
    guidance_scale=1.,
    classifier_fn=None,
    classifier_kwargs={},
):
    """Create a wrapper function for the noise prediction model.
    DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
    firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.
    We support four types of the diffusion model by setting `model_type`:
    1. "noise": noise prediction model. (Trained by predicting noise).
    2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
    3. "v": velocity prediction model. (Trained by predicting the velocity).
        The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].
        [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
            arXiv preprint arXiv:2202.00512 (2022).
        [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
            arXiv preprint arXiv:2210.02303 (2022).
    4. "score": marginal score function. (Trained by denoising score matching).
        Note that the score function and the noise prediction model follows a simple relationship:
        ```
        noise(x_t, t) = -sigma_t * score(x_t, t)
        ```
    We support three types of guided sampling by DPMs by setting `guidance_type`:
    1. "uncond": unconditional sampling by DPMs.
        The input `model` has the following format:
        ``
            model(x, t_input, **model_kwargs) -> noise | x_start | v | score
        ``
    2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
        The input `model` has the following format:
        ``
            model(x, t_input, **model_kwargs) -> noise | x_start | v | score
        ``
        The input `classifier_fn` has the following format:
        ``
            classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
        ``
        [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
            in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
    3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
        The input `model` has the following format:
        ``
            model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
        ``
        And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
        [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
            arXiv preprint arXiv:2207.12598 (2022).
    The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
    or continuous-time labels (i.e. epsilon to T).
    We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
    ``
        def model_fn(x, t_continuous) -> noise:
            t_input = get_model_input_time(t_continuous)
            return noise_pred(model, x, t_input, **model_kwargs)
    ``
    where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
    ===============================================================
    Args:
        model: A diffusion model with the corresponding format described above.
        noise_schedule: A noise schedule object, such as NoiseScheduleVP.
        model_type: A `str`. The parameterization type of the diffusion model.
                    "noise" or "x_start" or "v" or "score".
        model_kwargs: A `dict`. A dict for the other inputs of the model function.
        guidance_type: A `str`. The type of the guidance for sampling.
                    "uncond" or "classifier" or "classifier-free".
        condition: A pytorch tensor. The condition for the guided sampling.
                    Only used for "classifier" or "classifier-free" guidance type.
        unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
                    Only used for "classifier-free" guidance type.
        guidance_scale: A `float`. The scale for the guided sampling.
        classifier_fn: A classifier function. Only used for the classifier guidance.
        classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
    Returns:
        A noise prediction model that accepts the noised data and the continuous time as the inputs.
    """

    # Validate the configuration up front so a bad argument fails at wrap
    # time rather than on the first sampling step.
    # BUG FIX: "score" is documented above and implemented in noise_pred_fn,
    # but the original assert rejected it.
    assert model_type in ["noise", "x_start", "v", "score"]
    assert guidance_type in ["uncond", "classifier", "classifier-free"]

    def get_model_input_time(t_continuous):
        """
        Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
        For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
        For continuous-time DPMs, we just use `t_continuous`.
        """
        if noise_schedule.schedule == 'discrete':
            return (t_continuous - 1. / noise_schedule.total_N) * 1000.
        else:
            return t_continuous

    def noise_pred_fn(x, t_continuous, cond=None):
        # Broadcast a scalar time to the whole batch.
        if t_continuous.reshape((-1,)).shape[0] == 1:
            t_continuous = t_continuous.expand((x.shape[0]))
        t_input = get_model_input_time(t_continuous)
        if cond is None:
            output = model(x, t_input, **model_kwargs)
        else:
            output = model(x, t_input, cond, **model_kwargs)
        # Convert the model's native parameterization to a noise prediction.
        if model_type == "noise":
            return output
        elif model_type == "x_start":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
        elif model_type == "v":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
        elif model_type == "score":
            sigma_t = noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return -expand_dims(sigma_t, dims) * output

    def cond_grad_fn(x, t_input):
        """
        Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
        """
        with torch.enable_grad():
            x_in = x.detach().requires_grad_(True)
            log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
            return torch.autograd.grad(log_prob.sum(), x_in)[0]

    def model_fn(x, t_continuous):
        """
        The noise prediction model function that is used for DPM-Solver.
        """
        if t_continuous.reshape((-1,)).shape[0] == 1:
            t_continuous = t_continuous.expand((x.shape[0]))
        if guidance_type == "uncond":
            return noise_pred_fn(x, t_continuous)
        elif guidance_type == "classifier":
            assert classifier_fn is not None
            t_input = get_model_input_time(t_continuous)
            cond_grad = cond_grad_fn(x, t_input)
            sigma_t = noise_schedule.marginal_std(t_continuous)
            noise = noise_pred_fn(x, t_continuous)
            return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
        elif guidance_type == "classifier-free":
            if guidance_scale == 1. or unconditional_condition is None:
                return noise_pred_fn(x, t_continuous, cond=condition)
            else:
                # Evaluate conditional and unconditional branches in one batch.
                x_in = torch.cat([x] * 2)
                t_in = torch.cat([t_continuous] * 2)
                c_in = torch.cat([unconditional_condition, condition])
                noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
                return noise_uncond + guidance_scale * (noise - noise_uncond)

    return model_fn
"classifier-free": classifier-free guidance sampling by conditional DPMs. The input `model` has the following format: `` model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score `` And if cond == `unconditional_condition`, the model output is the unconditional DPM output. [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." arXiv preprint arXiv:2207.12598 (2022). The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) or continuous-time labels (i.e. epsilon to T). We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: `` def model_fn(x, t_continuous) -> noise: t_input = get_model_input_time(t_continuous) return noise_pred(model, x, t_input, **model_kwargs) `` where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. =============================================================== Args: model: A diffusion model with the corresponding format described above. noise_schedule: A noise schedule object, such as NoiseScheduleVP. model_type: A `str`. The parameterization type of the diffusion model. "noise" or "x_start" or "v" or "score". model_kwargs: A `dict`. A dict for the other inputs of the model function. guidance_type: A `str`. The type of the guidance for sampling. "uncond" or "classifier" or "classifier-free". condition: A pytorch tensor. The condition for the guided sampling. Only used for "classifier" or "classifier-free" guidance type. unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. Only used for "classifier-free" guidance type. guidance_scale: A `float`. The scale for the guided sampling. classifier_fn: A classifier function. Only used for the classifier guidance. classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. 
Returns: A noise prediction model that accepts the noised data and the continuous time as the inputs. |
9,758 | import torch
import torch.nn.functional as F
import math
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `interpolate_fn` function. Write a Python function `def interpolate_fn(x, xp, yp)` to solve the following problem:
A piecewise linear function y = f(x), using xp and yp as keypoints. We implement f(x) in a differentiable way (i.e. applicable for autograd). The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.) Args: x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver). xp: PyTorch tensor with shape [C, K], where K is the number of keypoints. yp: PyTorch tensor with shape [C, K]. Returns: The function values f(x), with shape [N, C].
Here is the function:
def interpolate_fn(x, xp, yp):
    """
    Differentiable piecewise-linear interpolation y = f(x) with keypoints (xp, yp).

    Outside the keypoint range the outermost segment is extended linearly, so
    f is defined on the whole real axis and stays autograd-friendly (no data-
    dependent Python branching; everything is done with sort/gather/where).

    Args:
        x:  tensor of shape [N, C] — query points (C = 1 for DPM-Solver).
        xp: tensor of shape [C, K] — keypoint x-coordinates.
        yp: tensor of shape [C, K] — keypoint y-coordinates.
    Returns:
        Tensor of shape [N, C] holding f(x).
    """
    batch, num_keypoints = x.shape[0], xp.shape[1]
    # Sort each query together with its row of keypoints; the query's rank in
    # the sorted row tells us which segment it falls into.
    combined = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((batch, 1, 1))], dim=2)
    combined_sorted, order = torch.sort(combined, dim=2)
    pos = torch.argmin(order, dim=2)  # rank of the query among the keypoints
    seg_candidate = pos - 1
    # Segment start, as an index into the *sorted* row (clamped at the ends
    # so out-of-range queries extrapolate along the outermost segment).
    lo_idx = torch.where(
        torch.eq(pos, 0),
        torch.tensor(1, device=x.device),
        torch.where(
            torch.eq(pos, num_keypoints), torch.tensor(num_keypoints - 2, device=x.device), seg_candidate,
        ),
    )
    hi_idx = torch.where(torch.eq(lo_idx, seg_candidate), lo_idx + 2, lo_idx + 1)
    x_lo = torch.gather(combined_sorted, dim=2, index=lo_idx.unsqueeze(2)).squeeze(2)
    x_hi = torch.gather(combined_sorted, dim=2, index=hi_idx.unsqueeze(2)).squeeze(2)
    # Segment start, as an index into the *original* (unsorted) keypoint array.
    y_lo_idx = torch.where(
        torch.eq(pos, 0),
        torch.tensor(0, device=x.device),
        torch.where(
            torch.eq(pos, num_keypoints), torch.tensor(num_keypoints - 2, device=x.device), seg_candidate,
        ),
    )
    yp_rows = yp.unsqueeze(0).expand(batch, -1, -1)
    y_lo = torch.gather(yp_rows, dim=2, index=y_lo_idx.unsqueeze(2)).squeeze(2)
    y_hi = torch.gather(yp_rows, dim=2, index=(y_lo_idx + 1).unsqueeze(2)).squeeze(2)
    # Linear interpolation on the selected segment.
    return y_lo + (x - x_lo) * (y_hi - y_lo) / (x_hi - x_lo)
9,759 | import torch
import numpy as np
def append_dims(x, target_dims):
    """Right-pad *x* with singleton dimensions until ``x.ndim == target_dims``.

    From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py

    Raises:
        ValueError: if x already has more than target_dims dimensions.
    """
    missing = target_dims - x.ndim
    if missing < 0:
        raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
    # Ellipsis keeps the existing dims; each None adds a trailing axis of size 1.
    return x[(...,) + (None,) * missing]
def norm_thresholding(x0, value):
    """Rescale each sample of x0 so its RMS over non-batch dims is at most 1/value-normalized.

    Samples whose RMS is below `value` are returned unchanged (the clamp makes
    the scale factor exactly 1); larger samples are scaled down to RMS == value.
    """
    # Per-sample RMS over all non-batch dimensions, floored at `value`.
    rms = x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value)
    # Broadcast the per-sample scalar back to x0's rank
    # (inlined append_dims from k-diffusion).
    rms = rms[(...,) + (None,) * (x0.ndim - rms.ndim)]
    return x0 * (value / rms)
9,760 | import torch
import numpy as np
def spatial_norm_thresholding(x0, value):
    """Per-pixel variant of norm thresholding for (b, c, h, w) tensors.

    At every spatial location the channel-RMS is floored at `value`; locations
    whose RMS is below the floor pass through unchanged, larger ones are
    scaled down to RMS == value.
    """
    channel_rms = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value)
    return x0 * (value / channel_rms)
9,761 | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
import itertools
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
The provided code snippet includes necessary dependencies for implementing the `disabled_train` function. Write a Python function `def disabled_train(self, mode=True)` to solve the following problem:
Overwrite model.train with this function to make sure train/eval mode does not change anymore.
Here is the function:
def disabled_train(self, mode=True):
    """Replacement for nn.Module.train that ignores `mode`, so a frozen
    module's train/eval state can never be changed by callers."""
    # Intentionally a no-op: just return the module unchanged.
    return self
9,762 | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
import itertools
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
def uniform_on_device(r1, r2, shape, device):
    """Draw a tensor of the given shape uniformly between r1 and r2,
    allocated directly on `device`."""
    sample = torch.rand(*shape, device=device)
    # Affine map of U[0, 1) onto the interval between r2 and r1.
    return (r1 - r2) * sample + r2
9,763 | import json
import pandas as pd
import json
def render_list(s):
    """Parse the string form of a list of strings (e.g. "['a', 'b']") back
    into a Python list.

    Fix: the empty-list string "[]" previously came back as [''] because
    ''.split(',') yields [''] — it now returns [].
    """
    inner = s.strip("[]").strip()
    if not inner:
        return []
    # Each element is stripped of surrounding quotes and spaces.
    return [item.strip("' ") for item in inner.split(",")]
9,764 | import os
import urllib.request
from datetime import datetime
count = 0  # total number of PDF links written to the README (updated by helper()).
readme_file = open("./README.md", 'w')
# NOTE(review): `folder` is not defined at module level in this excerpt —
# presumably this line belongs inside a loop over the top-level folders;
# confirm against the original script. The missing closing parenthesis on
# the write() call was a SyntaxError and has been added.
readme_file.write("-" + " " + "[" + folder + "]" + "(#" + folder + ")" + "\n")
readme_file.close()
The provided code snippet includes necessary dependencies for implementing the `helper` function. Write a Python function `def helper(folder, layer_index)` to solve the following problem:
深度优先遍历folder中的pdf文件和文件夹,写入Readme. Args: folder (string): 需要遍历的文件夹路径 layer_index (int): 文件夹深度索引,决定字体大小 Returns: None
Here is the function:
def helper(folder, layer_index):
    """
    Depth-first traversal of `folder`, writing its PDF files and sub-folders
    into the README (module-global `readme_file`).

    Args:
        folder (string): path of the folder to traverse
        layer_index (int): folder depth index; controls the markdown heading size
    Returns:
        None
    """
    global count
    file_list = []
    dir_list = []
    # Separate plain files (ignoring macOS .DS_Store) from sub-directories.
    for f in os.listdir(folder):
        if os.path.isfile(folder + "/" + f) and f != ".DS_Store":
            file_list.append(f)
        elif os.path.isdir(folder + "/" + f):
            dir_list.append(f)
    # Write the PDFs of the current folder.
    # Sort: entries named like "[....]" (already-read papers) first by the
    # text after the bracket, the rest alphabetically.
    file_list.sort(key=lambda x: x[1:5] if x[0] == "[" else x[0])
    cur_folder = folder.split("/")[-1]
    # Deeper folders get more '#'s, i.e. smaller markdown headings.
    readme_file.write("#" * layer_index + " " + cur_folder + "\n")
    for pdf in file_list:
        # URL-quote the relative path so the markdown link survives spaces etc.
        pdf_url = urllib.request.quote(folder + "/" + pdf)
        pdf = pdf.replace(".pdf", "")
        readme_file.write("-" + " " + "[" + pdf + "]" + "(" + pdf_url + ")" + "\n")
        count += 1
    # Recurse into sub-folders, two heading levels deeper.
    if not dir_list:
        return
    else:
        for dir in dir_list:
            helper(folder + "/" + dir, layer_index + 2)
9,765 |
class BaiduTranslator(Translator):
    """Translator backed by the Baidu Fanyi HTTP API."""

    def __init__(self) -> None:
        super().__init__()
        endpoint = "http://api.fanyi.baidu.com"
        path = "/api/trans/vip/translate"
        self.url = endpoint + path
        self.appid = conf().get("baidu_translate_app_id")
        self.appkey = conf().get("baidu_translate_app_key")
        if not self.appid or not self.appkey:
            raise Exception("baidu translate appid or appkey not set")

    # For the list of language codes, refer to https://api.fanyi.baidu.com/doc/21
    # (they need to be converted to ISO 639-1 codes).
    def translate(self, query: str, from_lang: str = "", to_lang: str = "en") -> str:
        """Translate `query` from `from_lang` (auto-detected when empty) to
        `to_lang`, joining multi-line results with newlines.

        Raises:
            Exception: on a non-retryable API error, or when all retries are
                exhausted on transient errors.
        """
        if not from_lang:
            from_lang = "auto"  # baidu supports auto detect
        salt = random.randint(32768, 65536)
        sign = self.make_md5("{}{}{}{}".format(self.appid, query, salt, self.appkey))
        headers = {"Content-Type": "application/x-www-form-urlencoded"}
        payload = {"appid": self.appid, "q": query, "from": from_lang, "to": to_lang, "salt": salt, "sign": sign}
        retry_cnt = 3
        result = None
        while retry_cnt:
            r = requests.post(self.url, params=payload, headers=headers)
            result = r.json()
            errcode = result.get("error_code", "52000")  # "52000" means success
            if errcode != "52000":
                if errcode == "52001" or errcode == "52002":
                    # 52001 (timeout) / 52002 (system error) are transient: retry.
                    retry_cnt -= 1
                    continue
                else:
                    raise Exception(result["error_msg"])
            else:
                break
        else:
            # Fix: previously the code fell through to result["trans_result"]
            # after exhausting retries, raising an opaque KeyError.
            raise Exception("baidu translate api: retries exhausted on transient errors")
        text = "\n".join([item["dst"] for item in result["trans_result"]])
        return text

    def make_md5(self, s, encoding="utf-8"):
        # Baidu's signature scheme requires the hex MD5 of appid+query+salt+appkey.
        return md5(s.encode(encoding)).hexdigest()
def create_translator(voice_type):
    """Factory returning a Translator implementation for `voice_type`.

    Raises:
        RuntimeError: if `voice_type` is not a supported translator type.
    """
    if voice_type == "baidu":
        # Imported lazily so the baidu dependency/config is only needed when used.
        from translate.baidu.baidu_translate import BaiduTranslator

        return BaiduTranslator()
    # Fix: include the offending value instead of raising a bare RuntimeError.
    raise RuntimeError(f"unknown translator type: {voice_type}")
9,766 | import os
import signal
import sys
import time
from channel import channel_factory
from common import const
from config import load_config
from plugins import *
import threading
def sigterm_handler_wrap(_signo):
def start_channel(channel_name: str):
}
def load_config():
[]
}
def run():
    """Application entry point: load config, install signal handlers, start
    the configured message channel, then block the main thread forever.

    Any startup exception is logged rather than propagated.
    """
    try:
        # load config
        load_config()
        # ctrl + c
        sigterm_handler_wrap(signal.SIGINT)
        # kill signal
        sigterm_handler_wrap(signal.SIGTERM)
        # create channel; "--cmd" on the command line forces the terminal channel
        channel_name = conf().get("channel_type", "wx")
        if "--cmd" in sys.argv:
            channel_name = "terminal"
        if channel_name == "wxy":
            os.environ["WECHATY_LOG"] = "warn"
        start_channel(channel_name)
        # Keep the main thread alive; the channel runs on its own threads.
        while True:
            time.sleep(1)
    except Exception as e:
        logger.error("App startup failed!")
        logger.exception(e)
9,767 | import re, os, sys, subprocess, copy, traceback, logging
import requests
from . import config
def print_line(msg, oneLine = False):
    """Write `msg` to stdout.

    With oneLine=True the current console line is first blanked (40 spaces +
    carriage return) so the message overwrites it; otherwise the message
    starts on a fresh line. Characters the console encoding cannot represent
    are replaced rather than raising.
    """
    out = sys.stdout
    if oneLine:
        out.write(' ' * 40 + '\r')
        out.flush()
    else:
        out.write('\n')
    # Round-trip through the console encoding so unencodable chars degrade
    # gracefully instead of raising UnicodeEncodeError.
    encoding = sys.stdin.encoding or 'utf8'
    out.write(msg.encode(encoding, 'replace').decode(encoding, 'replace'))
    out.flush()
9,768 | import time, re, io
import json, copy
import logging
from .. import config, utils
from ..components.contact import accept_friend
from ..returnvalues import ReturnValue
from ..storage import contact_change
from ..utils import update_info_dict
def update_chatroom(self, userName, detailedMember=False):
    ''' fetch fresh info for one or more chatrooms from the web API and merge
    it into local storage
        * userName: a chatroom UserName string or a list of them
        * detailedMember: when True, additionally batch-fetch full member
          info (50 members per request)
        * returns the updated chatroom dict from storage, or a list of them
          when more than one was requested
    '''
    if not isinstance(userName, list):
        userName = [userName]
    url = '%s/webwxbatchgetcontact?type=ex&r=%s' % (
        self.loginInfo['url'], int(time.time()))
    headers = {
        'ContentType': 'application/json; charset=UTF-8',
        'User-Agent' : config.USER_AGENT }
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Count': len(userName),
        'List': [{
            'UserName': u,
            'ChatRoomId': '', } for u in userName], }
    chatroomList = json.loads(self.s.post(url, data=json.dumps(data), headers=headers
        ).content.decode('utf8', 'replace')).get('ContactList')
    if not chatroomList:
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'No chatroom found',
            'Ret': -1001, }})
    if detailedMember:
        # Fetch full member dicts for one chatroom, one batch per request.
        def get_detailed_member_info(encryChatroomId, memberList):
            url = '%s/webwxbatchgetcontact?type=ex&r=%s' % (
                self.loginInfo['url'], int(time.time()))
            headers = {
                'ContentType': 'application/json; charset=UTF-8',
                'User-Agent' : config.USER_AGENT, }
            data = {
                'BaseRequest': self.loginInfo['BaseRequest'],
                'Count': len(memberList),
                'List': [{
                    'UserName': member['UserName'],
                    'EncryChatRoomId': encryChatroomId} \
                    for member in memberList], }
            return json.loads(self.s.post(url, data=json.dumps(data), headers=headers
                ).content.decode('utf8', 'replace'))['ContactList']
        MAX_GET_NUMBER = 50  # server-side batch limit per request
        for chatroom in chatroomList:
            totalMemberList = []
            for i in range(int(len(chatroom['MemberList']) / MAX_GET_NUMBER + 1)):
                memberList = chatroom['MemberList'][i*MAX_GET_NUMBER: (i+1)*MAX_GET_NUMBER]
                totalMemberList += get_detailed_member_info(chatroom['EncryChatRoomId'], memberList)
            chatroom['MemberList'] = totalMemberList
    update_local_chatrooms(self, chatroomList)
    r = [self.storageClass.search_chatrooms(userName=c['UserName'])
        for c in chatroomList]
    return r if 1 < len(r) else r[0]
def update_friend(self, userName):
    ''' fetch fresh info for one or more friends from the web API and merge
    it into local storage
        * userName: a friend UserName string or a list of them
        * returns the updated friend dict from storage, or a list of them
          when more than one was requested
    '''
    if not isinstance(userName, list):
        userName = [userName]
    url = '%s/webwxbatchgetcontact?type=ex&r=%s' % (
        self.loginInfo['url'], int(time.time()))
    headers = {
        'ContentType': 'application/json; charset=UTF-8',
        'User-Agent' : config.USER_AGENT }
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Count': len(userName),
        'List': [{
            'UserName': u,
            'EncryChatRoomId': '', } for u in userName], }
    friendList = json.loads(self.s.post(url, data=json.dumps(data), headers=headers
        ).content.decode('utf8', 'replace')).get('ContactList')
    update_local_friends(self, friendList)
    r = [self.storageClass.search_friends(userName=f['UserName'])
        for f in friendList]
    return r if len(r) != 1 else r[0]
def get_contact(self, update=False):
    ''' fetch the full contact list from the server and refresh local storage
        * without update it simply returns a deep copy of the cached
          chatroom list
        * with update it pages through webwxgetcontact (using Seq for
          continuation), classifies members into chatrooms / others and
          merges both into local storage
        * returns a deep copy of the chatroom list
    '''
    if not update:
        return utils.contact_deep_copy(self, self.chatroomList)
    # One page of the contact list; returns (next Seq, members of this page).
    def _get_contact(seq=0):
        url = '%s/webwxgetcontact?r=%s&seq=%s&skey=%s' % (self.loginInfo['url'],
            int(time.time()), seq, self.loginInfo['skey'])
        headers = {
            'ContentType': 'application/json; charset=UTF-8',
            'User-Agent' : config.USER_AGENT, }
        try:
            r = self.s.get(url, headers=headers)
        except:
            # Fallback: the full fetch can fail for accounts with very many
            # chatrooms, so refresh each known chatroom individually instead.
            logger.info('Failed to fetch contact, that may because of the amount of your chatrooms')
            for chatroom in self.get_chatrooms():
                self.update_chatroom(chatroom['UserName'], detailedMember=True)
            return 0, []
        j = json.loads(r.content.decode('utf-8', 'replace'))
        return j.get('Seq', 0), j.get('MemberList')
    seq, memberList = 0, []
    # Seq == 0 signals the last page.
    while 1:
        seq, batchMemberList = _get_contact(seq)
        memberList.extend(batchMemberList)
        if seq == 0:
            break
    chatroomList, otherList = [], []
    for m in memberList:
        if m['Sex'] != 0:
            otherList.append(m)
        elif '@@' in m['UserName']:
            # '@@'-prefixed UserNames are chatrooms
            chatroomList.append(m)
        elif '@' in m['UserName']:
            # mp will be dealt in update_local_friends as well
            otherList.append(m)
    if chatroomList:
        update_local_chatrooms(self, chatroomList)
    if otherList:
        update_local_friends(self, otherList)
    return utils.contact_deep_copy(self, chatroomList)
def get_friends(self, update=False):
    ''' return a deep copy of the cached friend list, optionally refreshing
    the contact list from the server first '''
    if update:
        self.get_contact(update=True)
    return utils.contact_deep_copy(self, self.memberList)
def get_chatrooms(self, update=False, contactOnly=False):
    ''' return a deep copy of the cached chatroom list
        * contactOnly: force a server refresh and return only what the
          contact fetch yields
        * update: refresh the contact list before returning the cache
    '''
    if contactOnly:
        return self.get_contact(update=True)
    else:
        if update:
            self.get_contact(True)
        return utils.contact_deep_copy(self, self.chatroomList)
def get_mps(self, update=False):
    ''' return a deep copy of the cached media-platform (official account)
    list, optionally refreshing the contact list from the server first '''
    if update: self.get_contact(update=True)
    return utils.contact_deep_copy(self, self.mpList)
def set_alias(self, userName, alias):
    ''' set the remark name (alias) of a friend via webwxoplog
        * returns ReturnValue with Ret -1001 if the friend is not in local
          storage; on success the cached RemarkName is updated too
    '''
    oldFriendInfo = utils.search_dict_list(
        self.memberList, 'UserName', userName)
    if oldFriendInfo is None:
        return ReturnValue({'BaseResponse': {
            'Ret': -1001, }})
    url = '%s/webwxoplog?lang=%s&pass_ticket=%s' % (
        self.loginInfo['url'], 'zh_CN', self.loginInfo['pass_ticket'])
    data = {
        'UserName'    : userName,
        'CmdId'       : 2,  # CmdId 2 == set remark name
        'RemarkName'  : alias,
        'BaseRequest' : self.loginInfo['BaseRequest'], }
    headers = { 'User-Agent' : config.USER_AGENT}
    r = self.s.post(url, json.dumps(data, ensure_ascii=False).encode('utf8'),
        headers=headers)
    r = ReturnValue(rawResponse=r)
    if r:
        # Keep the local cache consistent with the server-side change.
        oldFriendInfo['RemarkName'] = alias
    return r
def set_pinned(self, userName, isPinned=True):
    ''' pin or unpin a chat (friend or chatroom) to the top of the list
    via webwxoplog '''
    url = '%s/webwxoplog?pass_ticket=%s' % (
        self.loginInfo['url'], self.loginInfo['pass_ticket'])
    data = {
        'UserName'    : userName,
        'CmdId'       : 3,  # CmdId 3 == pin/unpin
        'OP'          : int(isPinned),
        'BaseRequest' : self.loginInfo['BaseRequest'], }
    headers = { 'User-Agent' : config.USER_AGENT}
    r = self.s.post(url, json=data, headers=headers)
    return ReturnValue(rawResponse=r)
def accept_friend(self, userName, v4= '', autoUpdate=True):
    ''' accept a friend request via webwxverifyuser
        * v4: the verify ticket carried by the friend-request message
        * autoUpdate: refresh the new friend's info in local storage after
          accepting
        NOTE(review): an identical second definition of this function appears
        later in this module and is the one actually bound by load_contact —
        consider removing one of them.
    '''
    url = f"{self.loginInfo['url']}/webwxverifyuser?r={int(time.time())}&pass_ticket={self.loginInfo['pass_ticket']}"
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Opcode': 3,  # 3 == accept the request
        'VerifyUserListSize': 1,
        'VerifyUserList': [{
            'Value': userName,
            'VerifyUserTicket': v4, }],
        'VerifyContent': '',
        'SceneListCount': 1,
        'SceneList': [33],
        'skey': self.loginInfo['skey'], }
    headers = {
        'ContentType': 'application/json; charset=UTF-8',
        'User-Agent' : config.USER_AGENT }
    r = self.s.post(url, headers=headers,
        data=json.dumps(data, ensure_ascii=False).encode('utf8', 'replace'))
    if autoUpdate:
        self.update_friend(userName)
    return ReturnValue(rawResponse=r)
def get_head_img(self, userName=None, chatroomUserName=None, picDir=None):
    ''' get head image
        * if you want to get chatroom header: only set chatroomUserName
        * if you want to get friend header: only set userName
        * if you want to get chatroom member header: set both
        * returns the raw image bytes when picDir is None, otherwise saves
          the image to picDir and returns a ReturnValue
    '''
    params = {
        'userName': userName or chatroomUserName or self.storageClass.userName,
        'skey': self.loginInfo['skey'],
        'type': 'big', }
    url = '%s/webwxgeticon' % self.loginInfo['url']
    if chatroomUserName is None:
        infoDict = self.storageClass.search_friends(userName=userName)
        if infoDict is None:
            return ReturnValue({'BaseResponse': {
                'ErrMsg': 'No friend found',
                'Ret': -1001, }})
    else:
        if userName is None:
            url = '%s/webwxgetheadimg' % self.loginInfo['url']
        else:
            chatroom = self.storageClass.search_chatrooms(userName=chatroomUserName)
            # Fix: this previously tested `chatroomUserName is None`, which is
            # always False inside this branch, so an unknown chatroom fell
            # through and crashed below on chatroom['EncryChatRoomId'].
            if chatroom is None:
                return ReturnValue({'BaseResponse': {
                    'ErrMsg': 'No chatroom found',
                    'Ret': -1001, }})
            if 'EncryChatRoomId' in chatroom:
                params['chatroomid'] = chatroom['EncryChatRoomId']
            params['chatroomid'] = params.get('chatroomid') or chatroom['UserName']
    headers = { 'User-Agent' : config.USER_AGENT}
    r = self.s.get(url, params=params, stream=True, headers=headers)
    tempStorage = io.BytesIO()
    for block in r.iter_content(1024):
        tempStorage.write(block)
    if picDir is None:
        return tempStorage.getvalue()
    with open(picDir, 'wb') as f:
        f.write(tempStorage.getvalue())
    tempStorage.seek(0)
    # Sniff the image type from the first bytes for the caller's convenience.
    return ReturnValue({'BaseResponse': {
        'ErrMsg': 'Successfully downloaded',
        'Ret': 0, },
        'PostFix': utils.get_image_postfix(tempStorage.read(20)), })
def create_chatroom(self, memberList, topic=''):
    ''' create a chatroom with the given members via webwxcreatechatroom
        * memberList: comma-separated string of member UserNames
        * topic: optional chatroom name
    '''
    url = '%s/webwxcreatechatroom?pass_ticket=%s&r=%s' % (
        self.loginInfo['url'], self.loginInfo['pass_ticket'], int(time.time()))
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'MemberCount': len(memberList.split(',')),
        'MemberList': [{'UserName': member} for member in memberList.split(',')],
        'Topic': topic, }
    headers = {
        'content-type': 'application/json; charset=UTF-8',
        'User-Agent' : config.USER_AGENT }
    r = self.s.post(url, headers=headers,
        data=json.dumps(data, ensure_ascii=False).encode('utf8', 'ignore'))
    return ReturnValue(rawResponse=r)
def set_chatroom_name(self, chatroomUserName, name):
    ''' rename a chatroom via webwxupdatechatroom (fun=modtopic) '''
    url = '%s/webwxupdatechatroom?fun=modtopic&pass_ticket=%s' % (
        self.loginInfo['url'], self.loginInfo['pass_ticket'])
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'ChatRoomName': chatroomUserName,
        'NewTopic': name, }
    headers = {
        'content-type': 'application/json; charset=UTF-8',
        'User-Agent' : config.USER_AGENT }
    r = self.s.post(url, headers=headers,
        data=json.dumps(data, ensure_ascii=False).encode('utf8', 'ignore'))
    return ReturnValue(rawResponse=r)
def delete_member_from_chatroom(self, chatroomUserName, memberList):
    ''' remove members from a chatroom via webwxupdatechatroom (fun=delmember)
        * memberList: list of member dicts (each with a 'UserName' key)
    '''
    url = '%s/webwxupdatechatroom?fun=delmember&pass_ticket=%s' % (
        self.loginInfo['url'], self.loginInfo['pass_ticket'])
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'ChatRoomName': chatroomUserName,
        'DelMemberList': ','.join([member['UserName'] for member in memberList]), }
    headers = {
        'content-type': 'application/json; charset=UTF-8',
        'User-Agent' : config.USER_AGENT}
    r = self.s.post(url, data=json.dumps(data),headers=headers)
    return ReturnValue(rawResponse=r)
def add_member_into_chatroom(self, chatroomUserName, memberList,
        useInvitation=False):
    ''' add or invite member into chatroom
        * there are two ways to get members into chatroom: invite or directly add
        * but for chatrooms with more than 40 users, you can only use invite
        * but don't worry we will auto-force userInvitation for you when necessary
    '''
    if not useInvitation:
        # Decide whether the server requires invitation based on the current
        # member count vs the server-provided InviteStartCount threshold.
        chatroom = self.storageClass.search_chatrooms(userName=chatroomUserName)
        if not chatroom: chatroom = self.update_chatroom(chatroomUserName)
        if len(chatroom['MemberList']) > self.loginInfo['InviteStartCount']:
            useInvitation = True
    if useInvitation:
        fun, memberKeyName = 'invitemember', 'InviteMemberList'
    else:
        fun, memberKeyName = 'addmember', 'AddMemberList'
    url = '%s/webwxupdatechatroom?fun=%s&pass_ticket=%s' % (
        self.loginInfo['url'], fun, self.loginInfo['pass_ticket'])
    params = {
        'BaseRequest'  : self.loginInfo['BaseRequest'],
        'ChatRoomName' : chatroomUserName,
        memberKeyName  : memberList, }
    headers = {
        'content-type': 'application/json; charset=UTF-8',
        'User-Agent' : config.USER_AGENT}
    r = self.s.post(url, data=json.dumps(params),headers=headers)
    return ReturnValue(rawResponse=r)
def accept_friend(self, userName, v4='', autoUpdate=True):
    ''' accept a friend request via webwxverifyuser
        * v4: the verify ticket carried by the friend-request message
        * autoUpdate: refresh the new friend's info in local storage after
          accepting
        NOTE(review): this duplicates the accept_friend defined earlier in
        this module; being later, it is the definition load_contact binds.
        The two are identical — one should be removed.
    '''
    url = f"{self.loginInfo['url']}/webwxverifyuser?r={int(time.time())}&pass_ticket={self.loginInfo['pass_ticket']}"
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Opcode': 3,  # 3 == accept the request
        'VerifyUserListSize': 1,
        'VerifyUserList': [{
            'Value': userName,
            'VerifyUserTicket': v4, }],
        'VerifyContent': '',
        'SceneListCount': 1,
        'SceneList': [33],
        'skey': self.loginInfo['skey'], }
    headers = {
        'ContentType': 'application/json; charset=UTF-8',
        'User-Agent': config.USER_AGENT}
    r = self.s.post(url, headers=headers,
        data=json.dumps(data, ensure_ascii=False).encode('utf8', 'replace'))
    if autoUpdate:
        self.update_friend(userName)
    return ReturnValue(rawResponse=r)
def load_contact(core):
    ''' attach all contact-related functions of this module as methods of
    the given core object (itchat's monkey-patch style plumbing) '''
    core.update_chatroom             = update_chatroom
    core.update_friend               = update_friend
    core.get_contact                 = get_contact
    core.get_friends                 = get_friends
    core.get_chatrooms               = get_chatrooms
    core.get_mps                     = get_mps
    core.set_alias                   = set_alias
    core.set_pinned                  = set_pinned
    core.accept_friend               = accept_friend
    core.get_head_img                = get_head_img
    core.create_chatroom             = create_chatroom
    core.set_chatroom_name           = set_chatroom_name
    core.delete_member_from_chatroom = delete_member_from_chatroom
    core.add_member_into_chatroom    = add_member_into_chatroom
9,769 | import os, time, re, io
import json
import mimetypes, hashlib
import logging
from collections import OrderedDict
from .. import config, utils
from ..returnvalues import ReturnValue
from ..storage import templates
from .contact import update_local_uin
async def send_raw_msg(self, msgType, content, toUserName):
    ''' send a message of an arbitrary web-API type via webwxsendmsg
        * msgType: numeric WeChat message type (e.g. 1 for text)
        * toUserName: receiver; falls back to oneself (filehelper-style) when
          empty
    '''
    url = '%s/webwxsendmsg' % self.loginInfo['url']
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Msg': {
            'Type': msgType,
            'Content': content,
            'FromUserName': self.storageClass.userName,
            'ToUserName': (toUserName if toUserName else self.storageClass.userName),
            # LocalID/ClientMsgId only need to be unique; wall-clock works.
            'LocalID': int(time.time() * 1e4),
            'ClientMsgId': int(time.time() * 1e4),
        },
        'Scene': 0, }
    headers = { 'ContentType': 'application/json; charset=UTF-8', 'User-Agent' : config.USER_AGENT}
    r = self.s.post(url, headers=headers,
        data=json.dumps(data, ensure_ascii=False).encode('utf8'))
    return ReturnValue(rawResponse=r)
async def send_msg(self, msg='Test Message', toUserName=None):
    ''' send a plain text message (raw message type 1) '''
    logger.debug('Request to send a text message to %s: %s' % (toUserName, msg))
    r = await self.send_raw_msg(1, msg, toUserName)
    return r
def upload_file(self, fileDir, isPicture=False, isVideo=False,
        toUserName='filehelper', file_=None, preparedFile=None):
    ''' upload a file/picture/video to the media server in 512 KiB chunks
        * fileDir: path of the file to upload (or pass an opened file via file_)
        * preparedFile: result of a previous _prepare_file call, to avoid
          re-reading/re-hashing
        * returns a ReturnValue whose MediaId is used by the send_* functions
    '''
    logger.debug('Request to upload a %s: %s' % (
        'picture' if isPicture else 'video' if isVideo else 'file', fileDir))
    if not preparedFile:
        preparedFile = _prepare_file(fileDir, file_)
        if not preparedFile:
            return preparedFile
    fileSize, fileMd5, file_ = \
        preparedFile['fileSize'], preparedFile['fileMd5'], preparedFile['file_']
    fileSymbol = 'pic' if isPicture else 'video' if isVideo else'doc'
    # 524288 == 512 KiB chunk size required by the upload endpoint.
    chunks = int((fileSize - 1) / 524288) + 1
    clientMediaId = int(time.time() * 1e4)
    # OrderedDict + separators: the server is picky about field order/format.
    uploadMediaRequest = json.dumps(OrderedDict([
        ('UploadType', 2),
        ('BaseRequest', self.loginInfo['BaseRequest']),
        ('ClientMediaId', clientMediaId),
        ('TotalLen', fileSize),
        ('StartPos', 0),
        ('DataLen', fileSize),
        ('MediaType', 4),
        ('FromUserName', self.storageClass.userName),
        ('ToUserName', toUserName),
        ('FileMd5', fileMd5)]
        ), separators = (',', ':'))
    r = {'BaseResponse': {'Ret': -1005, 'ErrMsg': 'Empty file detected'}}
    for chunk in range(chunks):
        r = upload_chunk_file(self, fileDir, fileSymbol, fileSize,
            file_, chunk, chunks, uploadMediaRequest)
    file_.close()
    if isinstance(r, dict):
        # No chunk was uploaded (empty file) — r is still the error dict.
        return ReturnValue(r)
    return ReturnValue(rawResponse=r)
async def send_file(self, fileDir, toUserName=None, mediaId=None, file_=None):
    ''' send an attachment message (app message type 6)
        * uploads the file first unless a mediaId from a previous upload is
          given
        * fileDir must be a path string here (opened files go through file_)
    '''
    logger.debug('Request to send a file(mediaId: %s) to %s: %s' % (
        mediaId, toUserName, fileDir))
    if hasattr(fileDir, 'read'):
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'fileDir param should not be an opened file in send_file',
            'Ret': -1005, }})
    if toUserName is None:
        toUserName = self.storageClass.userName
    preparedFile = _prepare_file(fileDir, file_)
    if not preparedFile:
        return preparedFile
    fileSize = preparedFile['fileSize']
    if mediaId is None:
        r = self.upload_file(fileDir, preparedFile=preparedFile)
        if r:
            mediaId = r['MediaId']
        else:
            return r
    url = '%s/webwxsendappmsg?fun=async&f=json' % self.loginInfo['url']
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Msg': {
            'Type': 6,
            # XML appmsg payload: title = file name, appattach carries the
            # uploaded media id, size and extension.
            'Content': ("<appmsg appid='wxeb7ec651dd0aefa9' sdkver=''><title>%s</title>" % os.path.basename(fileDir) +
                "<des></des><action></action><type>6</type><content></content><url></url><lowurl></lowurl>" +
                "<appattach><totallen>%s</totallen><attachid>%s</attachid>" % (str(fileSize), mediaId) +
                "<fileext>%s</fileext></appattach><extinfo></extinfo></appmsg>" % os.path.splitext(fileDir)[1].replace('.','')),
            'FromUserName': self.storageClass.userName,
            'ToUserName': toUserName,
            'LocalID': int(time.time() * 1e4),
            'ClientMsgId': int(time.time() * 1e4), },
        'Scene': 0, }
    headers = {
        'User-Agent': config.USER_AGENT,
        'Content-Type': 'application/json;charset=UTF-8', }
    r = self.s.post(url, headers=headers,
        data=json.dumps(data, ensure_ascii=False).encode('utf8'))
    return ReturnValue(rawResponse=r)
async def send_image(self, fileDir=None, toUserName=None, mediaId=None, file_=None):
    ''' send an image message (type 3), or an emoticon (type 47) for .gif
        * pass either a path (fileDir) or an opened file (file_); a mediaId
          from a previous upload skips the upload step
    '''
    logger.debug('Request to send a image(mediaId: %s) to %s: %s' % (
        mediaId, toUserName, fileDir))
    if fileDir or file_:
        if hasattr(fileDir, 'read'):
            file_, fileDir = fileDir, None
        if fileDir is None:
            fileDir = 'tmp.jpg' # specific fileDir to send gifs
    else:
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'Either fileDir or file_ should be specific',
            'Ret': -1005, }})
    if toUserName is None:
        toUserName = self.storageClass.userName
    if mediaId is None:
        # gifs must be uploaded as plain files, not pictures.
        r = self.upload_file(fileDir, isPicture=not fileDir[-4:] == '.gif', file_=file_)
        if r:
            mediaId = r['MediaId']
        else:
            return r
    url = '%s/webwxsendmsgimg?fun=async&f=json' % self.loginInfo['url']
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Msg': {
            'Type': 3,
            'MediaId': mediaId,
            'FromUserName': self.storageClass.userName,
            'ToUserName': toUserName,
            'LocalID': int(time.time() * 1e4),
            'ClientMsgId': int(time.time() * 1e4), },
        'Scene': 0, }
    if fileDir[-4:] == '.gif':
        # gifs go through the emoticon endpoint with message type 47.
        url = '%s/webwxsendemoticon?fun=sys' % self.loginInfo['url']
        data['Msg']['Type'] = 47
        data['Msg']['EmojiFlag'] = 2
    headers = {
        'User-Agent': config.USER_AGENT,
        'Content-Type': 'application/json;charset=UTF-8', }
    r = self.s.post(url, headers=headers,
        data=json.dumps(data, ensure_ascii=False).encode('utf8'))
    return ReturnValue(rawResponse=r)
async def send_video(self, fileDir=None, toUserName=None, mediaId=None, file_=None):
    ''' send a video message (type 43)
        * pass either a path (fileDir) or an opened file (file_); a mediaId
          from a previous upload skips the upload step
    '''
    logger.debug('Request to send a video(mediaId: %s) to %s: %s' % (
        mediaId, toUserName, fileDir))
    if fileDir or file_:
        if hasattr(fileDir, 'read'):
            file_, fileDir = fileDir, None
        if fileDir is None:
            fileDir = 'tmp.mp4' # specific fileDir to send other formats
    else:
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'Either fileDir or file_ should be specific',
            'Ret': -1005, }})
    if toUserName is None:
        toUserName = self.storageClass.userName
    if mediaId is None:
        r = self.upload_file(fileDir, isVideo=True, file_=file_)
        if r:
            mediaId = r['MediaId']
        else:
            return r
    url = '%s/webwxsendvideomsg?fun=async&f=json&pass_ticket=%s' % (
        self.loginInfo['url'], self.loginInfo['pass_ticket'])
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Msg': {
            'Type'         : 43,
            'MediaId'      : mediaId,
            'FromUserName' : self.storageClass.userName,
            'ToUserName'   : toUserName,
            'LocalID'      : int(time.time() * 1e4),
            'ClientMsgId'  : int(time.time() * 1e4), },
        'Scene': 0, }
    headers = {
        'User-Agent' : config.USER_AGENT,
        'Content-Type': 'application/json;charset=UTF-8', }
    r = self.s.post(url, headers=headers,
        data=json.dumps(data, ensure_ascii=False).encode('utf8'))
    return ReturnValue(rawResponse=r)
async def send(self, msg, toUserName=None, mediaId=None):
    ''' dispatch a message by its 5-char prefix:
        @fil@ -> file, @img@ -> image, @vid@ -> video, @msg@ or no prefix
        -> plain text; mediaId (if given) skips re-uploading the media
    '''
    if not msg:
        r = ReturnValue({'BaseResponse': {
            'ErrMsg': 'No message.',
            'Ret': -1005, }})
    elif msg[:5] == '@fil@':
        if mediaId is None:
            r = await self.send_file(msg[5:], toUserName)
        else:
            r = await self.send_file(msg[5:], toUserName, mediaId)
    elif msg[:5] == '@img@':
        if mediaId is None:
            r = await self.send_image(msg[5:], toUserName)
        else:
            r = await self.send_image(msg[5:], toUserName, mediaId)
    elif msg[:5] == '@msg@':
        r = await self.send_msg(msg[5:], toUserName)
    elif msg[:5] == '@vid@':
        if mediaId is None:
            r = await self.send_video(msg[5:], toUserName)
        else:
            r = await self.send_video(msg[5:], toUserName, mediaId)
    else:
        r = await self.send_msg(msg, toUserName)
    return r
async def revoke(self, msgId, toUserName, localId=None):
    ''' revoke (recall) a previously sent message via webwxrevokemsg
        * msgId: server-side message id (SvrMsgId)
        * localId: client-side id of the original send; generated from the
          clock when not provided
    '''
    url = '%s/webwxrevokemsg' % self.loginInfo['url']
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        "ClientMsgId": localId or str(time.time() * 1e3),
        "SvrMsgId": msgId,
        "ToUserName": toUserName}
    headers = {
        'ContentType': 'application/json; charset=UTF-8',
        'User-Agent' : config.USER_AGENT }
    r = self.s.post(url, headers=headers,
        data=json.dumps(data, ensure_ascii=False).encode('utf8'))
    return ReturnValue(rawResponse=r)
def load_messages(core):
    ''' attach all message-sending functions of this module as methods of
    the given core object (itchat's monkey-patch style plumbing) '''
    core.send_raw_msg = send_raw_msg
    core.send_msg     = send_msg
    core.upload_file  = upload_file
    core.send_file    = send_file
    core.send_image   = send_image
    core.send_video   = send_video
    core.send         = send
    core.revoke       = revoke
9,770 | import asyncio
import os, time, re, io
import threading
import json
import random
import traceback
import logging
import requests
from pyqrcode import QRCode
from .. import config, utils
from ..returnvalues import ReturnValue
from ..storage.templates import wrap_user_dict
from .contact import update_local_chatrooms, update_local_friends
from .messages import produce_msg
async def login(self, enableCmdQR=False, picDir=None, qrCallback=None, EventScanPayload=None,ScanStatus=None,event_stream=None,
                loginCallback=None, exitCallback=None):
    """QR-code login loop driving a wechaty-style event stream.

    Emits 'scan' events (an EventScanPayload carrying a ScanStatus) on
    *event_stream* for every state change: Waiting (QR shown), Scanned,
    Confirmed, Cancel and Timeout. Once confirmed it initialises the web
    session, loads contacts, fires *loginCallback* (if callable) and
    starts the receiving loop with *exitCallback*.

    NOTE(review): enableCmdQR, picDir and qrCallback are accepted but
    effectively unused here (picDir only for QR-file cleanup) — the QR
    is published solely via event_stream; confirm intended.
    """
    if self.alive or self.isLogging:
        logger.warning('itchat has already logged in.')
        return
    self.isLogging = True
    while self.isLogging:
        # push_login presumably re-uses a cached uuid from stored cookies
        # — TODO confirm (defined elsewhere); else fetch a fresh QR uuid.
        uuid = await push_login(self)
        if uuid:
            payload = EventScanPayload(
                status=ScanStatus.Waiting,
                qrcode=f"qrcode/https://login.weixin.qq.com/l/{uuid}"
            )
            event_stream.emit('scan', payload)
            await asyncio.sleep(0.1)
        else:
            logger.info('Getting uuid of QR code.')
            self.get_QRuuid()
            payload = EventScanPayload(
                status=ScanStatus.Waiting,
                qrcode=f"https://login.weixin.qq.com/l/{self.uuid}"
            )
            print(f"https://wechaty.js.org/qrcode/https://login.weixin.qq.com/l/{self.uuid}")
            event_stream.emit('scan', payload)
            await asyncio.sleep(0.1)
        # logger.info('Please scan the QR code to log in.')
        # isLoggedIn is tri-state: False = waiting for scan, None = scanned
        # but not yet confirmed on the phone, True = confirmed.
        isLoggedIn = False
        while not isLoggedIn:
            status = await self.check_login()
            # if hasattr(qrCallback, '__call__'):
            #     await qrCallback(uuid=self.uuid, status=status, qrcode=self.qrStorage.getvalue())
            if status == '200':
                isLoggedIn = True
                payload = EventScanPayload(
                    status=ScanStatus.Scanned,
                    qrcode=f"https://login.weixin.qq.com/l/{self.uuid}"
                )
                event_stream.emit('scan', payload)
                await asyncio.sleep(0.1)
            elif status == '201':
                if isLoggedIn is not None:
                    logger.info('Please press confirm on your phone.')
                    isLoggedIn = None
                    payload = EventScanPayload(
                        status=ScanStatus.Waiting,
                        qrcode=f"https://login.weixin.qq.com/l/{self.uuid}"
                    )
                    event_stream.emit('scan', payload)
                    await asyncio.sleep(0.1)
            elif status != '408':
                # any other status (e.g. '400') means the QR was cancelled
                # or expired server-side; leave the polling loop
                payload = EventScanPayload(
                    status=ScanStatus.Cancel,
                    qrcode=f"https://login.weixin.qq.com/l/{self.uuid}"
                )
                event_stream.emit('scan', payload)
                await asyncio.sleep(0.1)
                break
        if isLoggedIn:
            payload = EventScanPayload(
                status=ScanStatus.Confirmed,
                qrcode=f"https://login.weixin.qq.com/l/{self.uuid}"
            )
            event_stream.emit('scan', payload)
            await asyncio.sleep(0.1)
            break
        elif self.isLogging:
            logger.info('Log in time out, reloading QR code.')
            payload = EventScanPayload(
                status=ScanStatus.Timeout,
                qrcode=f"https://login.weixin.qq.com/l/{self.uuid}"
            )
            event_stream.emit('scan', payload)
            await asyncio.sleep(0.1)
        else:
            # isLogging was cleared externally (logout) while waiting
            return
    logger.info('Loading the contact, this may take a little while.')
    await self.web_init()
    await self.show_mobile_login()
    self.get_contact(True)
    if hasattr(loginCallback, '__call__'):
        r = await loginCallback(self.storageClass.userName)
    else:
        utils.clear_screen()
        if os.path.exists(picDir or config.DEFAULT_QR):
            os.remove(picDir or config.DEFAULT_QR)
        logger.info('Login successfully as %s' % self.storageClass.nickName)
    await self.start_receiving(exitCallback)
    self.isLogging = False
def get_QRuuid(self):
    """Request a fresh QR-code uuid from the jslogin endpoint.

    On success the uuid is cached on ``self.uuid`` and returned;
    otherwise ``None`` is returned.
    """
    response = self.s.get(
        '%s/jslogin' % config.BASE_URL,
        params={
            'appid' : 'wx782c26e4c19acffb',
            'fun' : 'new',
            'redirect_uri' : 'https://wx.qq.com/cgi-bin/mmwebwx-bin/webwxnewloginpage?mod=desktop',
            'lang' : 'zh_CN' },
        headers={ 'User-Agent' : config.USER_AGENT})
    match = re.search(
        r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)";',
        response.text)
    if match is None or match.group(1) != '200':
        return None
    self.uuid = match.group(2)
    return self.uuid
async def get_QR(self, uuid=None, enableCmdQR=False, picDir=None, qrCallback=None):
    """Render the login QR code for *uuid* (defaults to ``self.uuid``).

    The PNG bytes are handed to *qrCallback* when one is given;
    otherwise they are written to *picDir* and displayed — in the
    terminal when *enableCmdQR* is truthy, else via utils.print_qr.
    Returns the BytesIO buffer holding the PNG.
    """
    uuid = uuid or self.uuid
    picDir = picDir or config.DEFAULT_QR
    qrStorage = io.BytesIO()
    qrCode = QRCode('https://login.weixin.qq.com/l/' + uuid)
    qrCode.png(qrStorage, scale=10)
    if hasattr(qrCallback, '__call__'):
        await qrCallback(uuid=uuid, status='0', qrcode=qrStorage.getvalue())
    else:
        with open(picDir, 'wb') as f:
            f.write(qrStorage.getvalue())
        if enableCmdQR:
            # enableCmdQR may be an int scale factor — presumably consumed
            # by print_cmd_qr; TODO confirm against utils
            utils.print_cmd_qr(qrCode.text(1), enableCmdQR=enableCmdQR)
        else:
            utils.print_qr(picDir)
    return qrStorage
async def check_login(self, uuid=None):
    """Poll the login endpoint once and return its status code as a string.

    '200' means the scan was confirmed AND the redirect info parsed OK,
    '201' scanned but unconfirmed, '408' timeout; '400' covers both an
    unparsable reply and a failed process_login_info.
    """
    uuid = uuid or self.uuid
    now = int(time.time())
    response = self.s.get(
        '%s/cgi-bin/mmwebwx-bin/login' % config.BASE_URL,
        params='loginicon=true&uuid=%s&tip=1&r=%s&_=%s' % (
            uuid, int(-now / 1579), now),
        headers={ 'User-Agent' : config.USER_AGENT})
    match = re.search(r'window.code=(\d+)', response.text)
    if match is None:
        return '400'
    status = match.group(1)
    if status != '200':
        return status
    # a confirmed scan only counts once the redirect info parses
    return '200' if await process_login_info(self, response.text) else '400'
async def web_init(self):
    """Initialise the web session right after login.

    Posts to webwxinit, stores own user info and SyncKey on loginInfo /
    storageClass, and seeds the local contact caches from the
    ContactList included in the reply. Returns the decoded reply dict.
    """
    url = '%s/webwxinit' % self.loginInfo['url']
    params = {
        'r': int(-time.time() / 1579),
        'pass_ticket': self.loginInfo['pass_ticket'], }
    data = { 'BaseRequest': self.loginInfo['BaseRequest'], }
    headers = {
        'ContentType': 'application/json; charset=UTF-8',
        'User-Agent' : config.USER_AGENT, }
    r = self.s.post(url, params=params, data=json.dumps(data), headers=headers)
    dic = json.loads(r.content.decode('utf-8', 'replace'))
    # deal with login info
    utils.emoji_formatter(dic['User'], 'NickName')
    self.loginInfo['InviteStartCount'] = int(dic['InviteStartCount'])
    self.loginInfo['User'] = wrap_user_dict(utils.struct_friend_info(dic['User']))
    self.memberList.append(self.loginInfo['User'])
    self.loginInfo['SyncKey'] = dic['SyncKey']
    # flat pipe-joined Key_Val form used on the synccheck URL
    self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
        for item in dic['SyncKey']['List']])
    self.storageClass.userName = dic['User']['UserName']
    self.storageClass.nickName = dic['User']['NickName']
    # deal with contact list returned when init
    contactList = dic.get('ContactList', [])
    chatroomList, otherList = [], []
    for m in contactList:
        # routing heuristic: '@@' marks chatrooms, a single '@' marks
        # massive platforms; Sex != 0 presumably implies a personal
        # account — TODO confirm
        if m['Sex'] != 0:
            otherList.append(m)
        elif '@@' in m['UserName']:
            m['MemberList'] = [] # don't let dirty info pollute the list
            chatroomList.append(m)
        elif '@' in m['UserName']:
            # mp will be dealt in update_local_friends as well
            otherList.append(m)
    if chatroomList:
        update_local_chatrooms(self, chatroomList)
    if otherList:
        update_local_friends(self, otherList)
    return dic
async def show_mobile_login(self):
    """Send the status notification that marks this account as logged in on web."""
    notifyUrl = '%s/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (
        self.loginInfo['url'], self.loginInfo['pass_ticket'])
    payload = {
        'BaseRequest' : self.loginInfo['BaseRequest'],
        'Code' : 3,
        'FromUserName' : self.storageClass.userName,
        'ToUserName' : self.storageClass.userName,
        'ClientMsgId' : int(time.time()), }
    response = self.s.post(
        notifyUrl,
        data=json.dumps(payload),
        headers={
            'ContentType': 'application/json; charset=UTF-8',
            'User-Agent' : config.USER_AGENT, })
    return ReturnValue(rawResponse=response)
async def start_receiving(self, exitCallback=None, getReceivingFnOnly=False):
    """Start the sync/receive loop.

    The loop polls sync_check: '0' means nothing new, None means the
    session died. New messages are produced and pushed onto
    ``self.msgList``; contact changes are folded into the local caches.
    Transient errors are retried up to ``self.receivingRetryCount``
    times (1 s apart) before the session is logged out and
    *exitCallback* (if callable) is invoked.

    When *getReceivingFnOnly* is set the loop function is returned
    instead of being started on a daemon thread.
    """
    self.alive = True
    def maintain_loop():
        retryCount = 0
        while self.alive:
            try:
                i = sync_check(self)
                if i is None:
                    # server no longer recognises the session
                    self.alive = False
                elif i == '0':
                    pass
                else:
                    msgList, contactList = self.get_msg()
                    if msgList:
                        msgList = produce_msg(self, msgList)
                        for msg in msgList:
                            self.msgList.put(msg)
                    if contactList:
                        chatroomList, otherList = [], []
                        for contact in contactList:
                            if '@@' in contact['UserName']:
                                chatroomList.append(contact)
                            else:
                                otherList.append(contact)
                        chatroomMsg = update_local_chatrooms(self, chatroomList)
                        chatroomMsg['User'] = self.loginInfo['User']
                        self.msgList.put(chatroomMsg)
                        update_local_friends(self, otherList)
                retryCount = 0
            except requests.exceptions.ReadTimeout:
                # long-poll timeout is normal; just poll again
                pass
            except:
                retryCount += 1
                logger.error(traceback.format_exc())
                if self.receivingRetryCount < retryCount:
                    self.alive = False
                else:
                    time.sleep(1)
        self.logout()
        if hasattr(exitCallback, '__call__'):
            exitCallback(self.storageClass.userName)
        else:
            logger.info('LOG OUT!')
    if getReceivingFnOnly:
        return maintain_loop
    else:
        maintainThread = threading.Thread(target=maintain_loop)
        # Thread.setDaemon() is deprecated since Python 3.10; assign the
        # daemon attribute instead (same behavior).
        maintainThread.daemon = True
        maintainThread.start()
def get_msg(self):
    """Pull new messages and contact changes via webwxsync.

    Rotates the deviceid per call, posts the current SyncKey, and on
    success stores the fresh SyncKey / flat synckey string back on
    loginInfo.

    Returns:
        (AddMsgList, ModContactList) from the reply, or (None, None)
        when the server answers with a non-zero Ret.
    """
    self.loginInfo['deviceid'] = 'e' + repr(random.random())[2:17]
    url = '%s/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
        self.loginInfo['url'], self.loginInfo['wxsid'],
        self.loginInfo['skey'],self.loginInfo['pass_ticket'])
    data = {
        'BaseRequest' : self.loginInfo['BaseRequest'],
        'SyncKey' : self.loginInfo['SyncKey'],
        'rr' : ~int(time.time()), }
    headers = {
        'ContentType': 'application/json; charset=UTF-8',
        'User-Agent' : config.USER_AGENT }
    r = self.s.post(url, data=json.dumps(data), headers=headers, timeout=config.TIMEOUT)
    dic = json.loads(r.content.decode('utf-8', 'replace'))
    if dic['BaseResponse']['Ret'] != 0: return None, None
    self.loginInfo['SyncKey'] = dic['SyncKey']
    # NOTE: the flat synckey string is rebuilt from SyncCheckKey, not
    # SyncKey — presumably intentional for the synccheck URL; confirm.
    self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
        for item in dic['SyncCheckKey']['List']])
    return dic['AddMsgList'], dic['ModContactList']
def logout(self):
    """Log out from the web client and reset all local session state."""
    if self.alive:
        # only hit the server when a session is actually active
        self.s.get(
            '%s/webwxlogout' % self.loginInfo['url'],
            params={
                'redirect' : 1,
                'type' : 1,
                'skey' : self.loginInfo['skey'], },
            headers={ 'User-Agent' : config.USER_AGENT})
        self.alive = False
    self.isLogging = False
    # drop cookies and cached contact lists so a later login starts clean
    self.s.cookies.clear()
    del self.chatroomList[:]
    del self.memberList[:]
    del self.mpList[:]
    return ReturnValue({'BaseResponse': {
        'ErrMsg': 'logout successfully.',
        'Ret': 0, }})
def load_login(core):
    """Bind the login-related functions of this module onto *core*."""
    for attrName, fn in (
            ('login', login),
            ('get_QRuuid', get_QRuuid),
            ('get_QR', get_QR),
            ('check_login', check_login),
            ('web_init', web_init),
            ('show_mobile_login', show_mobile_login),
            ('start_receiving', start_receiving),
            ('get_msg', get_msg),
            ('logout', logout)):
        setattr(core, attrName, fn)
9,771 | import logging, traceback, sys, threading
from ..log import set_logging
from ..utils import test_connect
from ..storage import templates
async def auto_login(self, EventScanPayload=None,ScanStatus=None,event_stream=None,
                     hotReload=True, statusStorageDir='itchat.pkl',
                     enableCmdQR=False, picDir=None, qrCallback=None,
                     loginCallback=None, exitCallback=None):
    """Log in, preferring a hot-reloaded session when available.

    With *hotReload* the pickled session in *statusStorageDir* is tried
    first; a fresh QR login only happens when that fails, after which
    the new session is dumped back for next time. Exits the process
    outright when neither the internet nor the wechat domain is
    reachable.
    """
    if not test_connect():
        logger.info("You can't get access to internet or wechat domain, so exit.")
        sys.exit()
    self.useHotReload = hotReload
    self.hotReloadDir = statusStorageDir
    if hotReload:
        # a truthy ReturnValue means the cached session still works
        if await self.load_login_status(statusStorageDir,
                loginCallback=loginCallback, exitCallback=exitCallback):
            return
        await self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback, EventScanPayload=EventScanPayload, ScanStatus=ScanStatus, event_stream=event_stream,
                loginCallback=loginCallback, exitCallback=exitCallback)
        await self.dump_login_status(statusStorageDir)
    else:
        await self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback, EventScanPayload=EventScanPayload, ScanStatus=ScanStatus, event_stream=event_stream,
                loginCallback=loginCallback, exitCallback=exitCallback)
async def configured_reply(self, event_stream, payload, message_container):
    ''' determine the type of message and reply if its method is defined
    however, I use a strange way to determine whether a msg is from massive platform
    I haven't found a better solution here
    The main problem I'm worrying about is the mismatching of new friends added on phone
    If you have any good idea, pleeeease report an issue. I will be more than grateful.
    '''
    # Local import: this module never imported `Queue`, so the previous
    # `except Queue.Empty` raised NameError whenever the queue timed out.
    import queue
    try:
        msg = self.msgList.get(timeout=1)
        if 'MsgId' in msg.keys():
            message_container[msg['MsgId']] = msg
    except queue.Empty:
        pass
    else:
        # replyFn stays None when msg['User'] matches no known template
        # type (previously it was left unbound -> UnboundLocalError).
        replyFn = None
        if isinstance(msg['User'], templates.User):
            replyFn = self.functionDict['FriendChat'].get(msg['Type'])
        elif isinstance(msg['User'], templates.MassivePlatform):
            replyFn = self.functionDict['MpChat'].get(msg['Type'])
        elif isinstance(msg['User'], templates.Chatroom):
            replyFn = self.functionDict['GroupChat'].get(msg['Type'])
        if replyFn is not None:
            try:
                r = await replyFn(msg)
                if r is not None:
                    await self.send(r, msg.get('FromUserName'))
            except:
                # deliberate best-effort: a failing handler must not kill
                # the reply loop; log the traceback and continue
                logger.warning(traceback.format_exc())
def msg_register(self, msgType, isFriendChat=False, isGroupChat=False, isMpChat=False):
    ''' a decorator constructor
    return a specific decorator based on information given '''
    msgTypes = msgType if isinstance(msgType, (list, tuple)) else [msgType]
    categories = []
    if isFriendChat:
        categories.append('FriendChat')
    if isGroupChat:
        categories.append('GroupChat')
    if isMpChat:
        categories.append('MpChat')
    if not categories:
        # no category requested: default to friend chat, as before
        categories.append('FriendChat')
    def _msg_register(fn):
        for category in categories:
            for eachType in msgTypes:
                self.functionDict[category][eachType] = fn
        return fn
    return _msg_register
async def run(self, debug=False, blockThread=True):
    """Start the auto-reply loop, optionally on a daemon thread.

    NOTE(review): ``self.configured_reply()`` is called with no
    arguments below, but configured_reply requires (event_stream,
    payload, message_container) — this looks like a TypeError at
    runtime; confirm the intended call site.
    NOTE(review): reply_fn is a coroutine function; threading.Thread
    will call it and discard the un-awaited coroutine, so the
    non-blocking branch does no work — confirm.
    """
    logger.info('Start auto replying.')
    if debug:
        set_logging(loggingLevel=logging.DEBUG)
    async def reply_fn():
        try:
            while self.alive:
                await self.configured_reply()
        except KeyboardInterrupt:
            if self.useHotReload:
                await self.dump_login_status()
            self.alive = False
            logger.debug('itchat received an ^C and exit.')
            logger.info('Bye~')
    if blockThread:
        await reply_fn()
    else:
        replyThread = threading.Thread(target=reply_fn)
        replyThread.setDaemon(True)
        replyThread.start()
def load_register(core):
    """Bind the auto-reply/registration helpers of this module onto *core*."""
    for attrName, fn in (
            ('auto_login', auto_login),
            ('configured_reply', configured_reply),
            ('msg_register', msg_register),
            ('run', run)):
        setattr(core, attrName, fn)
9,772 | import pickle, os
import logging
import requests
from ..config import VERSION
from ..returnvalues import ReturnValue
from ..storage import templates
from .contact import update_local_chatrooms, update_local_friends
from .messages import produce_msg
async def dump_login_status(self, fileDir=None):
    """Pickle the current session (loginInfo, cookies, storage) to *fileDir*.

    A throwaway probe file is written first so an unusable path fails
    fast with a clear error instead of after serialization.

    Raises:
        Exception('Incorrect fileDir') when the path cannot be written.
    """
    fileDir = fileDir or self.hotReloadDir
    try:
        with open(fileDir, 'w') as f:
            f.write('itchat - DELETE THIS')
        os.remove(fileDir)
    except OSError as e:
        # narrowed from a bare `except:`: only filesystem errors indicate
        # a bad path; keep raising Exception for caller compatibility
        raise Exception('Incorrect fileDir') from e
    status = {
        'version' : VERSION,
        'loginInfo' : self.loginInfo,
        'cookies' : self.s.cookies.get_dict(),
        'storage' : self.storageClass.dumps()}
    with open(fileDir, 'wb') as f:
        pickle.dump(status, f)
    logger.debug('Dump login status for hot reload successfully.')
async def load_login_status(self, fileDir,
        loginCallback=None, exitCallback=None):
    """Try to resume a session previously pickled by dump_login_status.

    Restores loginInfo/cookies/storage from *fileDir*, probes the server
    with get_msg(), and on acceptance replays the buffered messages and
    starts receiving. Returns a ReturnValue: Ret 0 on success, -1002
    (no/unreadable file), -1005 (version mismatch) or -1003 (server
    refused) on failure.
    """
    try:
        with open(fileDir, 'rb') as f:
            j = pickle.load(f)
    except Exception as e:
        logger.debug('No such file, loading login status failed.')
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'No such file, loading login status failed.',
            'Ret': -1002, }})
    if j.get('version', '') != VERSION:
        logger.debug(('you have updated itchat from %s to %s, ' +
            'so cached status is ignored') % (
                j.get('version', 'old version'), VERSION))
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'cached status ignored because of version',
            'Ret': -1005, }})
    self.loginInfo = j['loginInfo']
    self.loginInfo['User'] = templates.User(self.loginInfo['User'])
    self.loginInfo['User'].core = self
    self.s.cookies = requests.utils.cookiejar_from_dict(j['cookies'])
    self.storageClass.loads(j['storage'])
    try:
        msgList, contactList = self.get_msg()
    except:
        msgList = contactList = None
    # get_msg returns (None, None) when the server rejects the session;
    # `(msgList or contactList) is None` is True exactly in that case
    if (msgList or contactList) is None:
        self.logout()
        # load_last_login_status presumably re-seeds cookie state for the
        # next attempt — TODO confirm (defined elsewhere in this module)
        await load_last_login_status(self.s, j['cookies'])
        logger.debug('server refused, loading login status failed.')
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'server refused, loading login status failed.',
            'Ret': -1003, }})
    else:
        if contactList:
            for contact in contactList:
                if '@@' in contact['UserName']:
                    update_local_chatrooms(self, [contact])
                else:
                    update_local_friends(self, [contact])
        if msgList:
            msgList = produce_msg(self, msgList)
            for msg in msgList: self.msgList.put(msg)
        await self.start_receiving(exitCallback)
        logger.debug('loading login status succeeded.')
        if hasattr(loginCallback, '__call__'):
            await loginCallback(self.storageClass.userName)
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'loading login status succeeded.',
            'Ret': 0, }})
def load_hotreload(core):
    """Bind the hot-reload helpers of this module onto *core*."""
    for attrName, fn in (('dump_login_status', dump_login_status),
                         ('load_login_status', load_login_status)):
        setattr(core, attrName, fn)
9,773 | import time
import re
import io
import json
import copy
import logging
from .. import config, utils
from ..returnvalues import ReturnValue
from ..storage import contact_change
from ..utils import update_info_dict
def update_chatroom(self, userName, detailedMember=False):
    """Fetch fresh chatroom info for *userName* (str or list of str).

    Uses webwxbatchgetcontact; with *detailedMember* every member
    profile is additionally resolved in batches of 50. The local
    chatroom cache is updated, then the cached entries are returned —
    a single dict for a single userName, a list otherwise. Returns a
    ReturnValue with Ret -1001 when the server reports no chatrooms.
    """
    if not isinstance(userName, list):
        userName = [userName]
    url = '%s/webwxbatchgetcontact?type=ex&r=%s' % (
        self.loginInfo['url'], int(time.time()))
    headers = {
        'ContentType': 'application/json; charset=UTF-8',
        'User-Agent': config.USER_AGENT}
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Count': len(userName),
        'List': [{
            'UserName': u,
            'ChatRoomId': '', } for u in userName], }
    chatroomList = json.loads(self.s.post(url, data=json.dumps(data), headers=headers
        ).content.decode('utf8', 'replace')).get('ContactList')
    if not chatroomList:
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'No chatroom found',
            'Ret': -1001, }})
    if detailedMember:
        def get_detailed_member_info(encryChatroomId, memberList):
            # one batched webwxbatchgetcontact call for up to 50 members
            url = '%s/webwxbatchgetcontact?type=ex&r=%s' % (
                self.loginInfo['url'], int(time.time()))
            headers = {
                'ContentType': 'application/json; charset=UTF-8',
                'User-Agent': config.USER_AGENT, }
            data = {
                'BaseRequest': self.loginInfo['BaseRequest'],
                'Count': len(memberList),
                'List': [{
                    'UserName': member['UserName'],
                    'EncryChatRoomId': encryChatroomId}
                    for member in memberList], }
            return json.loads(self.s.post(url, data=json.dumps(data), headers=headers
                ).content.decode('utf8', 'replace'))['ContactList']
        MAX_GET_NUMBER = 50
        for chatroom in chatroomList:
            totalMemberList = []
            for i in range(int(len(chatroom['MemberList']) / MAX_GET_NUMBER + 1)):
                memberList = chatroom['MemberList'][i *
                    MAX_GET_NUMBER: (i+1)*MAX_GET_NUMBER]
                totalMemberList += get_detailed_member_info(
                    chatroom['EncryChatRoomId'], memberList)
            chatroom['MemberList'] = totalMemberList
    update_local_chatrooms(self, chatroomList)
    r = [self.storageClass.search_chatrooms(userName=c['UserName'])
        for c in chatroomList]
    return r if 1 < len(r) else r[0]
def update_friend(self, userName):
    """Fetch fresh profiles for *userName* (str or list of str).

    Updates the local friend cache and returns the cached entries —
    a single dict for a single userName, a list otherwise. Returns a
    ReturnValue with Ret -1001 when the server reports no contacts.
    """
    if not isinstance(userName, list):
        userName = [userName]
    url = '%s/webwxbatchgetcontact?type=ex&r=%s' % (
        self.loginInfo['url'], int(time.time()))
    headers = {
        'ContentType': 'application/json; charset=UTF-8',
        'User-Agent': config.USER_AGENT}
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Count': len(userName),
        'List': [{
            'UserName': u,
            'EncryChatRoomId': '', } for u in userName], }
    friendList = json.loads(self.s.post(url, data=json.dumps(data), headers=headers
        ).content.decode('utf8', 'replace')).get('ContactList')
    if not friendList:
        # mirrors update_chatroom: a missing/empty ContactList previously
        # crashed inside update_local_friends instead of reporting failure
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'No friend found',
            'Ret': -1001, }})
    update_local_friends(self, friendList)
    r = [self.storageClass.search_friends(userName=f['UserName'])
        for f in friendList]
    return r if len(r) != 1 else r[0]
def get_contact(self, update=False):
    """Return a deep copy of the chatroom contact cache.

    With *update* the full contact list is re-fetched (paged via the
    reply's Seq cursor) and the local friend/MP/chatroom caches are
    rebuilt first.

    NOTE(review): both branches return chatrooms only — refreshed
    friends are reachable via get_friends(); confirm this asymmetry
    is intended.
    """
    if not update:
        return utils.contact_deep_copy(self, self.chatroomList)
    def _get_contact(seq=0):
        # fetch one page of webwxgetcontact; returns (next_seq, members)
        url = '%s/webwxgetcontact?r=%s&seq=%s&skey=%s' % (self.loginInfo['url'],
            int(time.time()), seq, self.loginInfo['skey'])
        headers = {
            'ContentType': 'application/json; charset=UTF-8',
            'User-Agent': config.USER_AGENT, }
        try:
            r = self.s.get(url, headers=headers)
        except:
            logger.info(
                'Failed to fetch contact, that may because of the amount of your chatrooms')
            # fall back to refreshing each known chatroom individually
            for chatroom in self.get_chatrooms():
                self.update_chatroom(chatroom['UserName'], detailedMember=True)
            return 0, []
        j = json.loads(r.content.decode('utf-8', 'replace'))
        # 'MemberList' may be missing or null in the reply; normalise to a
        # list so the caller's extend() cannot crash on None
        return j.get('Seq', 0), j.get('MemberList') or []
    seq, memberList = 0, []
    while 1:
        seq, batchMemberList = _get_contact(seq)
        memberList.extend(batchMemberList)
        if seq == 0:
            break
    chatroomList, otherList = [], []
    for m in memberList:
        if m['Sex'] != 0:
            otherList.append(m)
        elif '@@' in m['UserName']:
            chatroomList.append(m)
        elif '@' in m['UserName']:
            # mp will be dealt in update_local_friends as well
            otherList.append(m)
    if chatroomList:
        update_local_chatrooms(self, chatroomList)
    if otherList:
        update_local_friends(self, otherList)
    return utils.contact_deep_copy(self, chatroomList)
def get_friends(self, update=False):
    """Return a deep copy of the cached friend list.

    When *update* is truthy the contact list is re-fetched from the
    server before copying.
    """
    if update:
        self.get_contact(update=True)
    return utils.contact_deep_copy(self, self.memberList)
def get_chatrooms(self, update=False, contactOnly=False):
    """Return chatrooms.

    *contactOnly* forces a refresh and returns only chatrooms saved as
    contacts; otherwise the cached list is returned, refreshed first
    when *update* is set.
    """
    if contactOnly:
        return self.get_contact(update=True)
    if update:
        self.get_contact(True)
    return utils.contact_deep_copy(self, self.chatroomList)
def get_mps(self, update=False):
    """Return a deep copy of the cached massive-platform list.

    When *update* is truthy the contact list is re-fetched first.
    """
    if update:
        self.get_contact(update=True)
    return utils.contact_deep_copy(self, self.mpList)
def set_alias(self, userName, alias):
    """Set the remark name (alias) of a friend via webwxoplog (CmdId 2).

    Fails fast with Ret -1001 when *userName* is not in the local
    member list; on a successful server reply the cached RemarkName is
    updated as well. Returns a ReturnValue.
    """
    oldFriendInfo = utils.search_dict_list(
        self.memberList, 'UserName', userName)
    if oldFriendInfo is None:
        return ReturnValue({'BaseResponse': {
            'Ret': -1001, }})
    url = '%s/webwxoplog?lang=%s&pass_ticket=%s' % (
        self.loginInfo['url'], 'zh_CN', self.loginInfo['pass_ticket'])
    data = {
        'UserName': userName,
        'CmdId': 2,
        'RemarkName': alias,
        'BaseRequest': self.loginInfo['BaseRequest'], }
    headers = {'User-Agent': config.USER_AGENT}
    r = self.s.post(url, json.dumps(data, ensure_ascii=False).encode('utf8'),
        headers=headers)
    r = ReturnValue(rawResponse=r)
    if r:
        # keep the local cache consistent with the server
        oldFriendInfo['RemarkName'] = alias
    return r
def set_pinned(self, userName, isPinned=True):
    """Pin (or unpin) a conversation to the top of the chat list (CmdId 3)."""
    payload = {
        'UserName': userName,
        'CmdId': 3,
        'OP': int(isPinned),
        'BaseRequest': self.loginInfo['BaseRequest'], }
    response = self.s.post(
        '%s/webwxoplog?pass_ticket=%s' % (
            self.loginInfo['url'], self.loginInfo['pass_ticket']),
        json=payload,
        headers={'User-Agent': config.USER_AGENT})
    return ReturnValue(rawResponse=response)
def accept_friend(self, userName, v4='', autoUpdate=True):
    """Accept a friend request via webwxverifyuser.

    *v4* is the verify ticket carried by the friend-request message;
    Opcode 3 means 'verify and add'. When *autoUpdate* is set, the new
    friend's profile is fetched into the local cache after acceptance.
    Returns a ReturnValue.
    """
    url = f"{self.loginInfo['url']}/webwxverifyuser?r={int(time.time())}&pass_ticket={self.loginInfo['pass_ticket']}"
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Opcode': 3, # 3
        'VerifyUserListSize': 1,
        'VerifyUserList': [{
            'Value': userName,
            'VerifyUserTicket': v4, }],
        'VerifyContent': '',
        'SceneListCount': 1,
        'SceneList': [33],
        'skey': self.loginInfo['skey'], }
    headers = {
        'ContentType': 'application/json; charset=UTF-8',
        'User-Agent': config.USER_AGENT}
    r = self.s.post(url, headers=headers,
        data=json.dumps(data, ensure_ascii=False).encode('utf8', 'replace'))
    if autoUpdate:
        self.update_friend(userName)
    return ReturnValue(rawResponse=r)
def get_head_img(self, userName=None, chatroomUserName=None, picDir=None):
    ''' get head image
    * if you want to get chatroom header: only set chatroomUserName
    * if you want to get friend header: only set userName
    * if you want to get chatroom member header: set both

    Returns the raw image bytes when picDir is None, otherwise writes
    the file and returns a ReturnValue including the image PostFix.
    '''
    params = {
        'userName': userName or chatroomUserName or self.storageClass.userName,
        'skey': self.loginInfo['skey'],
        'type': 'big', }
    url = '%s/webwxgeticon' % self.loginInfo['url']
    if chatroomUserName is None:
        infoDict = self.storageClass.search_friends(userName=userName)
        if infoDict is None:
            return ReturnValue({'BaseResponse': {
                'ErrMsg': 'No friend found',
                'Ret': -1001, }})
    else:
        if userName is None:
            url = '%s/webwxgetheadimg' % self.loginInfo['url']
        else:
            chatroom = self.storageClass.search_chatrooms(
                userName=chatroomUserName)
            # BUGFIX: previously tested `chatroomUserName is None`, which
            # is always False in this branch, so an unknown chatroom fell
            # through to a KeyError below instead of this error return.
            if chatroom is None:
                return ReturnValue({'BaseResponse': {
                    'ErrMsg': 'No chatroom found',
                    'Ret': -1001, }})
            if 'EncryChatRoomId' in chatroom:
                params['chatroomid'] = chatroom['EncryChatRoomId']
            params['chatroomid'] = params.get(
                'chatroomid') or chatroom['UserName']
    headers = {'User-Agent': config.USER_AGENT}
    r = self.s.get(url, params=params, stream=True, headers=headers)
    tempStorage = io.BytesIO()
    for block in r.iter_content(1024):
        tempStorage.write(block)
    if picDir is None:
        return tempStorage.getvalue()
    with open(picDir, 'wb') as f:
        f.write(tempStorage.getvalue())
    tempStorage.seek(0)
    return ReturnValue({'BaseResponse': {
        'ErrMsg': 'Successfully downloaded',
        'Ret': 0, },
        'PostFix': utils.get_image_postfix(tempStorage.read(20)), })
def create_chatroom(self, memberList, topic=''):
    """Create a chatroom from a comma-separated string of member UserNames."""
    members = [{'UserName': member} for member in memberList.split(',')]
    payload = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'MemberCount': len(members),
        'MemberList': members,
        'Topic': topic, }
    response = self.s.post(
        '%s/webwxcreatechatroom?pass_ticket=%s&r=%s' % (
            self.loginInfo['url'], self.loginInfo['pass_ticket'], int(time.time())),
        headers={
            'content-type': 'application/json; charset=UTF-8',
            'User-Agent': config.USER_AGENT},
        data=json.dumps(payload, ensure_ascii=False).encode('utf8', 'ignore'))
    return ReturnValue(rawResponse=response)
def set_chatroom_name(self, chatroomUserName, name):
    """Rename a chatroom via webwxupdatechatroom (fun=modtopic)."""
    payload = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'ChatRoomName': chatroomUserName,
        'NewTopic': name, }
    response = self.s.post(
        '%s/webwxupdatechatroom?fun=modtopic&pass_ticket=%s' % (
            self.loginInfo['url'], self.loginInfo['pass_ticket']),
        headers={
            'content-type': 'application/json; charset=UTF-8',
            'User-Agent': config.USER_AGENT},
        data=json.dumps(payload, ensure_ascii=False).encode('utf8', 'ignore'))
    return ReturnValue(rawResponse=response)
def delete_member_from_chatroom(self, chatroomUserName, memberList):
    """Remove members (dicts carrying 'UserName') from a chatroom (fun=delmember)."""
    doomed = ','.join(member['UserName'] for member in memberList)
    payload = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'ChatRoomName': chatroomUserName,
        'DelMemberList': doomed, }
    response = self.s.post(
        '%s/webwxupdatechatroom?fun=delmember&pass_ticket=%s' % (
            self.loginInfo['url'], self.loginInfo['pass_ticket']),
        data=json.dumps(payload),
        headers={
            'content-type': 'application/json; charset=UTF-8',
            'User-Agent': config.USER_AGENT})
    return ReturnValue(rawResponse=response)
def add_member_into_chatroom(self, chatroomUserName, memberList,
        useInvitation=False):
    ''' add or invite member into chatroom
    * there are two ways to get members into chatroom: invite or directly add
    * but for chatrooms with more than 40 users, you can only use invite
    * but don't worry we will auto-force userInvitation for you when necessary
    * the 40-member threshold is read from loginInfo['InviteStartCount']
    * NOTE(review): memberList is passed through to the request as-is;
      the server presumably expects a comma-joined UserName string like
      delete_member_from_chatroom sends — TODO confirm the expected type
    '''
    if not useInvitation:
        chatroom = self.storageClass.search_chatrooms(
            userName=chatroomUserName)
        if not chatroom:
            chatroom = self.update_chatroom(chatroomUserName)
        # beyond InviteStartCount members the server rejects direct adds
        if len(chatroom['MemberList']) > self.loginInfo['InviteStartCount']:
            useInvitation = True
    if useInvitation:
        fun, memberKeyName = 'invitemember', 'InviteMemberList'
    else:
        fun, memberKeyName = 'addmember', 'AddMemberList'
    url = '%s/webwxupdatechatroom?fun=%s&pass_ticket=%s' % (
        self.loginInfo['url'], fun, self.loginInfo['pass_ticket'])
    params = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'ChatRoomName': chatroomUserName,
        memberKeyName: memberList, }
    headers = {
        'content-type': 'application/json; charset=UTF-8',
        'User-Agent': config.USER_AGENT}
    r = self.s.post(url, data=json.dumps(params), headers=headers)
    return ReturnValue(rawResponse=r)
def load_contact(core):
    """Bind the contact-related functions of this module onto *core*."""
    for attrName, fn in (
            ('update_chatroom', update_chatroom),
            ('update_friend', update_friend),
            ('get_contact', get_contact),
            ('get_friends', get_friends),
            ('get_chatrooms', get_chatrooms),
            ('get_mps', get_mps),
            ('set_alias', set_alias),
            ('set_pinned', set_pinned),
            ('accept_friend', accept_friend),
            ('get_head_img', get_head_img),
            ('create_chatroom', create_chatroom),
            ('set_chatroom_name', set_chatroom_name),
            ('delete_member_from_chatroom', delete_member_from_chatroom),
            ('add_member_into_chatroom', add_member_into_chatroom)):
        setattr(core, attrName, fn)
9,774 | import os, time, re, io
import json
import mimetypes, hashlib
import logging
from collections import OrderedDict
import requests
from .. import config, utils
from ..returnvalues import ReturnValue
from ..storage import templates
from .contact import update_local_uin
def send_raw_msg(self, msgType, content, toUserName):
    """Post a message of arbitrary *msgType* via webwxsendmsg.

    LocalID/ClientMsgId are timestamp-derived client ids; a falsy
    *toUserName* sends the message to one's own account. Returns a
    ReturnValue wrapping the raw response.
    """
    url = '%s/webwxsendmsg' % self.loginInfo['url']
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Msg': {
            'Type': msgType,
            'Content': content,
            'FromUserName': self.storageClass.userName,
            'ToUserName': (toUserName if toUserName else self.storageClass.userName),
            'LocalID': int(time.time() * 1e4),
            'ClientMsgId': int(time.time() * 1e4),
        },
        'Scene': 0, }
    headers = { 'ContentType': 'application/json; charset=UTF-8', 'User-Agent' : config.USER_AGENT }
    r = self.s.post(url, headers=headers,
        data=json.dumps(data, ensure_ascii=False).encode('utf8'))
    return ReturnValue(rawResponse=r)
def send_msg(self, msg='Test Message', toUserName=None):
    """Send a plain-text message (msgType 1); returns the ReturnValue."""
    logger.debug('Request to send a text message to %s: %s' % (toUserName, msg))
    return self.send_raw_msg(1, msg, toUserName)
def upload_file(self, fileDir, isPicture=False, isVideo=False,
        toUserName='filehelper', file_=None, preparedFile=None):
    """Upload a file to the media endpoint in 512 KB chunks.

    *preparedFile* (from _prepare_file) may be passed to skip
    re-reading size/md5; otherwise it is built from *fileDir*/*file_*.
    Returns a ReturnValue — on success it carries the MediaId of the
    last uploaded chunk response.
    """
    logger.debug('Request to upload a %s: %s' % (
        'picture' if isPicture else 'video' if isVideo else 'file', fileDir))
    if not preparedFile:
        preparedFile = _prepare_file(fileDir, file_)
    if not preparedFile:
        return preparedFile
    fileSize, fileMd5, file_ = \
        preparedFile['fileSize'], preparedFile['fileMd5'], preparedFile['file_']
    fileSymbol = 'pic' if isPicture else 'video' if isVideo else'doc'
    # 524288 = 512 KB per uploaded chunk
    chunks = int((fileSize - 1) / 524288) + 1
    clientMediaId = int(time.time() * 1e4)
    # OrderedDict + compact separators: the server is sensitive to the
    # exact serialized form of UploadMediaRequest
    uploadMediaRequest = json.dumps(OrderedDict([
        ('UploadType', 2),
        ('BaseRequest', self.loginInfo['BaseRequest']),
        ('ClientMediaId', clientMediaId),
        ('TotalLen', fileSize),
        ('StartPos', 0),
        ('DataLen', fileSize),
        ('MediaType', 4),
        ('FromUserName', self.storageClass.userName),
        ('ToUserName', toUserName),
        ('FileMd5', fileMd5)]
        ), separators = (',', ':'))
    # default result when chunks == 0 (empty file)
    r = {'BaseResponse': {'Ret': -1005, 'ErrMsg': 'Empty file detected'}}
    for chunk in range(chunks):
        r = upload_chunk_file(self, fileDir, fileSymbol, fileSize,
            file_, chunk, chunks, uploadMediaRequest)
    file_.close()
    if isinstance(r, dict):
        return ReturnValue(r)
    return ReturnValue(rawResponse=r)
def send_file(self, fileDir, toUserName=None, mediaId=None, file_=None):
    """Send a file as an appmsg attachment (Type 6).

    When *mediaId* is None the file at *fileDir* (or the open *file_*)
    is uploaded first; a previously obtained mediaId skips the upload.
    Returns a ReturnValue.
    """
    logger.debug('Request to send a file(mediaId: %s) to %s: %s' % (
        mediaId, toUserName, fileDir))
    if hasattr(fileDir, 'read'):
        # open files must go through file_, since fileDir supplies the name
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'fileDir param should not be an opened file in send_file',
            'Ret': -1005, }})
    if toUserName is None:
        toUserName = self.storageClass.userName
    preparedFile = _prepare_file(fileDir, file_)
    if not preparedFile:
        return preparedFile
    fileSize = preparedFile['fileSize']
    if mediaId is None:
        r = self.upload_file(fileDir, preparedFile=preparedFile)
        if r:
            mediaId = r['MediaId']
        else:
            return r
    url = '%s/webwxsendappmsg?fun=async&f=json' % self.loginInfo['url']
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Msg': {
            'Type': 6,
            # appmsg XML: title = basename, attachment carries total
            # length, mediaId and the bare (dot-less) extension
            'Content': ("<appmsg appid='wxeb7ec651dd0aefa9' sdkver=''><title>%s</title>" % os.path.basename(fileDir) +
                "<des></des><action></action><type>6</type><content></content><url></url><lowurl></lowurl>" +
                "<appattach><totallen>%s</totallen><attachid>%s</attachid>" % (str(fileSize), mediaId) +
                "<fileext>%s</fileext></appattach><extinfo></extinfo></appmsg>" % os.path.splitext(fileDir)[1].replace('.','')),
            'FromUserName': self.storageClass.userName,
            'ToUserName': toUserName,
            'LocalID': int(time.time() * 1e4),
            'ClientMsgId': int(time.time() * 1e4), },
        'Scene': 0, }
    headers = {
        'User-Agent': config.USER_AGENT,
        'Content-Type': 'application/json;charset=UTF-8', }
    r = self.s.post(url, headers=headers,
        data=json.dumps(data, ensure_ascii=False).encode('utf8'))
    return ReturnValue(rawResponse=r)
def send_image(self, fileDir=None, toUserName=None, mediaId=None, file_=None):
    """Send an image (or animated gif) message.

    Accepts a path, an open file (passed as fileDir or file_), or a
    previously uploaded *mediaId*. Paths ending in '.gif' are sent as
    emoticons (Type 47) through webwxsendemoticon rather than
    webwxsendmsgimg. Returns a ReturnValue.
    """
    logger.debug('Request to send a image(mediaId: %s) to %s: %s' % (
        mediaId, toUserName, fileDir))
    if fileDir or file_:
        if hasattr(fileDir, 'read'):
            # an open file was passed positionally; shift it into file_
            file_, fileDir = fileDir, None
        if fileDir is None:
            fileDir = 'tmp.jpg' # specific fileDir to send gifs
    else:
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'Either fileDir or file_ should be specific',
            'Ret': -1005, }})
    if toUserName is None:
        toUserName = self.storageClass.userName
    if mediaId is None:
        r = self.upload_file(fileDir, isPicture=not fileDir[-4:] == '.gif', file_=file_)
        if r:
            mediaId = r['MediaId']
        else:
            return r
    url = '%s/webwxsendmsgimg?fun=async&f=json' % self.loginInfo['url']
    data = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Msg': {
            'Type': 3,
            'MediaId': mediaId,
            'FromUserName': self.storageClass.userName,
            'ToUserName': toUserName,
            'LocalID': int(time.time() * 1e4),
            'ClientMsgId': int(time.time() * 1e4), },
        'Scene': 0, }
    if fileDir[-4:] == '.gif':
        # gifs use the emoticon endpoint with a different msg type
        url = '%s/webwxsendemoticon?fun=sys' % self.loginInfo['url']
        data['Msg']['Type'] = 47
        data['Msg']['EmojiFlag'] = 2
    headers = {
        'User-Agent': config.USER_AGENT,
        'Content-Type': 'application/json;charset=UTF-8', }
    r = self.s.post(url, headers=headers,
        data=json.dumps(data, ensure_ascii=False).encode('utf8'))
    return ReturnValue(rawResponse=r)
def send_video(self, fileDir=None, toUserName=None, mediaId=None, file_=None):
    ''' Send a video message; parameter semantics match send_image. '''
    logger.debug('Request to send a video(mediaId: %s) to %s: %s' % (
        mediaId, toUserName, fileDir))
    if not (fileDir or file_):
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'Either fileDir or file_ should be specific',
            'Ret': -1005, }})
    if hasattr(fileDir, 'read'):
        # a file-like object was passed in the fileDir slot
        file_, fileDir = fileDir, None
    if fileDir is None:
        fileDir = 'tmp.mp4' # specific fileDir to send other formats
    if toUserName is None:
        toUserName = self.storageClass.userName
    if mediaId is None:
        uploadResult = self.upload_file(fileDir, isVideo=True, file_=file_)
        if not uploadResult:
            return uploadResult
        mediaId = uploadResult['MediaId']
    url = '%s/webwxsendvideomsg?fun=async&f=json&pass_ticket=%s' % (
        self.loginInfo['url'], self.loginInfo['pass_ticket'])
    stamp = int(time.time() * 1e4)
    payload = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Msg': {
            'Type': 43,
            'MediaId': mediaId,
            'FromUserName': self.storageClass.userName,
            'ToUserName': toUserName,
            'LocalID': stamp,
            'ClientMsgId': stamp, },
        'Scene': 0, }
    headers = {
        'User-Agent': config.USER_AGENT,
        'Content-Type': 'application/json;charset=UTF-8', }
    r = self.s.post(url, headers=headers,
        data=json.dumps(payload, ensure_ascii=False).encode('utf8'))
    return ReturnValue(rawResponse=r)
def send(self, msg, toUserName=None, mediaId=None):
    ''' Dispatch a message by its prefix.

    '@fil@', '@img@', '@vid@' route to the matching sender (forwarding
    mediaId when given); '@msg@' strips the prefix and sends text; any
    other content is sent as plain text.
    '''
    if not msg:
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'No message.',
            'Ret': -1005, }})
    prefix, body = msg[:5], msg[5:]
    if prefix == '@msg@':
        return self.send_msg(body, toUserName)
    mediaSenders = {
        '@fil@': self.send_file,
        '@img@': self.send_image,
        '@vid@': self.send_video, }
    sender = mediaSenders.get(prefix)
    if sender is None:
        return self.send_msg(msg, toUserName)
    if mediaId is None:
        return sender(body, toUserName)
    return sender(body, toUserName, mediaId)
def revoke(self, msgId, toUserName, localId=None):
    ''' Revoke a previously sent message identified by its server-side id. '''
    url = '%s/webwxrevokemsg' % self.loginInfo['url']
    headers = {
        'ContentType': 'application/json; charset=UTF-8',
        'User-Agent' : config.USER_AGENT }
    payload = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        # fall back to a timestamp-based client id when none is given
        "ClientMsgId": localId or str(time.time() * 1e3),
        "SvrMsgId": msgId,
        "ToUserName": toUserName}
    r = self.s.post(url, headers=headers,
        data=json.dumps(payload, ensure_ascii=False).encode('utf8'))
    return ReturnValue(rawResponse=r)
def load_messages(core):
    ''' Attach the message-related functions above as methods of core. '''
    for method in (send_raw_msg, send_msg, upload_file, send_file,
                   send_image, send_video, send, revoke):
        setattr(core, method.__name__, method)
9,775 | import os
import time
import re
import io
import threading
import json
import xml.dom.minidom
import random
import traceback
import logging
import requests
from pyqrcode import QRCode
from .. import config, utils
from ..returnvalues import ReturnValue
from ..storage.templates import wrap_user_dict
from .contact import update_local_chatrooms, update_local_friends
from .messages import produce_msg
def login(self, enableCmdQR=False, picDir=None, qrCallback=None,
        loginCallback=None, exitCallback=None):
    ''' Interactive QR-code login loop.

    Repeatedly obtains a login uuid (preferring push login, which skips
    the QR scan for a remembered device), shows the QR code, and polls
    check_login until the scan is confirmed; then initializes the web
    session and starts the receiving thread.

    Args:
        enableCmdQR: render the QR code in the terminal instead of an
            image viewer.
        picDir: where to store the QR png (defaults to config.DEFAULT_QR).
        qrCallback: callable(uuid=..., status=..., qrcode=...) invoked on
            every status poll.
        loginCallback: called once after a successful login; when absent,
            the QR file is removed and a success line is logged instead.
        exitCallback: forwarded to start_receiving, invoked on logout.
    '''
    if self.alive or self.isLogging:
        logger.warning('itchat has already logged in.')
        return
    self.isLogging = True
    logger.info('Ready to login.')
    while self.isLogging:
        # push login reuses a remembered device, so no QR scan is needed
        uuid = push_login(self)
        if uuid:
            qrStorage = io.BytesIO()
        else:
            logger.info('Getting uuid of QR code.')
            while not self.get_QRuuid():
                time.sleep(1)
            logger.info('Downloading QR code.')
            qrStorage = self.get_QR(enableCmdQR=enableCmdQR,
                picDir=picDir, qrCallback=qrCallback)
        # logger.info('Please scan the QR code to log in.')
        # tri-state: False = waiting for scan, None = scanned but not yet
        # confirmed on the phone, True = confirmed
        isLoggedIn = False
        while not isLoggedIn:
            status = self.check_login()
            if hasattr(qrCallback, '__call__'):
                qrCallback(uuid=self.uuid, status=status,
                    qrcode=qrStorage.getvalue())
            if status == '200':
                isLoggedIn = True
            elif status == '201':
                # scanned; prompt once, then keep polling more slowly
                if isLoggedIn is not None:
                    logger.info('Please press confirm on your phone.')
                    isLoggedIn = None
                    time.sleep(7)
                time.sleep(0.5)
            elif status != '408':
                # anything but the normal poll timeout: fetch a fresh QR
                break
        if isLoggedIn:
            break
        elif self.isLogging:
            logger.info('Log in time out, reloading QR code.')
    else:
        # while/else: reached only when isLogging was cleared externally
        return # log in process is stopped by user
    logger.info('Loading the contact, this may take a little while.')
    self.web_init()
    self.show_mobile_login()
    self.get_contact(True)
    if hasattr(loginCallback, '__call__'):
        r = loginCallback()
    else:
        # utils.clear_screen()
        if os.path.exists(picDir or config.DEFAULT_QR):
            os.remove(picDir or config.DEFAULT_QR)
        logger.info('Login successfully as %s' % self.storageClass.nickName)
    self.start_receiving(exitCallback)
    self.isLogging = False
def get_QRuuid(self):
    ''' Fetch a fresh uuid for the login QR code from the jslogin endpoint.

    On success stores the uuid on self and returns it; returns None when
    the response code is not 200 or the body cannot be parsed.
    '''
    url = '%s/jslogin' % config.BASE_URL
    params = {
        'appid': 'wx782c26e4c19acffb',
        'fun': 'new',
        'redirect_uri': 'https://wx.qq.com/cgi-bin/mmwebwx-bin/webwxnewloginpage?mod=desktop',
        'lang': 'zh_CN'}
    headers = {'User-Agent': config.USER_AGENT}
    response = self.s.get(url, params=params, headers=headers)
    match = re.search(
        r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)";',
        response.text)
    if match is None or match.group(1) != '200':
        return None
    self.uuid = match.group(2)
    return self.uuid
def get_QR(self, uuid=None, enableCmdQR=False, picDir=None, qrCallback=None):
    ''' Render the login QR code.

    The PNG bytes are always produced in memory and returned; they are
    handed to qrCallback when one is given, otherwise written to picDir
    and displayed (in the terminal when enableCmdQR, else via the
    platform image viewer).
    '''
    uuid = uuid or self.uuid
    picDir = picDir or config.DEFAULT_QR
    qrCode = QRCode('https://login.weixin.qq.com/l/' + uuid)
    qrStorage = io.BytesIO()
    qrCode.png(qrStorage, scale=10)
    if hasattr(qrCallback, '__call__'):
        qrCallback(uuid=uuid, status='0', qrcode=qrStorage.getvalue())
        return qrStorage
    with open(picDir, 'wb') as f:
        f.write(qrStorage.getvalue())
    if enableCmdQR:
        utils.print_cmd_qr(qrCode.text(1), enableCmdQR=enableCmdQR)
    else:
        utils.print_qr(picDir)
    return qrStorage
def check_login(self, uuid=None):
    ''' Poll the login endpoint once and report its status code.

    Returns '200' once the scan is confirmed and the login info was
    processed successfully, '400' when the response is unparseable or
    processing failed, otherwise the raw code from the server
    ('201' = scanned, '408' = poll timeout, ...).
    '''
    uuid = uuid or self.uuid
    url = '%s/cgi-bin/mmwebwx-bin/login' % config.BASE_URL
    localTime = int(time.time())
    params = 'loginicon=true&uuid=%s&tip=1&r=%s&_=%s' % (
        uuid, int(-localTime / 1579), localTime)
    headers = {'User-Agent': config.USER_AGENT}
    response = self.s.get(url, params=params, headers=headers)
    match = re.search(r'window.code=(\d+)', response.text)
    if match is None:
        return '400'
    code = match.group(1)
    if code != '200':
        return code
    # scan confirmed: the body carries the redirect/login info to store
    return '200' if process_login_info(self, response.text) else '400'
def web_init(self):
    ''' Initialize the web session after login.

    Calls webwxinit, stores the profile and initial SyncKey on
    loginInfo/storageClass, and seeds the local contact caches from the
    ContactList the server returns.

    Returns:
        the parsed webwxinit response dict.
    '''
    url = '%s/webwxinit' % self.loginInfo['url']
    params = {
        'r': int(-time.time() / 1579),
        'pass_ticket': self.loginInfo['pass_ticket'], }
    data = {'BaseRequest': self.loginInfo['BaseRequest'], }
    headers = {
        'ContentType': 'application/json; charset=UTF-8',
        'User-Agent': config.USER_AGENT, }
    r = self.s.post(url, params=params, data=json.dumps(data), headers=headers)
    dic = json.loads(r.content.decode('utf-8', 'replace'))
    # deal with login info
    utils.emoji_formatter(dic['User'], 'NickName')
    self.loginInfo['InviteStartCount'] = int(dic['InviteStartCount'])
    self.loginInfo['User'] = wrap_user_dict(
        utils.struct_friend_info(dic['User']))
    self.memberList.append(self.loginInfo['User'])
    self.loginInfo['SyncKey'] = dic['SyncKey']
    # flattened "Key_Val|Key_Val" form used by the synccheck endpoint
    self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
        for item in dic['SyncKey']['List']])
    self.storageClass.userName = dic['User']['UserName']
    self.storageClass.nickName = dic['User']['NickName']
    # deal with contact list returned when init
    contactList = dic.get('ContactList', [])
    chatroomList, otherList = [], []
    for m in contactList:
        if m['Sex'] != 0:
            # NOTE(review): non-zero Sex appears to mark personal accounts
            # (friends) — confirm against the protocol
            otherList.append(m)
        elif '@@' in m['UserName']:
            m['MemberList'] = [] # don't let dirty info pollute the list
            chatroomList.append(m)
        elif '@' in m['UserName']:
            # mp will be dealt in update_local_friends as well
            otherList.append(m)
    if chatroomList:
        update_local_chatrooms(self, chatroomList)
    if otherList:
        update_local_friends(self, otherList)
    return dic
def show_mobile_login(self):
    ''' Send a status-notify (Code 3) so the server marks this web
    session active; the phone then shows the "logged in on web" banner. '''
    url = '%s/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (
        self.loginInfo['url'], self.loginInfo['pass_ticket'])
    headers = {
        'ContentType': 'application/json; charset=UTF-8',
        'User-Agent': config.USER_AGENT, }
    payload = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'Code': 3,
        'FromUserName': self.storageClass.userName,
        'ToUserName': self.storageClass.userName,
        'ClientMsgId': int(time.time()), }
    response = self.s.post(url, data=json.dumps(payload), headers=headers)
    return ReturnValue(rawResponse=response)
def start_receiving(self, exitCallback=None, getReceivingFnOnly=False):
    ''' Start the long-polling receive loop.

    The loop sync-checks the server, pulls new messages into
    self.msgList, and applies contact updates. Consecutive failures
    beyond self.receivingRetryCount stop the loop; it always finishes
    with logout() and then exitCallback (or a log line).

    Args:
        exitCallback: called after logout when the loop ends.
        getReceivingFnOnly: return the loop function instead of starting
            it in a daemon thread (useful for custom scheduling).
    '''
    self.alive = True
    def maintain_loop():
        retryCount = 0
        while self.alive:
            try:
                i = sync_check(self)
                if i is None:
                    # sync_check says the session is gone
                    self.alive = False
                elif i == '0':
                    pass  # nothing new this round
                else:
                    msgList, contactList = self.get_msg()
                    if msgList:
                        msgList = produce_msg(self, msgList)
                        for msg in msgList:
                            self.msgList.put(msg)
                    if contactList:
                        chatroomList, otherList = [], []
                        for contact in contactList:
                            if '@@' in contact['UserName']:
                                chatroomList.append(contact)
                            else:
                                otherList.append(contact)
                        chatroomMsg = update_local_chatrooms(
                            self, chatroomList)
                        chatroomMsg['User'] = self.loginInfo['User']
                        self.msgList.put(chatroomMsg)
                        update_local_friends(self, otherList)
                retryCount = 0
            except requests.exceptions.ReadTimeout:
                pass
            except Exception:
                # fix: was a bare "except:"; narrowed so SystemExit and
                # KeyboardInterrupt are no longer swallowed in this thread
                retryCount += 1
                logger.error(traceback.format_exc())
                if self.receivingRetryCount < retryCount:
                    logger.error("Having tried %s times, but still failed. " % (
                        retryCount) + "Stop trying...")
                    self.alive = False
                else:
                    time.sleep(1)
        self.logout()
        if hasattr(exitCallback, '__call__'):
            exitCallback()
        else:
            logger.info('LOG OUT!')
    if getReceivingFnOnly:
        return maintain_loop
    else:
        maintainThread = threading.Thread(target=maintain_loop)
        # fix: Thread.setDaemon() is deprecated since Python 3.10
        maintainThread.daemon = True
        maintainThread.start()
def get_msg(self):
    ''' Pull new messages and contact changes via webwxsync.

    Returns (AddMsgList, ModContactList) on success, or (None, None)
    when the server reports a non-zero Ret.
    '''
    # rotate the deviceid on every request, as the web client does
    self.loginInfo['deviceid'] = 'e' + repr(random.random())[2:17]
    url = '%s/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
        self.loginInfo['url'], self.loginInfo['wxsid'],
        self.loginInfo['skey'], self.loginInfo['pass_ticket'])
    payload = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'SyncKey': self.loginInfo['SyncKey'],
        'rr': ~int(time.time()), }
    headers = {
        'ContentType': 'application/json; charset=UTF-8',
        'User-Agent': config.USER_AGENT}
    response = self.s.post(url, data=json.dumps(payload),
        headers=headers, timeout=config.TIMEOUT)
    parsed = json.loads(response.content.decode('utf-8', 'replace'))
    if parsed['BaseResponse']['Ret'] != 0:
        return None, None
    self.loginInfo['SyncKey'] = parsed['SyncKey']
    # the flattened synckey string is rebuilt from SyncCheckKey (not SyncKey)
    self.loginInfo['synckey'] = '|'.join('%s_%s' % (item['Key'], item['Val'])
        for item in parsed['SyncCheckKey']['List'])
    return parsed['AddMsgList'], parsed['ModContactList']
def logout(self):
    ''' Log out from the web client and reset local state.

    The HTTP logout call is only issued while the session is alive;
    the local caches are cleared unconditionally.
    '''
    if self.alive:
        url = '%s/webwxlogout' % self.loginInfo['url']
        self.s.get(url,
            params={
                'redirect': 1,
                'type': 1,
                'skey': self.loginInfo['skey'], },
            headers={'User-Agent': config.USER_AGENT})
        self.alive = False
    self.isLogging = False
    self.s.cookies.clear()
    del self.chatroomList[:]
    del self.memberList[:]
    del self.mpList[:]
    return ReturnValue({'BaseResponse': {
        'ErrMsg': 'logout successfully.',
        'Ret': 0, }})
def load_login(core):
    ''' Attach the login-related functions above as methods of core. '''
    for method in (login, get_QRuuid, get_QR, check_login, web_init,
                   show_mobile_login, start_receiving, get_msg, logout):
        setattr(core, method.__name__, method)
9,776 | import logging, traceback, sys, threading
from ..log import set_logging
from ..utils import test_connect
from ..storage import templates
def auto_login(self, hotReload=False, statusStorageDir='itchat.pkl',
        enableCmdQR=False, picDir=None, qrCallback=None,
        loginCallback=None, exitCallback=None):
    ''' Log in, optionally restoring a previously dumped session first.

    With hotReload the pickled status in statusStorageDir is tried
    before falling back to a normal QR login (and re-dumped afterwards).
    Exits the process when neither the internet nor the wechat domain is
    reachable.
    '''
    if not test_connect():
        logger.info("You can't get access to internet or wechat domain, so exit.")
        sys.exit()
    self.useHotReload = hotReload
    self.hotReloadDir = statusStorageDir
    if not hotReload:
        self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback,
            loginCallback=loginCallback, exitCallback=exitCallback)
        return
    # hot reload: try restoring the pickled session before showing a QR
    restored = self.load_login_status(statusStorageDir,
        loginCallback=loginCallback, exitCallback=exitCallback)
    if restored:
        return
    logger.error('Hot reload failed, logging in normally, error={}'.format(restored))
    self.logout()
    self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback,
        loginCallback=loginCallback, exitCallback=exitCallback)
    self.dump_login_status(statusStorageDir)
def configured_reply(self):
    ''' determine the type of message and reply if its method is defined
    however, I use a strange way to determine whether a msg is from massive platform
    I haven't found a better solution here
    The main problem I'm worrying about is the mismatching of new friends added on phone
    If you have any good idea, pleeeease report an issue. I will be more than grateful.
    '''
    try:
        msg = self.msgList.get(timeout=1)
    except Queue.Empty:
        pass
    else:
        # fix: replyFn was unbound (NameError) when msg['User'] matched
        # none of the branches below
        replyFn = None
        if isinstance(msg['User'], templates.User):
            replyFn = self.functionDict['FriendChat'].get(msg['Type'])
        elif isinstance(msg['User'], templates.MassivePlatform):
            replyFn = self.functionDict['MpChat'].get(msg['Type'])
        elif isinstance(msg['User'], templates.Chatroom):
            replyFn = self.functionDict['GroupChat'].get(msg['Type'])
        if replyFn is not None:
            try:
                r = replyFn(msg)
                if r is not None:
                    self.send(r, msg.get('FromUserName'))
            except Exception:
                # narrowed from a bare "except:"; user handlers may fail freely
                logger.warning(traceback.format_exc())
def msg_register(self, msgType, isFriendChat=False, isGroupChat=False, isMpChat=False):
    ''' a decorator constructor
    return a specific decorator based on information given

    The decorated function is registered for every type in msgType under
    each selected chat category; when no category flag is set it goes to
    FriendChat.
    '''
    types = msgType if isinstance(msgType, (list, tuple)) else [msgType]
    targets = [name for flag, name in (
        (isFriendChat, 'FriendChat'),
        (isGroupChat, 'GroupChat'),
        (isMpChat, 'MpChat')) if flag] or ['FriendChat']
    def _msg_register(fn):
        for _msgType in types:
            for target in targets:
                self.functionDict[target][_msgType] = fn
        return fn
    return _msg_register
def run(self, debug=False, blockThread=True):
    ''' Start the auto-reply loop.

    Args:
        debug: raise the logging level to DEBUG first.
        blockThread: run the loop in the calling thread (Ctrl-C dumps the
            hot-reload status and exits cleanly); when False the loop runs
            in a daemon thread and this call returns immediately.
    '''
    logger.info('Start auto replying.')
    if debug:
        set_logging(loggingLevel=logging.DEBUG)
    def reply_fn():
        try:
            while self.alive:
                self.configured_reply()
        except KeyboardInterrupt:
            if self.useHotReload:
                self.dump_login_status()
            self.alive = False
            logger.debug('itchat received an ^C and exit.')
            logger.info('Bye~')
    if blockThread:
        reply_fn()
    else:
        replyThread = threading.Thread(target=reply_fn)
        # fix: Thread.setDaemon() is deprecated since Python 3.10
        replyThread.daemon = True
        replyThread.start()
def load_register(core):
    ''' Attach the register-related functions above as methods of core. '''
    for method in (auto_login, configured_reply, msg_register, run):
        setattr(core, method.__name__, method)
9,777 | import pickle, os
import logging
import requests
from ..config import VERSION
from ..returnvalues import ReturnValue
from ..storage import templates
from .contact import update_local_chatrooms, update_local_friends
from .messages import produce_msg
def dump_login_status(self, fileDir=None):
    ''' Dump the current login status to fileDir for later hot reload.

    NOTE(review): the implementation is missing at this point in the
    file (truncated); this docstring only records the intended contract
    — restore the body before use.
    '''
def load_login_status(self, fileDir,
        loginCallback=None, exitCallback=None):
    ''' Restore a previously dumped login status from fileDir.

    NOTE(review): the implementation is missing at this point in the
    file (truncated); this docstring only records the intended contract
    — restore the body before use.
    '''
def load_hotreload(core):
    ''' Attach the hot-reload functions above as methods of core. '''
    for method in (dump_login_status, load_login_status):
        setattr(core, method.__name__, method)
9,778 | import re
import time
import requests
import config
from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.session_manager import SessionManager
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, pconf
import threading
from common import memory, utils
import base64
import os
# NOTE(review): a stray "=" (remnant of a truncated assignment) was here;
# it broke the module syntax and was turned into this comment.
def _download_file(url: str):
    """Download url into ./tmp/<basename> and return the local path.

    Returns None (after logging) when the download fails. The tmp
    directory is created on demand and an existing file with the same
    name is overwritten.
    """
    try:
        tmp_dir = "tmp"
        # fix: exist_ok avoids the racy exists()/makedirs() pair; the
        # original also reused one variable for both dir and file path
        os.makedirs(tmp_dir, exist_ok=True)
        file_name = url.split("/")[-1]  # basename taken from the URL
        file_path = os.path.join(tmp_dir, file_name)
        response = requests.get(url, timeout=30)  # fix: never hang forever
        # fix: fail loudly on HTTP errors instead of writing the error
        # page body to disk and returning it as a valid download
        response.raise_for_status()
        with open(file_path, "wb") as f:
            f.write(response.content)
        return file_path
    except Exception as e:
        logger.warn(e)
9,779 | from common import const
class BaiduWenxinBot(Bot):
    """Bot backed by Baidu's ERNIE ("wenxin") chat-completion HTTP API."""

    def __init__(self):
        super().__init__()
        # endpoint segment per model; "wenxin-4" maps to completions_pro
        wenxin_model = conf().get("baidu_wenxin_model") or "eb-instant"
        if conf().get("model") and conf().get("model") == "wenxin-4":
            wenxin_model = "completions_pro"
        self.sessions = SessionManager(BaiduWenxinSession, model=wenxin_model)

    def reply(self, query, context=None):
        """Handle TEXT (chat) and IMAGE_CREATE contexts; returns a Reply
        (or None for other context types)."""
        # acquire reply content
        if context and context.type:
            if context.type == ContextType.TEXT:
                logger.info("[BAIDU] query={}".format(query))
                session_id = context["session_id"]
                reply = None
                if query == "#清除记忆":
                    self.sessions.clear_session(session_id)
                    reply = Reply(ReplyType.INFO, "记忆已清除")
                elif query == "#清除所有":
                    self.sessions.clear_all_session()
                    reply = Reply(ReplyType.INFO, "所有人记忆已清除")
                else:
                    session = self.sessions.session_query(query, session_id)
                    result = self.reply_text(session)
                    total_tokens, completion_tokens, reply_content = (
                        result["total_tokens"],
                        result["completion_tokens"],
                        result["content"],
                    )
                    logger.debug(
                        "[BAIDU] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(session.messages, session_id, reply_content, completion_tokens)
                    )
                    # reply_text signals failure with total_tokens == 0
                    if total_tokens == 0:
                        reply = Reply(ReplyType.ERROR, reply_content)
                    else:
                        self.sessions.session_reply(reply_content, session_id, total_tokens)
                        reply = Reply(ReplyType.TEXT, reply_content)
                return reply
            elif context.type == ContextType.IMAGE_CREATE:
                ok, retstring = self.create_img(query, 0)
                reply = None
                if ok:
                    reply = Reply(ReplyType.IMAGE_URL, retstring)
                else:
                    reply = Reply(ReplyType.ERROR, retstring)
                return reply

    def reply_text(self, session: BaiduWenxinSession, retry_count=0):
        """Call the wenxin API once.

        Returns a dict with total_tokens, completion_tokens and content;
        token counts are 0 on failure so callers can detect errors.
        """
        try:
            logger.info("[BAIDU] model={}".format(session.model))
            access_token = self.get_access_token()
            if access_token == 'None':
                logger.warn("[BAIDU] access token 获取失败")
                return {
                    "total_tokens": 0,
                    "completion_tokens": 0,
                    "content": 0,
                }
            url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/" + session.model + "?access_token=" + access_token
            headers = {
                'Content-Type': 'application/json'
            }
            payload = {'messages': session.messages}
            response = requests.request("POST", url, headers=headers, data=json.dumps(payload))
            response_text = json.loads(response.text)
            logger.info(f"[BAIDU] response text={response_text}")
            res_content = response_text["result"]
            total_tokens = response_text["usage"]["total_tokens"]
            completion_tokens = response_text["usage"]["completion_tokens"]
            logger.info("[BAIDU] reply={}".format(res_content))
            return {
                "total_tokens": total_tokens,
                "completion_tokens": completion_tokens,
                "content": res_content,
            }
        except Exception as e:
            logger.warn("[BAIDU] Exception: {}".format(e))
            self.sessions.clear_session(session.session_id)
            # fix: include "total_tokens" so reply()'s
            # result["total_tokens"] lookup no longer raises KeyError on
            # the error path; the dead need_retry computation was removed
            return {
                "total_tokens": 0,
                "completion_tokens": 0,
                "content": "出错了: {}".format(e),
            }

    def get_access_token(self):
        """
        Generate an auth token (Access Token) from the configured AK/SK.
        :return: access_token, or the string 'None' on failure
        """
        url = "https://aip.baidubce.com/oauth/2.0/token"
        params = {"grant_type": "client_credentials", "client_id": BAIDU_API_KEY, "client_secret": BAIDU_SECRET_KEY}
        return str(requests.post(url, params=params).json().get("access_token"))
class XunFeiBot(Bot):
    """Bot backed by iFlytek Spark ("XunFei") over its websocket chat API.

    A reply() call spawns a websocket worker thread and then polls a
    per-request queue for streamed chunks until the end marker arrives.
    """
    def __init__(self):
        super().__init__()
        self.app_id = conf().get("xunfei_app_id")
        self.api_key = conf().get("xunfei_api_key")
        self.api_secret = conf().get("xunfei_api_secret")
        # model domain per API version:
        # v1.5 -> "general", v2.0 -> "generalv2", v3.0 -> "generalv3"
        self.domain = "generalv3"
        # websocket endpoint per API version:
        # v1.5 -> "ws://spark-api.xf-yun.com/v1.1/chat"
        # v2.0 -> "ws://spark-api.xf-yun.com/v2.1/chat"
        # v3.0 -> "ws://spark-api.xf-yun.com/v3.1/chat"
        self.spark_url = "ws://spark-api.xf-yun.com/v3.1/chat"
        self.host = urlparse(self.spark_url).netloc
        self.path = urlparse(self.spark_url).path
        # reuse the same session mechanism as the wenxin bot
        self.sessions = SessionManager(BaiduWenxinSession, model=const.XUNFEI)
    def reply(self, query, context: Context = None) -> Reply:
        """Answer a TEXT context by streaming from Spark.

        Polls the request's chunk queue for up to ~300 iterations of
        0.1s timeouts and concatenates the streamed fragments.
        """
        if context.type == ContextType.TEXT:
            logger.info("[XunFei] query={}".format(query))
            session_id = context["session_id"]
            request_id = self.gen_request_id(session_id)
            reply_map[request_id] = ""
            session = self.sessions.session_query(query, session_id)
            threading.Thread(target=self.create_web_socket,
                             args=(session.messages, request_id)).start()
            depth = 0
            time.sleep(0.1)
            t1 = time.time()
            usage = {}
            while depth <= 300:
                try:
                    data_queue = queue_map.get(request_id)
                    if not data_queue:
                        # websocket worker has not created the queue yet
                        depth += 1
                        time.sleep(0.1)
                        continue
                    data_item = data_queue.get(block=True, timeout=0.1)
                    if data_item.is_end:
                        # request finished
                        del queue_map[request_id]
                        if data_item.reply:
                            reply_map[request_id] += data_item.reply
                        usage = data_item.usage
                        break
                    reply_map[request_id] += data_item.reply
                    depth += 1
                except Exception as e:
                    depth += 1
                    continue
            t2 = time.time()
            logger.info(
                f"[XunFei-API] response={reply_map[request_id]}, time={t2 - t1}s, usage={usage}"
            )
            self.sessions.session_reply(reply_map[request_id], session_id,
                                        usage.get("total_tokens"))
            reply = Reply(ReplyType.TEXT, reply_map[request_id])
            del reply_map[request_id]
            return reply
        else:
            reply = Reply(ReplyType.ERROR,
                          "Bot不支持处理{}类型的消息".format(context.type))
            return reply
    def create_web_socket(self, prompt, session_id, temperature=0.5):
        """Open the Spark websocket for a single request.

        NOTE(review): reply() passes a request id as session_id, so the
        queue created here is keyed per request, not per chat session.
        """
        logger.info(f"[XunFei] start connect, prompt={prompt}")
        websocket.enableTrace(False)
        wsUrl = self.create_url()
        ws = websocket.WebSocketApp(wsUrl,
                                    on_message=on_message,
                                    on_error=on_error,
                                    on_close=on_close,
                                    on_open=on_open)
        data_queue = queue.Queue(1000)
        queue_map[session_id] = data_queue
        ws.appid = self.app_id
        ws.question = prompt
        ws.domain = self.domain
        ws.session_id = session_id
        ws.temperature = temperature
        ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
    def gen_request_id(self, session_id: str):
        """Build a per-request id: session id + unix timestamp + random int."""
        return session_id + "_" + str(int(time.time())) + "" + str(
            random.randint(0, 100))
    # build the authenticated websocket url
    def create_url(self):
        # RFC1123-formatted timestamp
        now = datetime.now()
        date = format_date_time(mktime(now.timetuple()))
        # assemble the string to sign
        signature_origin = "host: " + self.host + "\n"
        signature_origin += "date: " + date + "\n"
        signature_origin += "GET " + self.path + " HTTP/1.1"
        # sign with HMAC-SHA256
        signature_sha = hmac.new(self.api_secret.encode('utf-8'),
                                 signature_origin.encode('utf-8'),
                                 digestmod=hashlib.sha256).digest()
        signature_sha_base64 = base64.b64encode(signature_sha).decode(
            encoding='utf-8')
        authorization_origin = f'api_key="{self.api_key}", algorithm="hmac-sha256", headers="host date request-line", ' \
                               f'signature="{signature_sha_base64}"'
        authorization = base64.b64encode(
            authorization_origin.encode('utf-8')).decode(encoding='utf-8')
        # collect the auth parameters into a dict
        v = {"authorization": authorization, "date": date, "host": self.host}
        # append the auth parameters to build the final url
        url = self.spark_url + '?' + urlencode(v)
        # print url here to compare against the official demo when debugging
        return url
    def gen_params(self, appid, domain, question):
        """
        Build the request payload from the appid and the user's question.
        """
        data = {
            "header": {
                "app_id": appid,
                "uid": "1234"
            },
            "parameter": {
                "chat": {
                    "domain": domain,
                    "random_threshold": 0.5,
                    "max_tokens": 2048,
                    "auditing": "default"
                }
            },
            "payload": {
                "message": {
                    "text": question
                }
            }
        }
        return data
class LinkAIBot(Bot):
# authentication failed
AUTH_FAILED_CODE = 401
NO_QUOTA_CODE = 406
    def __init__(self):
        super().__init__()
        # session manager reuses the configured chat model (default gpt-3.5-turbo)
        self.sessions = LinkAISessionManager(LinkAISession, model=conf().get("model") or "gpt-3.5-turbo")
        # extra per-request arguments (e.g. max_tokens) filled in by callers
        self.args = {}
def reply(self, query, context: Context = None) -> Reply:
if context.type == ContextType.TEXT:
return self._chat(query, context)
elif context.type == ContextType.IMAGE_CREATE:
if not conf().get("text_to_image"):
logger.warn("[LinkAI] text_to_image is not enabled, ignore the IMAGE_CREATE request")
return Reply(ReplyType.TEXT, "")
ok, res = self.create_img(query, 0)
if ok:
reply = Reply(ReplyType.IMAGE_URL, res)
else:
reply = Reply(ReplyType.ERROR, res)
return reply
else:
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
return reply
    def _chat(self, query, context, retry_count=0) -> Reply:
        """
        Send a chat-completion request to the LinkAI API.
        :param query: prompt text
        :param context: conversation context
        :param retry_count: current recursive retry depth (max 2 retries)
        :return: the reply
        """
        if retry_count > 2:
            # exit from retry 2 times
            logger.warn("[LINKAI] failed after maximum number of retry times")
            return Reply(ReplyType.TEXT, "请再问我一次吧")
        try:
            # load config
            if context.get("generate_breaked_by"):
                logger.info(f"[LINKAI] won't set appcode because a plugin ({context['generate_breaked_by']}) affected the context")
                app_code = None
            else:
                # explicit kwarg > group mapping from plugin config > global config
                plugin_app_code = self._find_group_mapping_code(context)
                app_code = context.kwargs.get("app_code") or plugin_app_code or conf().get("linkai_app_code")
            linkai_api_key = conf().get("linkai_api_key")
            session_id = context["session_id"]
            session_message = self.sessions.session_msg_query(query, session_id)
            logger.debug(f"[LinkAI] session={session_message}, session_id={session_id}")
            # image process
            img_cache = memory.USER_IMAGE_CACHE.get(session_id)
            if img_cache:
                messages = self._process_image_msg(app_code=app_code, session_id=session_id, query=query, img_cache=img_cache)
                if messages:
                    session_message = messages
            model = conf().get("model")
            # remove system message
            if session_message[0].get("role") == "system":
                if app_code or model == "wenxin":
                    session_message.pop(0)
            body = {
                "app_code": app_code,
                "messages": session_message,
                "model": model,  # chat model name; supports gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei
                "temperature": conf().get("temperature"),
                "top_p": conf().get("top_p", 1),
                "frequency_penalty": conf().get("frequency_penalty", 0.0),  # in [-2,2]; larger values favor more novel content
                "presence_penalty": conf().get("presence_penalty", 0.0),  # in [-2,2]; larger values favor more novel content
                "session_id": session_id,
                "channel_type": conf().get("channel_type", "wx")
            }
            try:
                # best-effort: enrich the request with client info when the
                # optional linkai client package is available
                from linkai import LinkAIClient
                client_id = LinkAIClient.fetch_client_id()
                if client_id:
                    body["client_id"] = client_id
                    # start: client info deliver
                    if context.kwargs.get("msg"):
                        body["session_id"] = context.kwargs.get("msg").from_user_id
                        if context.kwargs.get("msg").is_group:
                            body["is_group"] = True
                            body["group_name"] = context.kwargs.get("msg").from_user_nickname
                            body["sender_name"] = context.kwargs.get("msg").actual_user_nickname
                        else:
                            if body.get("channel_type") in ["wechatcom_app"]:
                                body["sender_name"] = context.kwargs.get("msg").from_user_id
                            else:
                                body["sender_name"] = context.kwargs.get("msg").from_user_nickname
            except Exception as e:
                pass
            file_id = context.kwargs.get("file_id")
            if file_id:
                body["file_id"] = file_id
            logger.info(f"[LINKAI] query={query}, app_code={app_code}, model={body.get('model')}, file_id={file_id}")
            headers = {"Authorization": "Bearer " + linkai_api_key}
            # do http request
            base_url = conf().get("linkai_api_base", "https://api.link-ai.chat")
            res = requests.post(url=base_url + "/v1/chat/completions", json=body, headers=headers,
                                timeout=conf().get("request_timeout", 180))
            if res.status_code == 200:
                # execute success
                response = res.json()
                reply_content = response["choices"][0]["message"]["content"]
                total_tokens = response["usage"]["total_tokens"]
                logger.info(f"[LINKAI] reply={reply_content}, total_tokens={total_tokens}")
                self.sessions.session_reply(reply_content, session_id, total_tokens, query=query)
                agent_suffix = self._fetch_agent_suffix(response)
                if agent_suffix:
                    reply_content += agent_suffix
                if not agent_suffix:
                    knowledge_suffix = self._fetch_knowledge_search_suffix(response)
                    if knowledge_suffix:
                        reply_content += knowledge_suffix
                # image process
                if response["choices"][0].get("img_urls"):
                    thread = threading.Thread(target=self._send_image, args=(context.get("channel"), context, response["choices"][0].get("img_urls")))
                    thread.start()
                if response["choices"][0].get("text_content"):
                    reply_content = response["choices"][0].get("text_content")
                reply_content = self._process_url(reply_content)
                return Reply(ReplyType.TEXT, reply_content)
            else:
                response = res.json()
                error = response.get("error")
                logger.error(f"[LINKAI] chat failed, status_code={res.status_code}, "
                             f"msg={error.get('message')}, type={error.get('type')}")
                if res.status_code >= 500:
                    # server error, need retry
                    time.sleep(2)
                    logger.warn(f"[LINKAI] do retry, times={retry_count}")
                    return self._chat(query, context, retry_count + 1)
                return Reply(ReplyType.TEXT, "提问太快啦,请休息一下再问我吧")
        except Exception as e:
            logger.exception(e)
            # retry
            time.sleep(2)
            logger.warn(f"[LINKAI] do retry, times={retry_count}")
            return self._chat(query, context, retry_count + 1)
def _process_image_msg(self, app_code: str, session_id: str, query:str, img_cache: dict):
try:
enable_image_input = False
app_info = self._fetch_app_info(app_code)
if not app_info:
logger.debug(f"[LinkAI] not found app, can't process images, app_code={app_code}")
return None
plugins = app_info.get("data").get("plugins")
for plugin in plugins:
if plugin.get("input_type") and "IMAGE" in plugin.get("input_type"):
enable_image_input = True
if not enable_image_input:
return
msg = img_cache.get("msg")
path = img_cache.get("path")
msg.prepare()
logger.info(f"[LinkAI] query with images, path={path}")
messages = self._build_vision_msg(query, path)
memory.USER_IMAGE_CACHE[session_id] = None
return messages
except Exception as e:
logger.exception(e)
def _find_group_mapping_code(self, context):
try:
if context.kwargs.get("isgroup"):
group_name = context.kwargs.get("msg").from_user_nickname
if config.plugin_config and config.plugin_config.get("linkai"):
linkai_config = config.plugin_config.get("linkai")
group_mapping = linkai_config.get("group_app_map")
if group_mapping and group_name:
return group_mapping.get(group_name)
except Exception as e:
logger.exception(e)
return None
def _build_vision_msg(self, query: str, path: str):
try:
suffix = utils.get_path_suffix(path)
with open(path, "rb") as file:
base64_str = base64.b64encode(file.read()).decode('utf-8')
messages = [{
"role": "user",
"content": [
{
"type": "text",
"text": query
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/{suffix};base64,{base64_str}"
}
}
]
}]
return messages
except Exception as e:
logger.exception(e)
    def reply_text(self, session: ChatGPTSession, app_code="", retry_count=0) -> dict:
        """Call the LinkAI chat API with a prepared session.

        Returns a dict with total_tokens, completion_tokens and content;
        token counts are 0 on failure. Retries (recursively) up to 2
        times on 5xx responses or exceptions.
        """
        if retry_count >= 2:
            # exit from retry 2 times
            logger.warn("[LINKAI] failed after maximum number of retry times")
            return {
                "total_tokens": 0,
                "completion_tokens": 0,
                "content": "请再问我一次吧"
            }
        try:
            body = {
                "app_code": app_code,
                "messages": session.messages,
                "model": conf().get("model") or "gpt-3.5-turbo",  # chat model name; supports gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei
                "temperature": conf().get("temperature"),
                "top_p": conf().get("top_p", 1),
                "frequency_penalty": conf().get("frequency_penalty", 0.0),  # in [-2,2]; larger values favor more novel content
                "presence_penalty": conf().get("presence_penalty", 0.0),  # in [-2,2]; larger values favor more novel content
            }
            if self.args.get("max_tokens"):
                body["max_tokens"] = self.args.get("max_tokens")
            headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
            # do http request
            base_url = conf().get("linkai_api_base", "https://api.link-ai.chat")
            res = requests.post(url=base_url + "/v1/chat/completions", json=body, headers=headers,
                                timeout=conf().get("request_timeout", 180))
            if res.status_code == 200:
                # execute success
                response = res.json()
                reply_content = response["choices"][0]["message"]["content"]
                total_tokens = response["usage"]["total_tokens"]
                logger.info(f"[LINKAI] reply={reply_content}, total_tokens={total_tokens}")
                return {
                    "total_tokens": total_tokens,
                    "completion_tokens": response["usage"]["completion_tokens"],
                    "content": reply_content,
                }
            else:
                response = res.json()
                error = response.get("error")
                logger.error(f"[LINKAI] chat failed, status_code={res.status_code}, "
                             f"msg={error.get('message')}, type={error.get('type')}")
                if res.status_code >= 500:
                    # server error, need retry
                    time.sleep(2)
                    logger.warn(f"[LINKAI] do retry, times={retry_count}")
                    return self.reply_text(session, app_code, retry_count + 1)
                return {
                    "total_tokens": 0,
                    "completion_tokens": 0,
                    "content": "提问太快啦,请休息一下再问我吧"
                }
        except Exception as e:
            logger.exception(e)
            # retry
            time.sleep(2)
            logger.warn(f"[LINKAI] do retry, times={retry_count}")
            return self.reply_text(session, app_code, retry_count + 1)
def _fetch_app_info(self, app_code: str):
headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
# do http request
base_url = conf().get("linkai_api_base", "https://api.link-ai.chat")
params = {"app_code": app_code}
res = requests.get(url=base_url + "/v1/app/info", params=params, headers=headers, timeout=(5, 10))
if res.status_code == 200:
return res.json()
else:
logger.warning(f"[LinkAI] find app info exception, res={res}")
def create_img(self, query, retry_count=0, api_key=None):
try:
logger.info("[LinkImage] image_query={}".format(query))
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {conf().get('linkai_api_key')}"
}
data = {
"prompt": query,
"n": 1,
"model": conf().get("text_to_image") or "dall-e-2",
"response_format": "url",
"img_proxy": conf().get("image_proxy")
}
url = conf().get("linkai_api_base", "https://api.link-ai.chat") + "/v1/images/generations"
res = requests.post(url, headers=headers, json=data, timeout=(5, 90))
t2 = time.time()
image_url = res.json()["data"][0]["url"]
logger.info("[OPEN_AI] image_url={}".format(image_url))
return True, image_url
except Exception as e:
logger.error(format(e))
return False, "画图出现问题,请休息一下再问我吧"
    def _fetch_knowledge_search_suffix(self, response) -> str:
        """Build an optional "knowledge search missed" suffix for a LinkAI reply.

        Reads `knowledge_base.search_hit` / `first_similarity` from the API
        response.  When the plugin config enables `search_miss_text_enabled`,
        returns the configured `search_miss_suffix` if the search missed, or
        hit with similarity below `search_miss_similarity`.  Returns None
        (implicitly) in every other case, including on exceptions.
        """
        try:
            if response.get("knowledge_base"):
                search_hit = response.get("knowledge_base").get("search_hit")
                first_similarity = response.get("knowledge_base").get("first_similarity")
                logger.info(f"[LINKAI] knowledge base, search_hit={search_hit}, first_similarity={first_similarity}")
                plugin_config = pconf("linkai")
                if plugin_config and plugin_config.get("knowledge_base") and plugin_config.get("knowledge_base").get("search_miss_text_enabled"):
                    search_miss_similarity = plugin_config.get("knowledge_base").get("search_miss_similarity")
                    search_miss_text = plugin_config.get("knowledge_base").get("search_miss_suffix")
                    if not search_hit:
                        return search_miss_text
                    # A hit whose similarity is below the configured threshold
                    # is still treated as a miss.
                    if search_miss_similarity and float(search_miss_similarity) > first_similarity:
                        return search_miss_text
        except Exception as e:
            logger.exception(e)
    def _fetch_agent_suffix(self, response):
        """Build a human-readable trailer listing the agent plugins that ran.

        Walks `response["agent"]["chain"]` and, when `need_show_plugin` is
        set, assembles a suffix with each plugin's icon, name, input and
        (optionally) the model's thought.  Returns the suffix string, or None
        (implicitly) when agent info is absent or on any exception.
        """
        try:
            plugin_list = []
            logger.debug(f"[LinkAgent] res={response}")
            if response.get("agent") and response.get("agent").get("chain") and response.get("agent").get("need_show_plugin"):
                chain = response.get("agent").get("chain")
                # Visual separator between the reply body and the plugin trailer.
                suffix = "\n\n- - - - - - - - - - - -"
                i = 0
                for turn in chain:
                    plugin_name = turn.get('plugin_name')
                    suffix += "\n"
                    need_show_thought = response.get("agent").get("need_show_thought")
                    if turn.get("thought") and plugin_name and need_show_thought:
                        suffix += f"{turn.get('thought')}\n"
                    if plugin_name:
                        plugin_list.append(turn.get('plugin_name'))
                        if turn.get('plugin_icon'):
                            suffix += f"{turn.get('plugin_icon')} "
                        suffix += f"{turn.get('plugin_name')}"
                        if turn.get('plugin_input'):
                            suffix += f":{turn.get('plugin_input')}"
                    # Blank line between chain entries, but not after the last one.
                    if i < len(chain) - 1:
                        suffix += "\n"
                    i += 1
                logger.info(f"[LinkAgent] use plugins: {plugin_list}")
                return suffix
        except Exception as e:
            logger.exception(e)
def _process_url(self, text):
try:
url_pattern = re.compile(r'\[(.*?)\]\((http[s]?://.*?)\)')
def replace_markdown_url(match):
return f"{match.group(2)}"
return url_pattern.sub(replace_markdown_url, text)
except Exception as e:
logger.error(e)
    def _send_image(self, channel, context, image_urls):
        """Send each media url through the channel, rate-limited by config.

        Respects `max_media_send_count` (cap on items sent) and
        `media_send_interval` (sleep between sends).  Reply type is chosen by
        url extension: .mp4 -> video, document extensions -> file (downloaded
        first via _download_file), anything else -> image url.
        """
        if not image_urls:
            return
        max_send_num = conf().get("max_media_send_count")
        send_interval = conf().get("media_send_interval")
        try:
            i = 0
            for url in image_urls:
                # NOTE(review): uses `continue` rather than `break` once the cap
                # is reached, so the loop still scans the remaining urls.
                if max_send_num and i >= max_send_num:
                    continue
                i += 1
                if url.endswith(".mp4"):
                    reply_type = ReplyType.VIDEO_URL
                elif url.endswith(".pdf") or url.endswith(".doc") or url.endswith(".docx") or url.endswith(".csv"):
                    reply_type = ReplyType.FILE
                    # Files must be fetched locally before sending; skip on failure.
                    url = _download_file(url)
                    if not url:
                        continue
                else:
                    reply_type = ReplyType.IMAGE_URL
                reply = Reply(reply_type, url)
                channel.send(reply, context)
                if send_interval:
                    time.sleep(send_interval)
        except Exception as e:
            logger.error(e)
class ClaudeAIBot(Bot, OpenAIImage):
    """Bot backed by the claude.ai web interface (cookie-authenticated).

    NOTE(review): the `impersonate="chrome110"` keyword implies the curl_cffi
    requests client rather than stdlib/`requests` — confirm against imports.
    """
    def __init__(self):
        super().__init__()
        # Model name here only parameterizes the session manager; actual
        # completion requests below are pinned to "claude-2".
        self.sessions = SessionManager(ClaudeAiSession, model=conf().get("model") or "gpt-3.5-turbo")
        self.claude_api_cookie = conf().get("claude_api_cookie")
        self.proxy = conf().get("proxy")
        # Maps session_id -> claude conversation uuid.
        self.con_uuid_dic = {}
        if self.proxy:
            self.proxies = {
                "http": self.proxy,
                "https": self.proxy
            }
        else:
            self.proxies = None
        self.error = ""
        # None means authentication failed; self.error then holds the reason.
        self.org_uuid = self.get_organization_id()
    def generate_uuid(self):
        """Return a fresh conversation uuid string."""
        random_uuid = uuid.uuid4()
        random_uuid_str = str(random_uuid)
        # Re-joins the canonical uuid4 segments (8-4-4-4-12) explicitly.
        formatted_uuid = f"{random_uuid_str[0:8]}-{random_uuid_str[9:13]}-{random_uuid_str[14:18]}-{random_uuid_str[19:23]}-{random_uuid_str[24:]}"
        return formatted_uuid
    def reply(self, query, context: Context = None) -> Reply:
        """Dispatch a context to chat or image creation; other types error out."""
        if context.type == ContextType.TEXT:
            return self._chat(query, context)
        elif context.type == ContextType.IMAGE_CREATE:
            # Image generation comes from the OpenAIImage mixin, not claude.
            ok, res = self.create_img(query, 0)
            if ok:
                reply = Reply(ReplyType.IMAGE_URL, res)
            else:
                reply = Reply(ReplyType.ERROR, res)
            return reply
        else:
            reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
            return reply
    def get_organization_id(self):
        """Fetch the account's organization uuid; None (with self.error set) on failure."""
        url = "https://claude.ai/api/organizations"
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
            'Accept-Language': 'en-US,en;q=0.5',
            'Referer': 'https://claude.ai/chats',
            'Content-Type': 'application/json',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'Connection': 'keep-alive',
            'Cookie': f'{self.claude_api_cookie}'
        }
        try:
            response = requests.get(url, headers=headers, impersonate="chrome110", proxies =self.proxies, timeout=400)
            res = json.loads(response.text)
            # NOTE(review): local `uuid` shadows the imported uuid module here.
            uuid = res[0]['uuid']
        except:
            # NOTE(review): bare except; if requests.get itself raised,
            # `response` is unbound and the checks below raise NameError.
            if "App unavailable" in response.text:
                logger.error("IP error: The IP is not allowed to be used on Claude")
                self.error = "ip所在地区不被claude支持"
            elif "Invalid authorization" in response.text:
                logger.error("Cookie error: Invalid authorization of claude, check cookie please.")
                self.error = "无法通过claude身份验证,请检查cookie"
            return None
        return uuid
    def conversation_share_check(self,session_id):
        """Return the conversation uuid for a session, creating one if needed.

        A configured `claude_uuid` overrides per-session conversations so all
        sessions share one conversation.
        """
        if conf().get("claude_uuid") is not None and conf().get("claude_uuid") != "":
            con_uuid = conf().get("claude_uuid")
            return con_uuid
        if session_id not in self.con_uuid_dic:
            self.con_uuid_dic[session_id] = self.generate_uuid()
            self.create_new_chat(self.con_uuid_dic[session_id])
        return self.con_uuid_dic[session_id]
    def check_cookie(self):
        """Re-validate the cookie by re-fetching the organization id."""
        flag = self.get_organization_id()
        return flag
    def create_new_chat(self, con_uuid):
        """
        Create a new claude conversation entity.
        :param con_uuid: conversation id
        :return: JSON of the newly created conversation information
        """
        url = f"https://claude.ai/api/organizations/{self.org_uuid}/chat_conversations"
        payload = json.dumps({"uuid": con_uuid, "name": ""})
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
            'Accept-Language': 'en-US,en;q=0.5',
            'Referer': 'https://claude.ai/chats',
            'Content-Type': 'application/json',
            'Origin': 'https://claude.ai',
            'DNT': '1',
            'Connection': 'keep-alive',
            'Cookie': self.claude_api_cookie,
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'TE': 'trailers'
        }
        response = requests.post(url, headers=headers, data=payload, impersonate="chrome110", proxies=self.proxies, timeout=400)
        # Returns JSON of the newly created conversation information
        return response.json()
    def _chat(self, query, context, retry_count=0) -> Reply:
        """
        Issue a chat request.
        :param query: the prompt text
        :param context: conversation context
        :param retry_count: current recursive retry count
        :return: the reply
        """
        if retry_count >= 2:
            # exit from retry 2 times
            logger.warn("[CLAUDEAI] failed after maximum number of retry times")
            return Reply(ReplyType.ERROR, "请再问我一次吧")
        try:
            session_id = context["session_id"]
            if self.org_uuid is None:
                return Reply(ReplyType.ERROR, self.error)
            session = self.sessions.session_query(query, session_id)
            con_uuid = self.conversation_share_check(session_id)
            model = conf().get("model") or "gpt-3.5-turbo"
            # remove system message
            if session.messages[0].get("role") == "system":
                if model == "wenxin" or model == "claude":
                    session.messages.pop(0)
            logger.info(f"[CLAUDEAI] query={query}")
            # do http request
            base_url = "https://claude.ai"
            payload = json.dumps({
                "completion": {
                    "prompt": f"{query}",
                    "timezone": "Asia/Kolkata",
                    "model": "claude-2"
                },
                "organization_uuid": f"{self.org_uuid}",
                "conversation_uuid": f"{con_uuid}",
                "text": f"{query}",
                "attachments": []
            })
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
                'Accept': 'text/event-stream, text/event-stream',
                'Accept-Language': 'en-US,en;q=0.5',
                'Referer': 'https://claude.ai/chats',
                'Content-Type': 'application/json',
                'Origin': 'https://claude.ai',
                'DNT': '1',
                'Connection': 'keep-alive',
                'Cookie': f'{self.claude_api_cookie}',
                'Sec-Fetch-Dest': 'empty',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Site': 'same-origin',
                'TE': 'trailers'
            }
            res = requests.post(base_url + "/api/append_message", headers=headers, data=payload,impersonate="chrome110",proxies= self.proxies,timeout=400)
            if res.status_code == 200 or "pemission" in res.text:
                # execute success
                # Response is an SSE stream: each "data: {...}" line may carry
                # a partial completion; concatenate them into the full reply.
                decoded_data = res.content.decode("utf-8")
                decoded_data = re.sub('\n+', '\n', decoded_data).strip()
                data_strings = decoded_data.split('\n')
                completions = []
                for data_string in data_strings:
                    # Strip the leading "data: " prefix (6 chars) of each SSE line.
                    json_str = data_string[6:].strip()
                    data = json.loads(json_str)
                    if 'completion' in data:
                        completions.append(data['completion'])
                reply_content = ''.join(completions)
                if "rate limi" in reply_content:
                    logger.error("rate limit error: The conversation has reached the system speed limit and is synchronized with Cladue. Please go to the official website to check the lifting time")
                    return Reply(ReplyType.ERROR, "对话达到系统速率限制,与cladue同步,请进入官网查看解除限制时间")
                logger.info(f"[CLAUDE] reply={reply_content}, total_tokens=invisible")
                # Token usage is not reported by this API; 100 is a placeholder.
                self.sessions.session_reply(reply_content, session_id, 100)
                return Reply(ReplyType.TEXT, reply_content)
            else:
                flag = self.check_cookie()
                if flag == None:
                    return Reply(ReplyType.ERROR, self.error)
                response = res.json()
                error = response.get("error")
                logger.error(f"[CLAUDE] chat failed, status_code={res.status_code}, "
                             f"msg={error.get('message')}, type={error.get('type')}, detail: {res.text}, uuid: {con_uuid}")
                if res.status_code >= 500:
                    # server error, need retry
                    time.sleep(2)
                    logger.warn(f"[CLAUDE] do retry, times={retry_count}")
                    return self._chat(query, context, retry_count + 1)
                return Reply(ReplyType.ERROR, "提问太快啦,请休息一下再问我吧")
        except Exception as e:
            logger.exception(e)
            # retry
            time.sleep(2)
            logger.warn(f"[CLAUDE] do retry, times={retry_count}")
            return self._chat(query, context, retry_count + 1)
class AliQwenBot(Bot):
    """Bot backed by Alibaba Bailian (Qwen) via the broadscope_bailian SDK."""
    def __init__(self):
        super().__init__()
        # Tokens expire; set_api_key returns the expiry used by
        # update_api_key_if_expired to refresh lazily.
        self.api_key_expired_time = self.set_api_key()
        self.sessions = SessionManager(AliQwenSession, model=conf().get("model", const.QWEN))
    def api_key_client(self):
        """Build an access-token client from the configured key pair."""
        return broadscope_bailian.AccessTokenClient(access_key_id=self.access_key_id(), access_key_secret=self.access_key_secret())
    def access_key_id(self):
        return conf().get("qwen_access_key_id")
    def access_key_secret(self):
        return conf().get("qwen_access_key_secret")
    def agent_key(self):
        return conf().get("qwen_agent_key")
    def app_id(self):
        return conf().get("qwen_app_id")
    def node_id(self):
        return conf().get("qwen_node_id", "")
    def temperature(self):
        return conf().get("temperature", 0.2 )
    def top_p(self):
        return conf().get("top_p", 1)
    def reply(self, query, context=None):
        """Handle a text context: admin commands first, then a model query."""
        # acquire reply content
        if context.type == ContextType.TEXT:
            logger.info("[QWEN] query={}".format(query))
            session_id = context["session_id"]
            reply = None
            clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
            if query in clear_memory_commands:
                self.sessions.clear_session(session_id)
                reply = Reply(ReplyType.INFO, "记忆已清除")
            elif query == "#清除所有":
                self.sessions.clear_all_session()
                reply = Reply(ReplyType.INFO, "所有人记忆已清除")
            elif query == "#更新配置":
                load_config()
                reply = Reply(ReplyType.INFO, "配置已更新")
            if reply:
                return reply
            session = self.sessions.session_query(query, session_id)
            logger.debug("[QWEN] session query={}".format(session.messages))
            reply_content = self.reply_text(session)
            logger.debug(
                "[QWEN] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
                    session.messages,
                    session_id,
                    reply_content["content"],
                    reply_content["completion_tokens"],
                )
            )
            # completion_tokens == 0 with non-empty content signals an error
            # message produced by reply_text rather than a model answer.
            if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
                reply = Reply(ReplyType.ERROR, reply_content["content"])
            elif reply_content["completion_tokens"] > 0:
                self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
                reply = Reply(ReplyType.TEXT, reply_content["content"])
            else:
                reply = Reply(ReplyType.ERROR, reply_content["content"])
                logger.debug("[QWEN] reply {} used 0 tokens.".format(reply_content))
            return reply
        else:
            reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
            return reply
    def reply_text(self, session: AliQwenSession, retry_count=0) -> dict:
        """
        call bailian's ChatCompletion to get the answer
        :param session: a conversation session
        :param retry_count: retry count
        :return: dict with total_tokens, completion_tokens and content
        """
        try:
            prompt, history = self.convert_messages_format(session.messages)
            self.update_api_key_if_expired()
            # NOTE: Bailian's call() does not expose a temperature parameter; since
            # temperature and top_p serve the same purpose, the smaller of the two
            # is passed as top_p.  See https://help.aliyun.com/document_detail/2587502.htm
            response = broadscope_bailian.Completions().call(app_id=self.app_id(), prompt=prompt, history=history, top_p=min(self.temperature(), self.top_p()))
            completion_content = self.get_completion_content(response, self.node_id())
            completion_tokens, total_tokens = self.calc_tokens(session.messages, completion_content)
            return {
                "total_tokens": total_tokens,
                "completion_tokens": completion_tokens,
                "content": completion_content,
            }
        except Exception as e:
            # Map known transient errors to user-facing messages and decide
            # whether to retry (at most twice, with a per-error backoff).
            need_retry = retry_count < 2
            result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
            if isinstance(e, openai.error.RateLimitError):
                logger.warn("[QWEN] RateLimitError: {}".format(e))
                result["content"] = "提问太快啦,请休息一下再问我吧"
                if need_retry:
                    time.sleep(20)
            elif isinstance(e, openai.error.Timeout):
                logger.warn("[QWEN] Timeout: {}".format(e))
                result["content"] = "我没有收到你的消息"
                if need_retry:
                    time.sleep(5)
            elif isinstance(e, openai.error.APIError):
                logger.warn("[QWEN] Bad Gateway: {}".format(e))
                result["content"] = "请再问我一次"
                if need_retry:
                    time.sleep(10)
            elif isinstance(e, openai.error.APIConnectionError):
                logger.warn("[QWEN] APIConnectionError: {}".format(e))
                need_retry = False
                result["content"] = "我连接不到你的网络"
            else:
                logger.exception("[QWEN] Exception: {}".format(e))
                need_retry = False
                self.sessions.clear_session(session.session_id)
            if need_retry:
                logger.warn("[QWEN] 第{}次重试".format(retry_count + 1))
                return self.reply_text(session, retry_count + 1)
            else:
                return result
    def set_api_key(self):
        """Fetch a fresh access token, install it globally, return its expiry time."""
        api_key, expired_time = self.api_key_client().create_token(agent_key=self.agent_key())
        broadscope_bailian.api_key = api_key
        return expired_time
    def update_api_key_if_expired(self):
        """Refresh the access token if the cached one has expired."""
        if time.time() > self.api_key_expired_time:
            self.api_key_expired_time = self.set_api_key()
    def convert_messages_format(self, messages) -> Tuple[str, List[ChatQaMessage]]:
        """Convert OpenAI-style role messages into Bailian's (prompt, history) pair.

        Consecutive user content is concatenated; each assistant message closes
        a Q/A pair.  The trailing user content becomes the prompt.  Raises if
        no user message remains.
        """
        history = []
        user_content = ''
        assistant_content = ''
        system_content = ''
        for message in messages:
            role = message.get('role')
            if role == 'user':
                user_content += message.get('content')
            elif role == 'assistant':
                assistant_content = message.get('content')
                history.append(ChatQaMessage(user_content, assistant_content))
                user_content = ''
                assistant_content = ''
            elif role =='system':
                system_content += message.get('content')
        if user_content == '':
            raise Exception('no user message')
        if system_content != '':
            # NOTE: simulate a system message as a Q/A pair.  Testing showed a
            # persona starting with "你需要扮演ChatGPT" takes effect, while one
            # starting with "你是ChatGPT" makes the model deny it outright.
            system_qa = ChatQaMessage(system_content, '好的,我会严格按照你的设定回答问题')
            history.insert(0, system_qa)
        logger.debug("[QWEN] converted qa messages: {}".format([item.to_dict() for item in history]))
        logger.debug("[QWEN] user content as prompt: {}".format(user_content))
        return user_content, history
    def get_completion_content(self, response, node_id):
        """Extract the answer text from a Bailian response.

        With a node_id (flow-orchestrated app), the answer is nested inside a
        JSON string under finalResult; otherwise Data.Text is the answer.
        """
        if not response['Success']:
            return f"[ERROR]\n{response['Code']}:{response['Message']}"
        text = response['Data']['Text']
        if node_id == '':
            return text
        # TODO: when the app is built with flow orchestration, the response has the
        # structure below; the final answer lives in
        # ['finalResult'][node_id]['response']['text'].  Keep this ad-hoc for now.
        # {
        #     'Success': True,
        #     'Code': None,
        #     'Message': None,
        #     'Data': {
        #         'ResponseId': '9822f38dbacf4c9b8daf5ca03a2daf15',
        #         'SessionId': 'session_id',
        #         'Text': '{"finalResult":{"LLM_T7islK":{"params":{"modelId":"qwen-plus-v1","prompt":"${systemVars.query}${bizVars.Text}"},"response":{"text":"作为一个AI语言模型,我没有年龄,因为我没有生日。\n我只是一个程序,没有生命和身体。"}}}}',
        #         'Thoughts': [],
        #         'Debug': {},
        #         'DocReferences': []
        #     },
        #     'RequestId': '8e11d31551ce4c3f83f49e6e0dd998b0',
        #     'Failed': None
        # }
        text_dict = json.loads(text)
        completion_content = text_dict['finalResult'][node_id]['response']['text']
        return completion_content
    def calc_tokens(self, messages, completion_content):
        """Approximate (completion_tokens, total_tokens) by character counts."""
        completion_tokens = len(completion_content)
        prompt_tokens = 0
        for message in messages:
            prompt_tokens += len(message["content"])
        return completion_tokens, prompt_tokens + completion_tokens
The provided code snippet includes necessary dependencies for implementing the `create_bot` function. Write a Python function `def create_bot(bot_type)` to solve the following problem:
create a bot_type instance :param bot_type: bot type code :return: bot instance
Here is the function:
def create_bot(bot_type):
    """
    create a bot_type instance
    :param bot_type: bot type code
    :return: bot instance
    :raises RuntimeError: when bot_type matches no known bot type
    """
    if bot_type == const.BAIDU:
        # Baidu Unit was replaced by the Baidu WenXin QianFan chat API
        # from bot.baidu.baidu_unit_bot import BaiduUnitBot
        # return BaiduUnitBot()
        from bot.baidu.baidu_wenxin import BaiduWenxinBot
        return BaiduWenxinBot()
    elif bot_type == const.CHATGPT:
        # ChatGPT web-side API
        from bot.chatgpt.chat_gpt_bot import ChatGPTBot
        return ChatGPTBot()
    elif bot_type == const.OPEN_AI:
        # OpenAI official chat completion API
        from bot.openai.open_ai_bot import OpenAIBot
        return OpenAIBot()
    elif bot_type == const.CHATGPTONAZURE:
        # Azure chatgpt service https://azure.microsoft.com/en-in/products/cognitive-services/openai-service/
        from bot.chatgpt.chat_gpt_bot import AzureChatGPTBot
        return AzureChatGPTBot()
    elif bot_type == const.XUNFEI:
        from bot.xunfei.xunfei_spark_bot import XunFeiBot
        return XunFeiBot()
    elif bot_type == const.LINKAI:
        from bot.linkai.link_ai_bot import LinkAIBot
        return LinkAIBot()
    elif bot_type == const.CLAUDEAI:
        from bot.claude.claude_ai_bot import ClaudeAIBot
        return ClaudeAIBot()
    elif bot_type == const.QWEN:
        from bot.ali.ali_qwen_bot import AliQwenBot
        return AliQwenBot()
    elif bot_type == const.GEMINI:
        from bot.gemini.google_gemini_bot import GoogleGeminiBot
        return GoogleGeminiBot()
    elif bot_type == const.ZHIPU_AI:
        from bot.zhipuai.zhipuai_bot import ZHIPUAIBot
        return ZHIPUAIBot()
    # Fixed: the bare `raise RuntimeError` carried no context about what failed.
    raise RuntimeError(f"Invalid bot type: {bot_type}")
9,780 | from bot.session_manager import Session
from common.log import logger
The provided code snippet includes necessary dependencies for implementing the `num_tokens_from_string` function. Write a Python function `def num_tokens_from_string(string: str, model: str) -> int` to solve the following problem:
Returns the number of tokens in a text string.
Here is the function:
def num_tokens_from_string(string: str, model: str) -> int:
    """Returns the number of tokens in a text string.

    :param string: the text to tokenize
    :param model: model name used to select the tiktoken encoding
    :return: token count under the selected encoding
    """
    import tiktoken
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Fixed: unknown model names previously raised KeyError; fall back to
        # cl100k_base, consistent with num_tokens_from_messages in this project.
        logger.debug("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    num_tokens = len(encoding.encode(string, disallowed_special=()))
    return num_tokens
9,781 | from bot.session_manager import Session
from common.log import logger
from common import const
def num_tokens_by_character(messages):
    """Approximate token usage as the total character count of all message contents."""
    return sum(len(entry["content"]) for entry in messages)
=
The provided code snippet includes necessary dependencies for implementing the `num_tokens_from_messages` function. Write a Python function `def num_tokens_from_messages(messages, model)` to solve the following problem:
Returns the number of tokens used by a list of messages.
Here is the function:
def num_tokens_from_messages(messages, model):
    """Returns the number of tokens used by a list of messages.

    Non-OpenAI models (wenxin/xunfei/gemini) are counted by raw characters.
    Known OpenAI model variants are normalized to their base model, then
    counted with tiktoken using the chat-format overhead constants.
    """
    if model in ["wenxin", "xunfei", const.GEMINI]:
        return num_tokens_by_character(messages)
    import tiktoken
    # Normalize dated/region variants to their base models before counting.
    if model in ["gpt-3.5-turbo-0301", "gpt-35-turbo", "gpt-3.5-turbo-1106"]:
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
    elif model in ["gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo-0613",
                   "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-35-turbo-16k", "gpt-4-turbo-preview",
                   "gpt-4-1106-preview", const.GPT4_TURBO_PREVIEW, const.GPT4_VISION_PREVIEW]:
        return num_tokens_from_messages(messages, model="gpt-4")
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        logger.debug("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    if model == "gpt-3.5-turbo":
        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
        tokens_per_name = -1  # if there's a name, the role is omitted
    elif model == "gpt-4":
        tokens_per_message = 3
        tokens_per_name = 1
    else:
        logger.warn(f"num_tokens_from_messages() is not implemented for model {model}. Returning num tokens assuming gpt-3.5-turbo.")
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens
9,782 | from bot.session_manager import Session
from common.log import logger
The provided code snippet includes necessary dependencies for implementing the `num_tokens_from_messages` function. Write a Python function `def num_tokens_from_messages(messages, model)` to solve the following problem:
Returns the number of tokens used by a list of messages.
Here is the function:
def num_tokens_from_messages(messages, model):
    """Returns the number of tokens used by a list of messages.

    The official token rule is not fully specified ("roughly the number of
    Chinese characters plus 1.3x the word count for other languages"), so
    token usage is approximated by raw character count.  This only affects
    the decision of when to drop old history, not normal operation.
    """
    return sum(len(message["content"]) for message in messages)
9,783 | import requests, json
from bot.bot import Bot
from bot.session_manager import SessionManager
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
from bridge.context import ContextType, Context
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf
from common import const
import time
import _thread as thread
import datetime
from datetime import datetime
from wsgiref.handlers import format_date_time
from urllib.parse import urlencode
import base64
import ssl
import hashlib
import hmac
import json
from time import mktime
from urllib.parse import urlparse
import websocket
import queue
import threading
import random
logger.error(f"[XunFei] error: {str(error)}"
logger.info(f"[XunFei] Start websocket, session_id={ws.session_id}")
=
def on_error(ws, error):
    # Websocket error callback: record the failure for diagnostics.
    logger.error(f"[XunFei] error: {str(error)}")
9,784 | import requests, json
from bot.bot import Bot
from bot.session_manager import SessionManager
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
from bridge.context import ContextType, Context
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf
from common import const
import time
import _thread as thread
import datetime
from datetime import datetime
from wsgiref.handlers import format_date_time
from urllib.parse import urlencode
import base64
import ssl
import hashlib
import hmac
import json
from time import mktime
from urllib.parse import urlparse
import websocket
import queue
import threading
import random
data_queue = queue_map.get(ws.session_id)
data_queue.put("END")
def on_close(ws, one, two):
    # Websocket close callback: push the "END" sentinel so the consumer
    # reading this session's queue knows the stream is over.
    data_queue = queue_map.get(ws.session_id)
    data_queue.put("END")
9,785 | import requests, json
from bot.bot import Bot
from bot.session_manager import SessionManager
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
from bridge.context import ContextType, Context
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf
from common import const
import time
import _thread as thread
import datetime
from datetime import datetime
from wsgiref.handlers import format_date_time
from urllib.parse import urlencode
import base64
import ssl
import hashlib
import hmac
import json
from time import mktime
from urllib.parse import urlparse
import websocket
import queue
import threading
import random
logger.error(f"[XunFei] error: {str(error)}"
logger.info(f"[XunFei] Start websocket, session_id={ws.session_id}")
thread.start_new_thread(run, (ws, ))
def run(ws, *args):
=
def on_open(ws):
    # Websocket open callback: start the request sender in a new thread so
    # the callback returns immediately.
    logger.info(f"[XunFei] Start websocket, session_id={ws.session_id}")
    thread.start_new_thread(run, (ws, ))
9,786 | import requests, json
from bot.bot import Bot
from bot.session_manager import SessionManager
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
from bridge.context import ContextType, Context
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf
from common import const
import time
import _thread as thread
import datetime
from datetime import datetime
from wsgiref.handlers import format_date_time
from urllib.parse import urlencode
import base64
import ssl
import hashlib
import hmac
import json
from time import mktime
from urllib.parse import urlparse
import websocket
import queue
import threading
import random
class ReplyItem:
    """One chunk of a streamed reply: text, optional usage stats, and an end flag."""

    def __init__(self, reply, usage=None, is_end=False):
        # Text content of this chunk.
        self.reply = reply
        # Token-usage stats; only present on the final chunk.
        self.usage = usage
        # True when this is the last chunk of the stream.
        self.is_end = is_end
error):
logger.error(f"[XunFei] error: {str(error)}"
data_queue = queue_map.get(ws.session_id)
data_queue.put("END")
logger.info(f"[XunFei] Start websocket, session_id={ws.session_id}")
data = json.dumps(
gen_params(appid=ws.appid,
domain=ws.domain,
question=ws.question,
temperature=ws.temperature))
code = data['header']['code']
if code != 0:
logger.error(f'请求错误: {code}, {data}')
ws.close()
else:
choices = data["payload"]["choices"]
status = choices["status"]
content = choices["text"][0]["content"]
data_queue = queue_map.get(ws.session_id)
if not data_queue:
logger.error(
f"[XunFei] can't find data queue, session_id={ws.session_id}")
return
reply_item = ReplyItem(content)
if status == 2:
usage = data["payload"].get("usage")
reply_item = ReplyItem(content, usage)
reply_item.is_end = True
ws.close()
data_queue.put(reply_item)
=
def on_message(ws, message):
    # Websocket message callback: parse one Spark response frame and forward
    # its content chunk to this session's queue.
    data = json.loads(message)
    code = data['header']['code']
    if code != 0:
        # Non-zero code means the request failed; log and stop the stream.
        logger.error(f'请求错误: {code}, {data}')
        ws.close()
    else:
        choices = data["payload"]["choices"]
        status = choices["status"]
        content = choices["text"][0]["content"]
        data_queue = queue_map.get(ws.session_id)
        if not data_queue:
            logger.error(
                f"[XunFei] can't find data queue, session_id={ws.session_id}")
            return
        reply_item = ReplyItem(content)
        if status == 2:
            # status 2 marks the final frame: attach usage stats, flag the
            # end of stream, and close the socket.
            usage = data["payload"].get("usage")
            reply_item = ReplyItem(content, usage)
            reply_item.is_end = True
            ws.close()
        data_queue.put(reply_item)
9,787 | from bot.session_manager import Session
from common.log import logger
def num_tokens_from_messages(messages, model):
    """Approximate token usage as the total character count across all messages."""
    total = 0
    for item in messages:
        total += len(item["content"])
    return total
9,788 | from bot.session_manager import Session
from common.log import logger
The provided code snippet includes necessary dependencies for implementing the `num_tokens_from_messages` function. Write a Python function `def num_tokens_from_messages(messages, model)` to solve the following problem:
Returns the number of tokens used by a list of messages.
Here is the function:
def num_tokens_from_messages(messages, model):
    """Returns the number of tokens used by a list of messages.

    Per the official rule ("one token per Chinese character; one token per
    ~3-4 English letters or one word" — see
    https://help.aliyun.com/document_detail/2586397.html), usage is roughly
    estimated by string length, which is sufficient for history trimming.
    """
    return sum(len(msg["content"]) for msg in messages)
9,789 | import json
import logging
import os
import pickle
from common.log import logger
名 会 图
指
def get_root():
def conf():
=
def get_appdata_dir():
    """Return the application's data directory, creating it on first use."""
    data_path = os.path.join(get_root(), conf().get("appdata_dir", ""))
    if os.path.exists(data_path):
        return data_path
    logger.info("[INIT] data path not exists, create it: {}".format(data_path))
    os.makedirs(data_path)
    return data_path
9,790 | import json
import logging
import os
import pickle
from common.log import logger
名 会 图
指
def conf():
def subscribe_msg():
    """Render the configured subscribe greeting with the single-chat trigger prefix."""
    template = conf().get("subscribe_msg", "")
    prefix = conf().get("single_chat_prefix", [""])[0]
    return template.format(trigger_prefix=prefix)
9,791 | import json
import logging
import os
import pickle
from common.log import logger
名 会 图
指
plugin_config = {}
The provided code snippet includes necessary dependencies for implementing the `write_plugin_config` function. Write a Python function `def write_plugin_config(pconf: dict)` to solve the following problem:
写入插件全局配置 :param pconf: 全量插件配置
Here is the function:
def write_plugin_config(pconf: dict):
    """
    Write the global plugin configuration.
    :param pconf: the full plugin configuration mapping
    """
    global plugin_config
    # Keys are normalized to lowercase so lookups are case-insensitive.
    plugin_config.update({name.lower(): value for name, value in pconf.items()})
9,792 | import json
import logging
import os
import pickle
from common.log import logger
名 会 图
指
plugin_config = {}
The provided code snippet includes necessary dependencies for implementing the `pconf` function. Write a Python function `def pconf(plugin_name: str) -> dict` to solve the following problem:
根据插件名称获取配置 :param plugin_name: 插件名称 :return: 该插件的配置项
Here is the function:
def pconf(plugin_name: str) -> dict:
    """
    Look up one plugin's configuration by name (case-insensitive).
    :param plugin_name: the plugin name
    :return: that plugin's config dict, or None if absent
    """
    key = plugin_name.lower()
    return plugin_config.get(key)
9,793 | import os
import re
import threading
import time
from asyncio import CancelledError
from concurrent.futures import Future, ThreadPoolExecutor
from concurrent import futures
from bridge.context import *
from bridge.reply import *
from channel.channel import Channel
from common.dequeue import Dequeue
from common import memory
from plugins import *
def check_prefix(content, prefix_list):
    """Return the first prefix in prefix_list that content starts with, else None."""
    if not prefix_list:
        return None
    return next((p for p in prefix_list if content.startswith(p)), None)
9,794 | import os
import re
import threading
import time
from asyncio import CancelledError
from concurrent.futures import Future, ThreadPoolExecutor
from concurrent import futures
from bridge.context import *
from bridge.reply import *
from channel.channel import Channel
from common.dequeue import Dequeue
from common import memory
from plugins import *
def check_contain(content, keyword_list):
    """Return True if content contains any keyword; otherwise None.

    NOTE: the contract deliberately mirrors the original — None (not False)
    is returned both for a missing/empty keyword list and for no match.
    """
    if not keyword_list:
        return None
    if any(keyword in content for keyword in keyword_list):
        return True
    return None
9,795 | import web
from wechatpy.crypto import WeChatCrypto
from wechatpy.exceptions import InvalidSignatureException
from wechatpy.utils import check_signature
from config import conf
# openai apibase,当use_azure_chatgpt为true时,需要设置对应的api base
"open_ai_api_base": "https://api.openai.com/v1",
"proxy": "", # openai使用的代理
# chatgpt模型, 当use_azure_chatgpt为true时,其名称为Azure上model deployment名称
"model": "gpt-3.5-turbo", # 还支持 gpt-4, gpt-4-turbo, wenxin, xunfei, qwen
"use_azure_chatgpt": False, # 是否使用azure的chatgpt
"azure_deployment_id": "", # azure 模型部署名称
"azure_api_version": "", # azure api版本
# Bot触发配置
"single_chat_prefix": ["bot", "@bot"], # 私聊时文本需要包含该前缀才能触发机器人回复
"single_chat_reply_prefix": "[bot] ", # 私聊时自动回复的前缀,用于区分真人
"single_chat_reply_suffix": "", # 私聊时自动回复的后缀,\n 可以换行
"group_chat_prefix": ["@bot"], # 群聊时包含该前缀则会触发机器人回复
"group_chat_reply_prefix": "", # 群聊时自动回复的前缀
"group_chat_reply_suffix": "", # 群聊时自动回复的后缀,\n 可以换行
"group_chat_keyword": [], # 群聊时包含该关键词则会触发机器人回复
"group_at_off": False, # 是否关闭群聊时@bot的触发
"group_name_white_list": ["ChatGPT测试群", "ChatGPT测试群2"], # 开启自动回复的群名称列表
"group_name_keyword_white_list": [], # 开启自动回复的群名称关键词列表
"group_chat_in_one_session": ["ChatGPT测试群"], # 支持会话上下文共享的群名称
"nick_name_black_list": [], # 用户昵称黑名单
"group_welcome_msg": "", # 配置新人进群固定欢迎语,不配置则使用随机风格欢迎
"trigger_by_self": False, # 是否允许机器人触发
"text_to_image": "dall-e-2", # 图片生成模型,可选 dall-e-2, dall-e-3
"image_proxy": True, # 是否需要图片代理,国内访问LinkAI时需要
"image_create_prefix": ["画", "看", "找"], # 开启图片回复的前缀
"concurrency_in_session": 1, # 同一会话最多有多少条消息在处理中,大于1可能乱序
"image_create_size": "256x256", # 图片大小,可选有 256x256, 512x512, 1024x1024 (dall-e-3默认为1024x1024)
"group_chat_exit_group": False,
# chatgpt会话参数
"expires_in_seconds": 3600, # 无操作会话的过期时间
# 人格描述
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",
"conversation_max_tokens": 1000, # 支持上下文记忆的最多字符数
# chatgpt限流配置
"rate_limit_chatgpt": 20, # chatgpt的调用频率限制
"rate_limit_dalle": 50, # openai dalle的调用频率限制
# chatgpt api参数 参考https://platform.openai.com/docs/api-reference/chat/create
"temperature": 0.9,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"request_timeout": 180, # chatgpt请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": 120, # chatgpt重试超时时间,在这个时间内,将会自动重试
# Baidu 文心一言参数
"baidu_wenxin_model": "eb-instant", # 默认使用ERNIE-Bot-turbo模型
"baidu_wenxin_api_key": "", # Baidu api key
"baidu_wenxin_secret_key": "", # Baidu secret key
# 讯飞星火API
"xunfei_app_id": "", # 讯飞应用ID
"xunfei_api_key": "", # 讯飞 API key
"xunfei_api_secret": "", # 讯飞 API secret
# claude 配置
"claude_api_cookie": "",
"claude_uuid": "",
# 通义千问API, 获取方式查看文档 https://help.aliyun.com/document_detail/2587494.html
"qwen_access_key_id": "",
"qwen_access_key_secret": "",
"qwen_agent_key": "",
"qwen_app_id": "",
"qwen_node_id": "", # 流程编排模型用到的id,如果没有用到qwen_node_id,请务必保持为空字符串
# Google Gemini Api Key
"gemini_api_key": "",
# wework的通用配置
"wework_smart": True, # 配置wework是否使用已登录的企业微信,False为多开
# 语音设置
"speech_recognition": True, # 是否开启语音识别
"group_speech_recognition": False, # 是否开启群组语音识别
"voice_reply_voice": False, # 是否使用语音回复语音,需要设置对应语音合成引擎的api key
"always_reply_voice": False, # 是否一直使用语音回复
"voice_to_text": "openai", # 语音识别引擎,支持openai,baidu,google,azure
"text_to_voice": "openai", # 语音合成引擎,支持openai,baidu,google,pytts(offline),azure,elevenlabs
"text_to_voice_model": "tts-1",
"tts_voice_id": "alloy",
# baidu 语音api配置, 使用百度语音识别和语音合成时需要
"baidu_app_id": "",
"baidu_api_key": "",
"baidu_secret_key": "",
# 1536普通话(支持简单的英文识别) 1737英语 1637粤语 1837四川话 1936普通话远场
"baidu_dev_pid": "1536",
# azure 语音api配置, 使用azure语音识别和语音合成时需要
"azure_voice_api_key": "",
"azure_voice_region": "japaneast",
# elevenlabs 语音api配置
"xi_api_key": "", #获取ap的方法可以参考https://docs.elevenlabs.io/api-reference/quick-start/authentication
"xi_voice_id": "", #ElevenLabs提供了9种英式、美式等英语发音id,分别是“Adam/Antoni/Arnold/Bella/Domi/Elli/Josh/Rachel/Sam”
# 服务时间限制,目前支持itchat
"chat_time_module": False, # 是否开启服务时间限制
"chat_start_time": "00:00", # 服务开始时间
"chat_stop_time": "24:00", # 服务结束时间
# 翻译api
"translate": "baidu", # 翻译api,支持baidu
# baidu翻译api的配置
"baidu_translate_app_id": "", # 百度翻译api的appid
"baidu_translate_app_key": "", # 百度翻译api的秘钥
# itchat的配置
"hot_reload": False, # 是否开启热重载
# wechaty的配置
"wechaty_puppet_service_token": "", # wechaty的token
# wechatmp的配置
"wechatmp_token": "", # 微信公众平台的Token
"wechatmp_port": 8080, # 微信公众平台的端口,需要端口转发到80或443
"wechatmp_app_id": "", # 微信公众平台的appID
"wechatmp_app_secret": "", # 微信公众平台的appsecret
"wechatmp_aes_key": "", # 微信公众平台的EncodingAESKey,加密模式需要
# wechatcom的通用配置
"wechatcom_corp_id": "", # 企业微信公司的corpID
# wechatcomapp的配置
"wechatcomapp_token": "", # 企业微信app的token
"wechatcomapp_port": 9898, # 企业微信app的服务端口,不需要端口转发
"wechatcomapp_secret": "", # 企业微信app的secret
"wechatcomapp_agent_id": "", # 企业微信app的agent_id
"wechatcomapp_aes_key": "", # 企业微信app的aes_key
# 飞书配置
"feishu_port": 80, # 飞书bot监听端口
"feishu_app_id": "", # 飞书机器人应用APP Id
"feishu_app_secret": "", # 飞书机器人APP secret
"feishu_token": "", # 飞书 verification token
"feishu_bot_name": "", # 飞书机器人的名字
# 钉钉配置
"dingtalk_client_id": "", # 钉钉机器人Client ID
"dingtalk_client_secret": "", # 钉钉机器人Client Secret
# chatgpt指令自定义触发词
"clear_memory_commands": ["#清除记忆"], # 重置会话指令,必须以#开头
# channel配置
"channel_type": "wx", # 通道类型,支持:{wx,wxy,terminal,wechatmp,wechatmp_service,wechatcom_app}
"subscribe_msg": "", # 订阅消息, 支持: wechatmp, wechatmp_service, wechatcom_app
"debug": False, # 是否开启debug模式,开启后会打印更多日志
"appdata_dir": "", # 数据目录
# 插件配置
"plugin_trigger_prefix": "$", # 规范插件提供聊天相关指令的前缀,建议不要和管理员指令前缀"#"冲突
# 是否使用全局插件配置
"use_global_plugin_config": False,
"max_media_send_count": 3, # 单次最大发送媒体资源的个数
"media_send_interval": 1, # 发送图片的事件间隔,单位秒
# 智谱AI 平台配置
"zhipu_ai_api_key": "",
"zhipu_ai_api_base": "https://open.bigmodel.cn/api/paas/v4",
# LinkAI平台配置
"use_linkai": False,
"linkai_api_key": "",
"linkai_app_code": "",
"linkai_api_base": "https://api.link-ai.chat", # linkAI服务地址,若国内无法访问或延迟较高可改为 https://api.link-ai.tech
}
def conf():
[]
}
def verify_server(data):
    """Handle the WeChat MP server-verification handshake.

    Validates the request signature against the configured token and, on
    success, echoes back ``echostr`` as the platform expects.

    :param data: web.py input object carrying signature/timestamp/nonce/echostr
    :raises web.Forbidden: when the signature is invalid or any check fails
    """
    try:
        echostr = data.get("echostr", None)
        # Token as configured in the official-account platform's basic settings.
        token = conf().get("wechatmp_token")
        check_signature(token, data.signature, data.timestamp, data.nonce)
        return echostr
    except InvalidSignatureException:
        raise web.Forbidden("Invalid signature")
    except Exception as e:
        raise web.Forbidden(str(e))
9,796 | import os
import time
os.environ['ntwork_LOG'] = "ERROR"
import ntwork
def forever():
    """Keep the main thread alive until interrupted.

    Sleeps in short intervals so Ctrl-C is handled promptly; on
    KeyboardInterrupt the ntwork client is shut down and the process exits
    immediately (os._exit skips normal interpreter cleanup on purpose).
    """
    try:
        while True:
            time.sleep(0.1)
    except KeyboardInterrupt:
        ntwork.exit_()
        os._exit(0)
9,797 | import datetime
import json
import os
import re
import time
import pilk
from bridge.context import ContextType
from channel.chat_message import ChatMessage
from common.log import logger
from ntwork.const import send_type
=
def get_with_retry(get_func, max_retries=5, delay=5):
    """Call get_func until it yields a truthy result or retries run out.

    :param get_func: zero-argument callable producing the data
    :param max_retries: maximum number of attempts
    :param delay: seconds to wait between attempts
    :return: the last value produced by get_func (may be falsy on failure)
    """
    result = None
    for attempt in range(max_retries):
        result = get_func()
        if result:
            break
        logger.warning(f"获取数据失败,重试第{attempt + 1}次······")
        time.sleep(delay)  # pause before the next attempt
    return result
9,798 | import datetime
import json
import os
import re
import time
import pilk
from bridge.context import ContextType
from channel.chat_message import ChatMessage
from common.log import logger
from ntwork.const import send_type
=
def get_room_info(wework, conversation_id):
    """Look up a group-chat record by conversation_id via the wework client.

    :param wework: ntwork WeWork client instance
    :param conversation_id: id of the group conversation to find
    :return: the matching room dict, or None when the lookup fails or no
        room matches
    """
    logger.debug(f"传入的 conversation_id: {conversation_id}")
    rooms = wework.get_rooms()
    if not rooms or 'room_list' not in rooms:
        logger.error(f"获取群聊信息失败: {rooms}")
        return None
    time.sleep(1)
    logger.debug(f"获取到的群聊信息: {rooms}")
    matches = (room for room in rooms['room_list'] if room['conversation_id'] == conversation_id)
    return next(matches, None)
9,799 | import datetime
import json
import os
import re
import time
import pilk
from bridge.context import ContextType
from channel.chat_message import ChatMessage
from common.log import logger
from ntwork.const import send_type
=
def cdn_download(wework, message, file_name):
    """Download a CDN-hosted attachment from a wework message into ./tmp/<file_name>.

    Two CDN flavours are handled: wx-style (url + auth_key) and c2c-style
    (file_id).

    :param wework: ntwork WeWork client instance
    :param message: raw ntwork message dict; expects message["data"]["cdn"] metadata
    :param file_name: name of the file to save under the tmp directory
    """
    data = message["data"]
    aes_key = data["cdn"]["aes_key"]
    file_size = data["cdn"]["size"]
    # Build the save path under <cwd>/tmp.
    current_dir = os.getcwd()
    save_path = os.path.join(current_dir, "tmp", file_name)
    # wx-style CDN entry: download via url + auth_key.
    if "url" in data["cdn"].keys() and "auth_key" in data["cdn"].keys():
        url = data["cdn"]["url"]
        auth_key = data["cdn"]["auth_key"]
        # result = wework.wx_cdn_download(url, auth_key, aes_key, file_size, save_path) # ntwork's own wrapper is buggy: it omits the aes_key parameter
        """
        下载wx类型的cdn文件,以https开头
        """
        data = {
            'url': url,
            'auth_key': auth_key,
            'aes_key': aes_key,
            'size': file_size,
            'save_path': save_path
        }
        # Call wx_cdn_download's internal implementation directly so aes_key is included.
        result = wework._WeWork__send_sync(send_type.MT_WXCDN_DOWNLOAD_MSG, data)
    elif "file_id" in data["cdn"].keys():
        # c2c-style CDN entry: the message type selects the file_type constant.
        if message["type"] == 11042:
            file_type = 2  # presumably image — confirm against ntwork constants
        elif message["type"] == 11045:
            file_type = 5  # presumably file/voice — confirm against ntwork constants
        # NOTE(review): for any other message["type"], file_type is unbound and the
        # call below raises UnboundLocalError — confirm callers filter message types.
        file_id = data["cdn"]["file_id"]
        result = wework.c2c_cdn_download(file_id, aes_key, file_size, file_type, save_path)
    else:
        logger.error(f"something is wrong, data: {data}")
        return
    # Log the download result.
    logger.debug(f"result: {result}")
9,800 | import datetime
import json
import os
import re
import time
import pilk
from bridge.context import ContextType
from channel.chat_message import ChatMessage
from common.log import logger
from ntwork.const import send_type
=
def c2c_download_and_convert(wework, message, file_name):
    """Download a c2c CDN voice file (silk) into ./tmp and convert it to wav.

    :param wework: ntwork WeWork client instance
    :param message: raw ntwork message dict carrying data["cdn"] metadata
    :param file_name: output (silk) file name under the tmp directory
    """
    data = message["data"]
    aes_key = data["cdn"]["aes_key"]
    file_size = data["cdn"]["size"]
    file_type = 5  # c2c file_type constant; presumably voice — confirm against ntwork docs
    file_id = data["cdn"]["file_id"]
    current_dir = os.getcwd()
    save_path = os.path.join(current_dir, "tmp", file_name)
    result = wework.c2c_cdn_download(file_id, aes_key, file_size, file_type, save_path)
    logger.debug(result)
    # Convert the downloaded SILK file to WAV right away.
    base_name, _ = os.path.splitext(save_path)
    wav_file = base_name + ".wav"
    pilk.silk_to_wav(save_path, wav_file, rate=24000)
    # Remove the intermediate SILK file (best effort: ignore failures).
    try:
        os.remove(save_path)
    except Exception as e:
        pass
9,801 | import io
import os
import random
import tempfile
import threading
import ntwork
import requests
import uuid
from bridge.context import *
from bridge.reply import *
from channel.chat_channel import ChatChannel
from channel.wework.wework_message import *
from channel.wework.wework_message import WeworkMessage
from common.singleton import singleton
from common.log import logger
from common.time_check import time_checker
from common.utils import compress_imgfile, fsize
from config import conf
from channel.wework.run import wework
from channel.wework import run
from PIL import Image
def get_wxid_by_name(room_members, group_wxid, name):
    """Resolve a member's display name to their user_id within a group.

    :param room_members: mapping of group id -> {"member_list": [...]}
    :param group_wxid: id of the group to search
    :param name: room nickname or username to match
    :return: the member's user_id, or None when group or member is absent
    """
    group = room_members.get(group_wxid)
    if group is None:
        return None
    for member in group['member_list']:
        if name in (member['room_nickname'], member['username']):
            return member['user_id']
    return None
9,802 | import io
import os
import random
import tempfile
import threading
os.environ['ntwork_LOG'] = "ERROR"
import ntwork
import requests
import uuid
from bridge.context import *
from bridge.reply import *
from channel.chat_channel import ChatChannel
from channel.wework.wework_message import *
from channel.wework.wework_message import WeworkMessage
from common.singleton import singleton
from common.log import logger
from common.time_check import time_checker
from common.utils import compress_imgfile, fsize
from config import conf
from channel.wework.run import wework
from channel.wework import run
from PIL import Image
=
def fsize(file):
    """Return the size in bytes of a path, BytesIO, or seekable file object.

    :param file: a str path, io.BytesIO, or any object with seek/tell
    :raises TypeError: for unsupported input types
    """
    if isinstance(file, io.BytesIO):
        return file.getbuffer().nbytes
    if isinstance(file, str):
        return os.path.getsize(file)
    if hasattr(file, "seek") and hasattr(file, "tell"):
        origin = file.tell()
        file.seek(0, os.SEEK_END)
        total = file.tell()
        file.seek(origin)  # restore the caller's position
        return total
    raise TypeError("Unsupported type")
def compress_imgfile(file, max_size):
    """Re-encode an image as JPEG, lowering quality until it fits max_size.

    :param file: file-like object containing the source image
    :param max_size: maximum allowed size in bytes
    :return: the original file when it is already small enough, otherwise a
        BytesIO holding the compressed JPEG (best effort at minimum quality)
    """
    if fsize(file) <= max_size:
        return file
    file.seek(0)
    img = Image.open(file)
    rgb_image = img.convert("RGB")  # JPEG cannot store alpha; force RGB
    quality = 95
    while True:
        out_buf = io.BytesIO()
        rgb_image.save(out_buf, "JPEG", quality=quality)
        if fsize(out_buf) <= max_size or quality <= 5:
            # Stop at the target size — or at minimum quality, so we never
            # pass an invalid (<= 0) quality to Pillow (the previous version
            # decremented without bound when the image could not shrink).
            return out_buf
        quality -= 5
def download_and_compress_image(url, filename, quality=30):
    """Download an image into ./tmp, compressing it when it exceeds 10 MB.

    :param url: image URL to download
    :param filename: intended output name — NOTE(review): currently unused; the
        saved name below is a literal — confirm whether it should be interpolated
    :param quality: NOTE(review): unused; compression is delegated to compress_imgfile
    :return: path of the saved PNG file
    """
    # Ensure the tmp directory exists under the current working directory.
    directory = os.path.join(os.getcwd(), "tmp")
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Stream the download into an in-memory buffer.
    pic_res = requests.get(url, stream=True)
    image_storage = io.BytesIO()
    for block in pic_res.iter_content(1024):
        image_storage.write(block)
    # Compress when the image is 10 MB or larger.
    sz = fsize(image_storage)
    if sz >= 10 * 1024 * 1024:
        logger.info("[wework] image too large, ready to compress, sz={}".format(sz))
        image_storage = compress_imgfile(image_storage, 10 * 1024 * 1024 - 1)
        logger.info("[wework] image compressed, sz={}".format(fsize(image_storage)))
    # Rewind the buffer before handing it to PIL.
    image_storage.seek(0)
    # Decode and persist as PNG.
    image = Image.open(image_storage)
    image_path = os.path.join(directory, f"(unknown).png")
    image.save(image_path, "png")
    return image_path
9,803 | import io
import os
import random
import tempfile
import threading
os.environ['ntwork_LOG'] = "ERROR"
import ntwork
import requests
import uuid
from bridge.context import *
from bridge.reply import *
from channel.chat_channel import ChatChannel
from channel.wework.wework_message import *
from channel.wework.wework_message import WeworkMessage
from common.singleton import singleton
from common.log import logger
from common.time_check import time_checker
from common.utils import compress_imgfile, fsize
from config import conf
from channel.wework.run import wework
from channel.wework import run
from PIL import Image
=
def download_video(url, filename):
    """Download a video into ./tmp, aborting if it exceeds 30 MB.

    :param url: video URL to download
    :param filename: intended output name — NOTE(review): currently unused; the
        saved name below is a literal — confirm whether it should be interpolated
    :return: path of the saved mp4, or None when the 30 MB limit is hit
        (NOTE(review): a partial file is left on disk in that case — confirm
        whether it should be removed)
    """
    # Ensure the tmp directory exists under the current working directory.
    directory = os.path.join(os.getcwd(), "tmp")
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Stream the download, counting bytes as they arrive.
    response = requests.get(url, stream=True)
    total_size = 0
    video_path = os.path.join(directory, f"(unknown).mp4")
    with open(video_path, 'wb') as f:
        for block in response.iter_content(1024):
            total_size += len(block)
            # Abort once the cumulative size exceeds 30 MB.
            if total_size > 30 * 1024 * 1024:
                logger.info("[WX] Video is larger than 30MB, skipping...")
                return None
            f.write(block)
    return video_path
9,804 | import io
import os
import random
import tempfile
import threading
import ntwork
import requests
import uuid
from bridge.context import *
from bridge.reply import *
from channel.chat_channel import ChatChannel
from channel.wework.wework_message import *
from channel.wework.wework_message import WeworkMessage
from common.singleton import singleton
from common.log import logger
from common.time_check import time_checker
from common.utils import compress_imgfile, fsize
from config import conf
from channel.wework.run import wework
from channel.wework import run
from PIL import Image
=
def _check(func):
    """Decorator: drop messages older than one minute before dispatching.

    Messages without a timestamp pass straight through.
    """
    def wrapper(self, cmsg: ChatMessage):
        msg_id = cmsg.msg_id
        ts = cmsg.create_time  # message timestamp (seconds)
        if ts is not None and int(ts) < int(time.time()) - 60:
            # Skip history messages from more than a minute ago.
            logger.debug("[WX]history message {} skipped".format(msg_id))
            return
        return func(self, cmsg)
    return wrapper
9,805 | import io
import os
import random
import tempfile
import threading
import ntwork
import requests
import uuid
from bridge.context import *
from bridge.reply import *
from channel.chat_channel import ChatChannel
from channel.wework.wework_message import *
from channel.wework.wework_message import WeworkMessage
from common.singleton import singleton
from common.log import logger
from common.time_check import time_checker
from common.utils import compress_imgfile, fsize
from config import conf
from channel.wework.run import wework
from channel.wework import run
from PIL import Image
def create_message(wework_instance, message, is_group):
    """Build a WeworkMessage from a raw ntwork message dict."""
    kind = '群聊' if is_group else '单聊'
    logger.debug(f"正在为{kind}创建 WeworkMessage")
    cmsg = WeworkMessage(message, wework=wework_instance, is_group=is_group)
    logger.debug(f"cmsg:{cmsg}")
    return cmsg
def handle_message(cmsg, is_group):
    """Dispatch a WeworkMessage to the group or single-chat handler."""
    kind = '群聊' if is_group else '单聊'
    logger.debug(f"准备用 WeworkChannel 处理{kind}消息")
    channel = WeworkChannel()
    if is_group:
        channel.handle_group(cmsg)
    else:
        channel.handle_single(cmsg)
    logger.debug(f"已用 WeworkChannel 处理完{kind}消息")
=
def all_msg_handler(wework_instance: ntwork.WeWork, message):
    """ntwork global message callback: wrap incoming messages and dispatch them.

    :param wework_instance: the WeWork client that received the message
    :param message: raw message dict from ntwork
    :return: always None
    """
    logger.debug(f"收到消息: {message}")
    if 'data' in message:
        # Prefer conversation_id; fall back to room_conversation_id.
        conversation_id = message['data'].get('conversation_id', message['data'].get('room_conversation_id'))
        if conversation_id is not None:
            # Group conversations are identified by an "R:" marker in the id.
            is_group = "R:" in conversation_id
            try:
                cmsg = create_message(wework_instance=wework_instance, message=message, is_group=is_group)
            except NotImplementedError as e:
                # Unsupported message type: log and drop.
                logger.error(f"[WX]{message.get('MsgId', 'unknown')} 跳过: {e}")
                return None
            # Handle on a timer thread after a short random delay,
            # presumably to avoid bursty/robotic replies — confirm intent.
            delay = random.randint(1, 2)
            timer = threading.Timer(delay, handle_message, args=(cmsg, is_group))
            timer.start()
        else:
            logger.debug("消息数据中无 conversation_id")
            return None
    return None
9,806 | import io
import os
import random
import tempfile
import threading
import ntwork
import requests
import uuid
from bridge.context import *
from bridge.reply import *
from channel.chat_channel import ChatChannel
from channel.wework.wework_message import *
from channel.wework.wework_message import WeworkMessage
from common.singleton import singleton
from common.log import logger
from common.time_check import time_checker
from common.utils import compress_imgfile, fsize
from config import conf
from channel.wework.run import wework
from channel.wework import run
from PIL import Image
=
def accept_friend_with_retries(wework_instance, user_id, corp_id):
    """Accept a friend (contact) request via the wework client.

    NOTE(review): despite the name, no retry loop is implemented here — the
    request is attempted exactly once; confirm whether retries are intended.

    :param wework_instance: ntwork WeWork client
    :param user_id: id of the user whose request is accepted
    :param corp_id: id of the user's company
    """
    result = wework_instance.accept_friend(user_id, corp_id)
    logger.debug(f'result:{result}')
9,807 | import io
import os
import random
import tempfile
import threading
import ntwork
import requests
import uuid
from bridge.context import *
from bridge.reply import *
from channel.chat_channel import ChatChannel
from channel.wework.wework_message import *
from channel.wework.wework_message import WeworkMessage
from common.singleton import singleton
from common.log import logger
from common.time_check import time_checker
from common.utils import compress_imgfile, fsize
from config import conf
from channel.wework.run import wework
from channel.wework import run
from PIL import Image
=
def get_with_retry(get_func, max_retries=5, delay=5):
    """Poll get_func up to max_retries times, pausing delay seconds between tries.

    :param get_func: zero-argument callable producing the data
    :param max_retries: maximum number of attempts
    :param delay: seconds to wait after a failed attempt
    :return: the first truthy value produced, or the last (falsy) one
    """
    attempt = 0
    result = None
    while attempt < max_retries:
        result = get_func()
        if result:
            return result
        logger.warning(f"获取数据失败,重试第{attempt + 1}次······")
        attempt += 1
        time.sleep(delay)
    return result
9,808 | import io
import json
import os
import threading
import time
import requests
from bridge.context import *
from bridge.reply import *
from channel.chat_channel import ChatChannel
from channel import chat_channel
from channel.wechat.wechat_message import *
from common.expired_dict import ExpiredDict
from common.log import logger
from common.singleton import singleton
from common.time_check import time_checker
from config import conf, get_appdata_dir
from lib import itchat
from lib.itchat.content import *
class WechatChannel(ChatChannel):
def __init__(self):
def startup(self):
def exitCallback(self):
def loginCallback(self):
def handle_single(self, cmsg: ChatMessage):
def handle_group(self, cmsg: ChatMessage):
def send(self, reply: Reply, context: Context):
=
def handler_single_msg(msg):
    """itchat callback for private messages: wrap and dispatch them."""
    try:
        cmsg = WechatMessage(msg, False)
    except NotImplementedError as e:
        # Unsupported message type: log and ignore.
        logger.debug("[WX]single message {} skipped: {}".format(msg["MsgId"], e))
    else:
        WechatChannel().handle_single(cmsg)
    return None
9,809 | import io
import json
import os
import threading
import time
import requests
from bridge.context import *
from bridge.reply import *
from channel.chat_channel import ChatChannel
from channel import chat_channel
from channel.wechat.wechat_message import *
from common.expired_dict import ExpiredDict
from common.log import logger
from common.singleton import singleton
from common.time_check import time_checker
from config import conf, get_appdata_dir
from lib import itchat
from lib.itchat.content import *
class WechatChannel(ChatChannel):
def __init__(self):
def startup(self):
def exitCallback(self):
def loginCallback(self):
def handle_single(self, cmsg: ChatMessage):
def handle_group(self, cmsg: ChatMessage):
def send(self, reply: Reply, context: Context):
=
def handler_group_msg(msg):
    """itchat callback for group messages: wrap and dispatch them."""
    try:
        cmsg = WechatMessage(msg, True)
    except NotImplementedError as e:
        # Unsupported message type: log and ignore.
        logger.debug("[WX]group message {} skipped: {}".format(msg["MsgId"], e))
    else:
        WechatChannel().handle_group(cmsg)
    return None
9,810 | import io
import json
import os
import threading
import time
import requests
from bridge.context import *
from bridge.reply import *
from channel.chat_channel import ChatChannel
from channel import chat_channel
from channel.wechat.wechat_message import *
from common.expired_dict import ExpiredDict
from common.log import logger
from common.singleton import singleton
from common.time_check import time_checker
from config import conf, get_appdata_dir
from lib import itchat
from lib.itchat.content import *
=
# openai apibase,当use_azure_chatgpt为true时,需要设置对应的api base
"open_ai_api_base": "https://api.openai.com/v1",
"proxy": "", # openai使用的代理
# chatgpt模型, 当use_azure_chatgpt为true时,其名称为Azure上model deployment名称
"model": "gpt-3.5-turbo", # 还支持 gpt-4, gpt-4-turbo, wenxin, xunfei, qwen
"use_azure_chatgpt": False, # 是否使用azure的chatgpt
"azure_deployment_id": "", # azure 模型部署名称
"azure_api_version": "", # azure api版本
# Bot触发配置
"single_chat_prefix": ["bot", "@bot"], # 私聊时文本需要包含该前缀才能触发机器人回复
"single_chat_reply_prefix": "[bot] ", # 私聊时自动回复的前缀,用于区分真人
"single_chat_reply_suffix": "", # 私聊时自动回复的后缀,\n 可以换行
"group_chat_prefix": ["@bot"], # 群聊时包含该前缀则会触发机器人回复
"group_chat_reply_prefix": "", # 群聊时自动回复的前缀
"group_chat_reply_suffix": "", # 群聊时自动回复的后缀,\n 可以换行
"group_chat_keyword": [], # 群聊时包含该关键词则会触发机器人回复
"group_at_off": False, # 是否关闭群聊时@bot的触发
"group_name_white_list": ["ChatGPT测试群", "ChatGPT测试群2"], # 开启自动回复的群名称列表
"group_name_keyword_white_list": [], # 开启自动回复的群名称关键词列表
"group_chat_in_one_session": ["ChatGPT测试群"], # 支持会话上下文共享的群名称
"nick_name_black_list": [], # 用户昵称黑名单
"group_welcome_msg": "", # 配置新人进群固定欢迎语,不配置则使用随机风格欢迎
"trigger_by_self": False, # 是否允许机器人触发
"text_to_image": "dall-e-2", # 图片生成模型,可选 dall-e-2, dall-e-3
"image_proxy": True, # 是否需要图片代理,国内访问LinkAI时需要
"image_create_prefix": ["画", "看", "找"], # 开启图片回复的前缀
"concurrency_in_session": 1, # 同一会话最多有多少条消息在处理中,大于1可能乱序
"image_create_size": "256x256", # 图片大小,可选有 256x256, 512x512, 1024x1024 (dall-e-3默认为1024x1024)
"group_chat_exit_group": False,
# chatgpt会话参数
"expires_in_seconds": 3600, # 无操作会话的过期时间
# 人格描述
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",
"conversation_max_tokens": 1000, # 支持上下文记忆的最多字符数
# chatgpt限流配置
"rate_limit_chatgpt": 20, # chatgpt的调用频率限制
"rate_limit_dalle": 50, # openai dalle的调用频率限制
# chatgpt api参数 参考https://platform.openai.com/docs/api-reference/chat/create
"temperature": 0.9,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"request_timeout": 180, # chatgpt请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": 120, # chatgpt重试超时时间,在这个时间内,将会自动重试
# Baidu 文心一言参数
"baidu_wenxin_model": "eb-instant", # 默认使用ERNIE-Bot-turbo模型
"baidu_wenxin_api_key": "", # Baidu api key
"baidu_wenxin_secret_key": "", # Baidu secret key
# 讯飞星火API
"xunfei_app_id": "", # 讯飞应用ID
"xunfei_api_key": "", # 讯飞 API key
"xunfei_api_secret": "", # 讯飞 API secret
# claude 配置
"claude_api_cookie": "",
"claude_uuid": "",
# 通义千问API, 获取方式查看文档 https://help.aliyun.com/document_detail/2587494.html
"qwen_access_key_id": "",
"qwen_access_key_secret": "",
"qwen_agent_key": "",
"qwen_app_id": "",
"qwen_node_id": "", # 流程编排模型用到的id,如果没有用到qwen_node_id,请务必保持为空字符串
# Google Gemini Api Key
"gemini_api_key": "",
# wework的通用配置
"wework_smart": True, # 配置wework是否使用已登录的企业微信,False为多开
# 语音设置
"speech_recognition": True, # 是否开启语音识别
"group_speech_recognition": False, # 是否开启群组语音识别
"voice_reply_voice": False, # 是否使用语音回复语音,需要设置对应语音合成引擎的api key
"always_reply_voice": False, # 是否一直使用语音回复
"voice_to_text": "openai", # 语音识别引擎,支持openai,baidu,google,azure
"text_to_voice": "openai", # 语音合成引擎,支持openai,baidu,google,pytts(offline),azure,elevenlabs
"text_to_voice_model": "tts-1",
"tts_voice_id": "alloy",
# baidu 语音api配置, 使用百度语音识别和语音合成时需要
"baidu_app_id": "",
"baidu_api_key": "",
"baidu_secret_key": "",
# 1536普通话(支持简单的英文识别) 1737英语 1637粤语 1837四川话 1936普通话远场
"baidu_dev_pid": "1536",
# azure 语音api配置, 使用azure语音识别和语音合成时需要
"azure_voice_api_key": "",
"azure_voice_region": "japaneast",
# elevenlabs 语音api配置
"xi_api_key": "", #获取ap的方法可以参考https://docs.elevenlabs.io/api-reference/quick-start/authentication
"xi_voice_id": "", #ElevenLabs提供了9种英式、美式等英语发音id,分别是“Adam/Antoni/Arnold/Bella/Domi/Elli/Josh/Rachel/Sam”
# 服务时间限制,目前支持itchat
"chat_time_module": False, # 是否开启服务时间限制
"chat_start_time": "00:00", # 服务开始时间
"chat_stop_time": "24:00", # 服务结束时间
# 翻译api
"translate": "baidu", # 翻译api,支持baidu
# baidu翻译api的配置
"baidu_translate_app_id": "", # 百度翻译api的appid
"baidu_translate_app_key": "", # 百度翻译api的秘钥
# itchat的配置
"hot_reload": False, # 是否开启热重载
# wechaty的配置
"wechaty_puppet_service_token": "", # wechaty的token
# wechatmp的配置
"wechatmp_token": "", # 微信公众平台的Token
"wechatmp_port": 8080, # 微信公众平台的端口,需要端口转发到80或443
"wechatmp_app_id": "", # 微信公众平台的appID
"wechatmp_app_secret": "", # 微信公众平台的appsecret
"wechatmp_aes_key": "", # 微信公众平台的EncodingAESKey,加密模式需要
# wechatcom的通用配置
"wechatcom_corp_id": "", # 企业微信公司的corpID
# wechatcomapp的配置
"wechatcomapp_token": "", # 企业微信app的token
"wechatcomapp_port": 9898, # 企业微信app的服务端口,不需要端口转发
"wechatcomapp_secret": "", # 企业微信app的secret
"wechatcomapp_agent_id": "", # 企业微信app的agent_id
"wechatcomapp_aes_key": "", # 企业微信app的aes_key
# 飞书配置
"feishu_port": 80, # 飞书bot监听端口
"feishu_app_id": "", # 飞书机器人应用APP Id
"feishu_app_secret": "", # 飞书机器人APP secret
"feishu_token": "", # 飞书 verification token
"feishu_bot_name": "", # 飞书机器人的名字
# 钉钉配置
"dingtalk_client_id": "", # 钉钉机器人Client ID
"dingtalk_client_secret": "", # 钉钉机器人Client Secret
# chatgpt指令自定义触发词
"clear_memory_commands": ["#清除记忆"], # 重置会话指令,必须以#开头
# channel配置
"channel_type": "wx", # 通道类型,支持:{wx,wxy,terminal,wechatmp,wechatmp_service,wechatcom_app}
"subscribe_msg": "", # 订阅消息, 支持: wechatmp, wechatmp_service, wechatcom_app
"debug": False, # 是否开启debug模式,开启后会打印更多日志
"appdata_dir": "", # 数据目录
# 插件配置
"plugin_trigger_prefix": "$", # 规范插件提供聊天相关指令的前缀,建议不要和管理员指令前缀"#"冲突
# 是否使用全局插件配置
"use_global_plugin_config": False,
"max_media_send_count": 3, # 单次最大发送媒体资源的个数
"media_send_interval": 1, # 发送图片的事件间隔,单位秒
# 智谱AI 平台配置
"zhipu_ai_api_key": "",
"zhipu_ai_api_base": "https://open.bigmodel.cn/api/paas/v4",
# LinkAI平台配置
"use_linkai": False,
"linkai_api_key": "",
"linkai_app_code": "",
"linkai_api_base": "https://api.link-ai.chat", # linkAI服务地址,若国内无法访问或延迟较高可改为 https://api.link-ai.tech
}
def conf():
[]
}
def _check(func):
    """Decorator: de-duplicate messages, skip stale history (under hot reload)
    and the account's own private messages before dispatching to func."""
    def wrapper(self, cmsg: ChatMessage):
        msg_id = cmsg.msg_id
        if msg_id in self.receivedMsgs:
            # Already seen this message id: ignore the duplicate delivery.
            logger.info("Wechat message {} already received, ignore".format(msg_id))
            return
        self.receivedMsgs[msg_id] = True
        create_time = cmsg.create_time  # message timestamp (seconds)
        stale = conf().get("hot_reload") == True and int(create_time) < int(time.time()) - 60
        if stale:
            # Under hot reload, old messages are replayed on login: skip them.
            logger.debug("[WX]history message {} skipped".format(msg_id))
            return
        if cmsg.my_msg and not cmsg.is_group:
            # Skip the account's own outgoing private messages.
            logger.debug("[WX]my message {} skipped".format(msg_id))
            return
        return func(self, cmsg)
    return wrapper
9,811 | import io
import json
import os
import threading
import time
import requests
from bridge.context import *
from bridge.reply import *
from channel.chat_channel import ChatChannel
from channel import chat_channel
from channel.wechat.wechat_message import *
from common.expired_dict import ExpiredDict
from common.log import logger
from common.singleton import singleton
from common.time_check import time_checker
from config import conf, get_appdata_dir
from lib import itchat
from lib.itchat.content import *
def _send_qr_code(qrcode_list: list):
def qrCallback(uuid, status, qrcode):
    """itchat login callback: surface the login QR code to the user.

    When a fresh QR code is issued (status == "0") it is shown in a local
    window (best effort), printed as URLs to third-party QR renderers,
    forwarded via _send_qr_code, and finally rendered as ASCII art.

    :param uuid: web-login session uuid
    :param status: login status code; "0" means a new QR code is available
    :param qrcode: PNG bytes of the QR code image
    """
    # logger.debug("qrCallback: {} {}".format(uuid,status))
    if status == "0":
        try:
            # Best effort: pop up the QR image locally (no-op on headless hosts).
            from PIL import Image
            img = Image.open(io.BytesIO(qrcode))
            _thread = threading.Thread(target=img.show, args=("QRCode",))
            _thread.setDaemon(True)  # NOTE(review): setDaemon is deprecated; daemon=True is the modern spelling
            _thread.start()
        except Exception as e:
            pass
        import qrcode
        url = f"https://login.weixin.qq.com/l/{uuid}"
        # Third-party QR rendering services as a fallback for remote/headless use.
        qr_api1 = "https://api.isoyu.com/qr/?m=1&e=L&p=20&url={}".format(url)
        qr_api2 = "https://api.qrserver.com/v1/create-qr-code/?size=400×400&data={}".format(url)
        qr_api3 = "https://api.pwmqr.com/qrcode/create/?url={}".format(url)
        qr_api4 = "https://my.tv.sohu.com/user/a/wvideo/getQRCode.do?text={}".format(url)
        print("You can also scan QRCode in any website below:")
        print(qr_api3)
        print(qr_api4)
        print(qr_api2)
        print(qr_api1)
        _send_qr_code([qr_api1, qr_api2, qr_api3, qr_api4])
        # Render the QR code as ASCII art in the terminal.
        qr = qrcode.QRCode(border=1)
        qr.add_data(url)
        qr.make(fit=True)
        qr.print_ascii(invert=True)
9,812 | import io
import json
import os
import threading
import time
import requests
from bridge.context import *
from bridge.reply import *
from channel.chat_channel import ChatChannel
from channel import chat_channel
from channel.wechat.wechat_message import *
from common.expired_dict import ExpiredDict
from common.log import logger
from common.singleton import singleton
from common.time_check import time_checker
from config import conf, get_appdata_dir
from lib import itchat
from lib.itchat.content import *
chat_client: LinkAIClient
def _send_login_success():
try:
from common.linkai_client import chat_client
if chat_client.client_id:
chat_client.send_login_success()
except Exception as e:
pass | null |
9,813 | import io
import json
import os
import threading
import time
import requests
from bridge.context import *
from bridge.reply import *
from channel.chat_channel import ChatChannel
from channel import chat_channel
from channel.wechat.wechat_message import *
from common.expired_dict import ExpiredDict
from common.log import logger
from common.singleton import singleton
from common.time_check import time_checker
from config import conf, get_appdata_dir
from lib import itchat
from lib.itchat.content import *
chat_client: LinkAIClient
def _send_logout():
try:
from common.linkai_client import chat_client
if chat_client.client_id:
chat_client.send_logout()
except Exception as e:
pass | null |
9,814 |
The provided code snippet includes necessary dependencies for implementing the `get_pcm_from_wav` function. Write a Python function `def get_pcm_from_wav(wav_path)` to solve the following problem:
从 wav 文件中读取 pcm :param wav_path: wav 文件路径 :returns: pcm 数据
Here is the function:
def get_pcm_from_wav(wav_path):
    """
    Read raw PCM frames from a wav file.

    :param wav_path: path of the wav file (file objects are also accepted
        by wave.open)
    :returns: raw PCM bytes
    """
    # Use a context manager so the underlying file is closed (the previous
    # version leaked the open wave reader).
    with wave.open(wav_path, "rb") as wav:
        return wav.readframes(wav.getnframes())
9,815 | import shutil
import wave
from common.log import logger
try:
import pysilk
except ImportError:
logger.warn("import pysilk failed, wechaty voice message will not be supported.")
from pydub import AudioSegment
sil_supports = [8000, 12000, 16000, 24000, 32000, 44100, 48000]  # sample rates supported by silk


def find_closest_sil_supports(sample_rate):
    """
    Return the silk-supported sample rate closest to sample_rate.

    An exact match is returned as-is; ties resolve to the earlier entry
    in sil_supports, matching the original strict-less-than scan.
    """
    if sample_rate in sil_supports:
        return sample_rate
    return min(sil_supports, key=lambda rate: abs(rate - sample_rate))
def get_pcm_from_wav(wav_path):
    """
    Read raw PCM frames from a wav file.

    :param wav_path: path of the wav file (file objects are also accepted
        by wave.open)
    :returns: raw PCM bytes
    """
    # Context manager closes the reader; the previous version leaked it.
    with wave.open(wav_path, "rb") as wav:
        return wav.readframes(wav.getnframes())
def any_to_mp3(any_path, mp3_path):
    """
    Convert any supported audio file to mp3.

    :param any_path: source audio path
    :param mp3_path: destination mp3 path
    """
    if any_path.endswith(".mp3"):
        shutil.copy2(any_path, mp3_path)
        return
    if any_path.endswith(".sil") or any_path.endswith(".silk") or any_path.endswith(".slk"):
        # Decode silk to wav in place first so pydub can read it.
        # (Previously the code rebound any_path to the not-yet-created
        # mp3_path, so silk input always failed with FileNotFoundError.)
        sil_to_wav(any_path, any_path)
    audio = AudioSegment.from_file(any_path)
    audio.export(mp3_path, format="mp3")
def any_to_wav(any_path, wav_path):
    """
    Convert any supported audio file to wav.

    :param any_path: source audio path
    :param wav_path: destination wav path
    """
    if any_path.endswith(".wav"):
        shutil.copy2(any_path, wav_path)
        return
    if any_path.endswith((".sil", ".silk", ".slk")):
        # Silk input needs the dedicated decoder.
        return sil_to_wav(any_path, wav_path)
    AudioSegment.from_file(any_path).export(wav_path, format="wav")
def any_to_sil(any_path, sil_path):
    """
    Convert an audio file of any supported format to a SILK (.sil) file.

    :returns: approximate duration in milliseconds; a fixed placeholder
        of 10000 when the source was already SILK (duration not probed).
    """
    for silk_ext in (".sil", ".silk", ".slk"):
        if any_path.endswith(silk_ext):
            shutil.copy2(any_path, sil_path)
            return 10000
    audio = AudioSegment.from_file(any_path)
    rate = find_closest_sil_supports(audio.frame_rate)
    # SILK wants 16-bit PCM at one of its supported sample rates.
    pcm = audio.set_sample_width(2).set_frame_rate(rate)
    encoded = pysilk.encode(pcm.raw_data, data_rate=rate, sample_rate=rate)
    with open(sil_path, "wb") as sink:
        sink.write(encoded)
    return audio.duration_seconds * 1000
def any_to_amr(any_path, amr_path):
    """
    Convert an audio file of any supported format to amr.

    :returns: duration in milliseconds, or None when the source was
        already amr and only a copy was needed.
    :raises NotImplementedError: for SILK sources, which are unsupported.
    """
    if any_path.endswith(".amr"):
        shutil.copy2(any_path, amr_path)
        return
    for silk_ext in (".sil", ".silk", ".slk"):
        if any_path.endswith(silk_ext):
            raise NotImplementedError("Not support file type: {}".format(any_path))
    audio = AudioSegment.from_file(any_path)
    audio = audio.set_frame_rate(8000)  # only support 8000
    audio.export(amr_path, format="amr")
    return audio.duration_seconds * 1000
def sil_to_wav(silk_path, wav_path, rate: int = 24000):
    """
    Decode a SILK file into a wav file at the given sample rate.
    """
    decoded = pysilk.decode_file(silk_path, to_wav=True, sample_rate=rate)
    with open(wav_path, "wb") as sink:
        sink.write(decoded)
def split_audio(file_path, max_segment_length_ms=60000):
    """
    Split an audio file into segments of at most *max_segment_length_ms*.

    :returns: (total_length_ms, list_of_file_paths); the original path is
        returned unchanged when the audio already fits in one segment.
    """
    audio = AudioSegment.from_file(file_path)
    total_ms = len(audio)
    if total_ms <= max_segment_length_ms:
        return total_ms, [file_path]
    dot = file_path.rindex(".")
    prefix, ext = file_path[:dot], file_path[dot + 1:]
    paths = []
    # Segment files are named <prefix>_1.<ext>, <prefix>_2.<ext>, ...
    for index, start in enumerate(range(0, total_ms, max_segment_length_ms), start=1):
        piece = audio[start:start + max_segment_length_ms]
        out_path = f"{prefix}_{index}.{ext}"
        piece.export(out_path, format=ext)
        paths.append(out_path)
    return total_ms, paths
The provided code snippet includes necessary dependencies for implementing the `any_to_mp3` function. Write a Python function `def any_to_mp3(any_path, mp3_path)` to solve the following problem:
把任意格式转成mp3文件
Here is the function:
def any_to_mp3(any_path, mp3_path):
    """
    Convert an audio file of any supported format to mp3.

    :param any_path: source audio file
    :param mp3_path: destination mp3 file
    """
    if any_path.endswith(".mp3"):
        # Already mp3: a plain copy is enough (copy2 preserves metadata).
        shutil.copy2(any_path, mp3_path)
        return
    if any_path.endswith((".sil", ".silk", ".slk")):
        # Decode SILK to wav in place first; pydub cannot read SILK directly.
        # (The original then pointed any_path at the not-yet-created
        # mp3_path, which made AudioSegment.from_file fail.)
        sil_to_wav(any_path, any_path)
    audio = AudioSegment.from_file(any_path)
    audio.export(mp3_path, format="mp3")
9,816 | import shutil
import wave
from common.log import logger
try:
import pysilk
except ImportError:
logger.warn("import pysilk failed, wechaty voice message will not be supported.")
from pydub import AudioSegment
# Sample rates supported by the SILK codec.
sil_supports = [8000, 12000, 16000, 24000, 32000, 44100, 48000]


def find_closest_sil_supports(sample_rate):
    """
    Return the supported SILK sample rate closest to *sample_rate*.

    On a distance tie the lower rate wins (strict ``<`` keeps the
    first-seen candidate). The ``def`` line was garbled in this copy
    of the file; the name is reconstructed from the call sites.
    """
    if sample_rate in sil_supports:
        return sample_rate
    closest = 0
    mindiff = 9999999
    for rate in sil_supports:
        diff = abs(rate - sample_rate)
        if diff < mindiff:
            closest = rate
            mindiff = diff
    return closest
def get_pcm_from_wav(wav_path):
    """
    Read the raw PCM frames from a wav file.

    :param wav_path: path to the wav file
    :returns: raw PCM bytes (all frames in the file)
    """
    # Context manager closes the file handle even on error
    # (the original left the wave object open).
    with wave.open(wav_path, "rb") as wav:
        return wav.readframes(wav.getnframes())
def any_to_mp3(any_path, mp3_path):
    """
    Convert an audio file of any supported format to mp3.

    :param any_path: source audio file
    :param mp3_path: destination mp3 file
    """
    if any_path.endswith(".mp3"):
        # Already mp3: a plain copy is enough (copy2 preserves metadata).
        shutil.copy2(any_path, mp3_path)
        return
    if any_path.endswith((".sil", ".silk", ".slk")):
        # Decode SILK to wav in place first; pydub cannot read SILK directly.
        # (The original then pointed any_path at the not-yet-created
        # mp3_path, which made AudioSegment.from_file fail.)
        sil_to_wav(any_path, any_path)
    audio = AudioSegment.from_file(any_path)
    audio.export(mp3_path, format="mp3")
def any_to_wav(any_path, wav_path):
    """
    Convert an audio file of any supported format to wav.

    :param any_path: source audio file
    :param wav_path: destination wav file
    """
    if any_path.endswith(".wav"):
        # Source is already wav: a plain copy is enough.
        shutil.copy2(any_path, wav_path)
        return
    for silk_ext in (".sil", ".silk", ".slk"):
        if any_path.endswith(silk_ext):
            # SILK needs its own decoder; pydub cannot parse it.
            return sil_to_wav(any_path, wav_path)
    segment = AudioSegment.from_file(any_path)
    segment.export(wav_path, format="wav")
def any_to_sil(any_path, sil_path):
    """
    Convert an audio file of any supported format to a SILK (.sil) file.

    :returns: approximate duration in milliseconds; a fixed placeholder
        of 10000 when the source was already SILK (duration not probed).
    """
    for silk_ext in (".sil", ".silk", ".slk"):
        if any_path.endswith(silk_ext):
            shutil.copy2(any_path, sil_path)
            return 10000
    audio = AudioSegment.from_file(any_path)
    rate = find_closest_sil_supports(audio.frame_rate)
    # SILK wants 16-bit PCM at one of its supported sample rates.
    pcm = audio.set_sample_width(2).set_frame_rate(rate)
    encoded = pysilk.encode(pcm.raw_data, data_rate=rate, sample_rate=rate)
    with open(sil_path, "wb") as sink:
        sink.write(encoded)
    return audio.duration_seconds * 1000
def any_to_amr(any_path, amr_path):
    """
    Convert an audio file of any supported format to amr.

    :returns: duration in milliseconds, or None when the source was
        already amr and only a copy was needed.
    :raises NotImplementedError: for SILK sources, which are unsupported.
    """
    if any_path.endswith(".amr"):
        shutil.copy2(any_path, amr_path)
        return
    for silk_ext in (".sil", ".silk", ".slk"):
        if any_path.endswith(silk_ext):
            raise NotImplementedError("Not support file type: {}".format(any_path))
    audio = AudioSegment.from_file(any_path)
    audio = audio.set_frame_rate(8000)  # only support 8000
    audio.export(amr_path, format="amr")
    return audio.duration_seconds * 1000
def sil_to_wav(silk_path, wav_path, rate: int = 24000):
    """
    Decode a SILK file into a wav file at the given sample rate.
    """
    decoded = pysilk.decode_file(silk_path, to_wav=True, sample_rate=rate)
    with open(wav_path, "wb") as sink:
        sink.write(decoded)
def split_audio(file_path, max_segment_length_ms=60000):
    """
    Split an audio file into segments of at most *max_segment_length_ms*.

    :returns: (total_length_ms, list_of_file_paths); the original path is
        returned unchanged when the audio already fits in one segment.
    """
    audio = AudioSegment.from_file(file_path)
    total_ms = len(audio)
    if total_ms <= max_segment_length_ms:
        return total_ms, [file_path]
    dot = file_path.rindex(".")
    prefix, ext = file_path[:dot], file_path[dot + 1:]
    paths = []
    # Segment files are named <prefix>_1.<ext>, <prefix>_2.<ext>, ...
    for index, start in enumerate(range(0, total_ms, max_segment_length_ms), start=1):
        piece = audio[start:start + max_segment_length_ms]
        out_path = f"{prefix}_{index}.{ext}"
        piece.export(out_path, format=ext)
        paths.append(out_path)
    return total_ms, paths
The provided code snippet includes necessary dependencies for implementing the `any_to_wav` function. Write a Python function `def any_to_wav(any_path, wav_path)` to solve the following problem:
把任意格式转成wav文件
Here is the function:
def any_to_wav(any_path, wav_path):
    """
    Convert an audio file of any supported format to wav.

    :param any_path: source audio file
    :param wav_path: destination wav file
    """
    if any_path.endswith(".wav"):
        # Source is already wav: a plain copy is enough.
        shutil.copy2(any_path, wav_path)
        return
    for silk_ext in (".sil", ".silk", ".slk"):
        if any_path.endswith(silk_ext):
            # SILK needs its own decoder; pydub cannot parse it.
            return sil_to_wav(any_path, wav_path)
    segment = AudioSegment.from_file(any_path)
    segment.export(wav_path, format="wav")
9,817 | import shutil
import wave
from common.log import logger
try:
import pysilk
except ImportError:
logger.warn("import pysilk failed, wechaty voice message will not be supported.")
from pydub import AudioSegment
# Sample rates supported by the SILK codec.
sil_supports = [8000, 12000, 16000, 24000, 32000, 44100, 48000]


def find_closest_sil_supports(sample_rate):
    """
    Return the supported SILK sample rate closest to *sample_rate*.

    On a distance tie the lower rate wins (strict ``<`` keeps the
    first-seen candidate). The ``def`` line was garbled in this copy
    of the file; the name is reconstructed from the call sites.
    """
    if sample_rate in sil_supports:
        return sample_rate
    closest = 0
    mindiff = 9999999
    for rate in sil_supports:
        diff = abs(rate - sample_rate)
        if diff < mindiff:
            closest = rate
            mindiff = diff
    return closest
def get_pcm_from_wav(wav_path):
    """
    Read the raw PCM frames from a wav file.

    :param wav_path: path to the wav file
    :returns: raw PCM bytes (all frames in the file)
    """
    # Context manager closes the file handle even on error
    # (the original left the wave object open).
    with wave.open(wav_path, "rb") as wav:
        return wav.readframes(wav.getnframes())
def any_to_mp3(any_path, mp3_path):
    """
    Convert an audio file of any supported format to mp3.

    :param any_path: source audio file
    :param mp3_path: destination mp3 file
    """
    if any_path.endswith(".mp3"):
        # Already mp3: a plain copy is enough (copy2 preserves metadata).
        shutil.copy2(any_path, mp3_path)
        return
    if any_path.endswith((".sil", ".silk", ".slk")):
        # Decode SILK to wav in place first; pydub cannot read SILK directly.
        # (The original then pointed any_path at the not-yet-created
        # mp3_path, which made AudioSegment.from_file fail.)
        sil_to_wav(any_path, any_path)
    audio = AudioSegment.from_file(any_path)
    audio.export(mp3_path, format="mp3")
def any_to_wav(any_path, wav_path):
    """
    Convert an audio file of any supported format to wav.

    :param any_path: source audio file
    :param wav_path: destination wav file
    """
    if any_path.endswith(".wav"):
        # Source is already wav: a plain copy is enough.
        shutil.copy2(any_path, wav_path)
        return
    for silk_ext in (".sil", ".silk", ".slk"):
        if any_path.endswith(silk_ext):
            # SILK needs its own decoder; pydub cannot parse it.
            return sil_to_wav(any_path, wav_path)
    segment = AudioSegment.from_file(any_path)
    segment.export(wav_path, format="wav")
def any_to_sil(any_path, sil_path):
    """
    Convert an audio file of any supported format to a SILK (.sil) file.

    :returns: approximate duration in milliseconds; a fixed placeholder
        of 10000 when the source was already SILK (duration not probed).
    """
    for silk_ext in (".sil", ".silk", ".slk"):
        if any_path.endswith(silk_ext):
            shutil.copy2(any_path, sil_path)
            return 10000
    audio = AudioSegment.from_file(any_path)
    rate = find_closest_sil_supports(audio.frame_rate)
    # SILK wants 16-bit PCM at one of its supported sample rates.
    pcm = audio.set_sample_width(2).set_frame_rate(rate)
    encoded = pysilk.encode(pcm.raw_data, data_rate=rate, sample_rate=rate)
    with open(sil_path, "wb") as sink:
        sink.write(encoded)
    return audio.duration_seconds * 1000
def any_to_amr(any_path, amr_path):
    """
    Convert an audio file of any supported format to amr.

    :returns: duration in milliseconds, or None when the source was
        already amr and only a copy was needed.
    :raises NotImplementedError: for SILK sources, which are unsupported.
    """
    if any_path.endswith(".amr"):
        shutil.copy2(any_path, amr_path)
        return
    for silk_ext in (".sil", ".silk", ".slk"):
        if any_path.endswith(silk_ext):
            raise NotImplementedError("Not support file type: {}".format(any_path))
    audio = AudioSegment.from_file(any_path)
    audio = audio.set_frame_rate(8000)  # only support 8000
    audio.export(amr_path, format="amr")
    return audio.duration_seconds * 1000
def sil_to_wav(silk_path, wav_path, rate: int = 24000):
    """
    Decode a SILK file into a wav file at the given sample rate.
    """
    decoded = pysilk.decode_file(silk_path, to_wav=True, sample_rate=rate)
    with open(wav_path, "wb") as sink:
        sink.write(decoded)
def split_audio(file_path, max_segment_length_ms=60000):
    """
    Split an audio file into segments of at most *max_segment_length_ms*.

    :returns: (total_length_ms, list_of_file_paths); the original path is
        returned unchanged when the audio already fits in one segment.
    """
    audio = AudioSegment.from_file(file_path)
    total_ms = len(audio)
    if total_ms <= max_segment_length_ms:
        return total_ms, [file_path]
    dot = file_path.rindex(".")
    prefix, ext = file_path[:dot], file_path[dot + 1:]
    paths = []
    # Segment files are named <prefix>_1.<ext>, <prefix>_2.<ext>, ...
    for index, start in enumerate(range(0, total_ms, max_segment_length_ms), start=1):
        piece = audio[start:start + max_segment_length_ms]
        out_path = f"{prefix}_{index}.{ext}"
        piece.export(out_path, format=ext)
        paths.append(out_path)
    return total_ms, paths
The provided code snippet includes necessary dependencies for implementing the `any_to_sil` function. Write a Python function `def any_to_sil(any_path, sil_path)` to solve the following problem:
把任意格式转成sil文件
Here is the function:
def any_to_sil(any_path, sil_path):
    """
    Convert an audio file of any supported format to a SILK (.sil) file.

    :returns: approximate duration in milliseconds; a fixed placeholder
        of 10000 when the source was already SILK (duration not probed).
    """
    for silk_ext in (".sil", ".silk", ".slk"):
        if any_path.endswith(silk_ext):
            shutil.copy2(any_path, sil_path)
            return 10000
    audio = AudioSegment.from_file(any_path)
    rate = find_closest_sil_supports(audio.frame_rate)
    # SILK wants 16-bit PCM at one of its supported sample rates.
    pcm = audio.set_sample_width(2).set_frame_rate(rate)
    encoded = pysilk.encode(pcm.raw_data, data_rate=rate, sample_rate=rate)
    with open(sil_path, "wb") as sink:
        sink.write(encoded)
    return audio.duration_seconds * 1000
9,818 | import shutil
import wave
from common.log import logger
try:
import pysilk
except ImportError:
logger.warn("import pysilk failed, wechaty voice message will not be supported.")
from pydub import AudioSegment
# Sample rates supported by the SILK codec.
sil_supports = [8000, 12000, 16000, 24000, 32000, 44100, 48000]


def find_closest_sil_supports(sample_rate):
    """
    Return the supported SILK sample rate closest to *sample_rate*.

    On a distance tie the lower rate wins (strict ``<`` keeps the
    first-seen candidate). The ``def`` line was garbled in this copy
    of the file; the name is reconstructed from the call sites.
    """
    if sample_rate in sil_supports:
        return sample_rate
    closest = 0
    mindiff = 9999999
    for rate in sil_supports:
        diff = abs(rate - sample_rate)
        if diff < mindiff:
            closest = rate
            mindiff = diff
    return closest
def get_pcm_from_wav(wav_path):
    """
    Read the raw PCM frames from a wav file.

    :param wav_path: path to the wav file
    :returns: raw PCM bytes (all frames in the file)
    """
    # Context manager closes the file handle even on error
    # (the original left the wave object open).
    with wave.open(wav_path, "rb") as wav:
        return wav.readframes(wav.getnframes())
def any_to_mp3(any_path, mp3_path):
    """
    Convert an audio file of any supported format to mp3.

    :param any_path: source audio file
    :param mp3_path: destination mp3 file
    """
    if any_path.endswith(".mp3"):
        # Already mp3: a plain copy is enough (copy2 preserves metadata).
        shutil.copy2(any_path, mp3_path)
        return
    if any_path.endswith((".sil", ".silk", ".slk")):
        # Decode SILK to wav in place first; pydub cannot read SILK directly.
        # (The original then pointed any_path at the not-yet-created
        # mp3_path, which made AudioSegment.from_file fail.)
        sil_to_wav(any_path, any_path)
    audio = AudioSegment.from_file(any_path)
    audio.export(mp3_path, format="mp3")
def any_to_wav(any_path, wav_path):
    """
    Convert an audio file of any supported format to wav.

    :param any_path: source audio file
    :param wav_path: destination wav file
    """
    if any_path.endswith(".wav"):
        # Source is already wav: a plain copy is enough.
        shutil.copy2(any_path, wav_path)
        return
    for silk_ext in (".sil", ".silk", ".slk"):
        if any_path.endswith(silk_ext):
            # SILK needs its own decoder; pydub cannot parse it.
            return sil_to_wav(any_path, wav_path)
    segment = AudioSegment.from_file(any_path)
    segment.export(wav_path, format="wav")
def any_to_sil(any_path, sil_path):
    """
    Convert an audio file of any supported format to a SILK (.sil) file.

    :returns: approximate duration in milliseconds; a fixed placeholder
        of 10000 when the source was already SILK (duration not probed).
    """
    for silk_ext in (".sil", ".silk", ".slk"):
        if any_path.endswith(silk_ext):
            shutil.copy2(any_path, sil_path)
            return 10000
    audio = AudioSegment.from_file(any_path)
    rate = find_closest_sil_supports(audio.frame_rate)
    # SILK wants 16-bit PCM at one of its supported sample rates.
    pcm = audio.set_sample_width(2).set_frame_rate(rate)
    encoded = pysilk.encode(pcm.raw_data, data_rate=rate, sample_rate=rate)
    with open(sil_path, "wb") as sink:
        sink.write(encoded)
    return audio.duration_seconds * 1000
def any_to_amr(any_path, amr_path):
    """
    Convert an audio file of any supported format to amr.

    :returns: duration in milliseconds, or None when the source was
        already amr and only a copy was needed.
    :raises NotImplementedError: for SILK sources, which are unsupported.
    """
    if any_path.endswith(".amr"):
        shutil.copy2(any_path, amr_path)
        return
    for silk_ext in (".sil", ".silk", ".slk"):
        if any_path.endswith(silk_ext):
            raise NotImplementedError("Not support file type: {}".format(any_path))
    audio = AudioSegment.from_file(any_path)
    audio = audio.set_frame_rate(8000)  # only support 8000
    audio.export(amr_path, format="amr")
    return audio.duration_seconds * 1000
def sil_to_wav(silk_path, wav_path, rate: int = 24000):
    """
    Decode a SILK file into a wav file at the given sample rate.
    """
    decoded = pysilk.decode_file(silk_path, to_wav=True, sample_rate=rate)
    with open(wav_path, "wb") as sink:
        sink.write(decoded)
def split_audio(file_path, max_segment_length_ms=60000):
    """
    Split an audio file into segments of at most *max_segment_length_ms*.

    :returns: (total_length_ms, list_of_file_paths); the original path is
        returned unchanged when the audio already fits in one segment.
    """
    audio = AudioSegment.from_file(file_path)
    total_ms = len(audio)
    if total_ms <= max_segment_length_ms:
        return total_ms, [file_path]
    dot = file_path.rindex(".")
    prefix, ext = file_path[:dot], file_path[dot + 1:]
    paths = []
    # Segment files are named <prefix>_1.<ext>, <prefix>_2.<ext>, ...
    for index, start in enumerate(range(0, total_ms, max_segment_length_ms), start=1):
        piece = audio[start:start + max_segment_length_ms]
        out_path = f"{prefix}_{index}.{ext}"
        piece.export(out_path, format=ext)
        paths.append(out_path)
    return total_ms, paths
The provided code snippet includes necessary dependencies for implementing the `any_to_amr` function. Write a Python function `def any_to_amr(any_path, amr_path)` to solve the following problem:
把任意格式转成amr文件
Here is the function:
def any_to_amr(any_path, amr_path):
    """
    Convert an audio file of any supported format to amr.

    :returns: duration in milliseconds, or None when the source was
        already amr and only a copy was needed.
    :raises NotImplementedError: for SILK sources, which are unsupported.
    """
    if any_path.endswith(".amr"):
        shutil.copy2(any_path, amr_path)
        return
    for silk_ext in (".sil", ".silk", ".slk"):
        if any_path.endswith(silk_ext):
            raise NotImplementedError("Not support file type: {}".format(any_path))
    audio = AudioSegment.from_file(any_path)
    audio = audio.set_frame_rate(8000)  # only support 8000
    audio.export(amr_path, format="amr")
    return audio.duration_seconds * 1000
9,819 |
The provided code snippet includes necessary dependencies for implementing the `split_audio` function. Write a Python function `def split_audio(file_path, max_segment_length_ms=60000)` to solve the following problem:
分割音频文件
Here is the function:
def split_audio(file_path, max_segment_length_ms=60000):
    """
    Split an audio file into segments of at most *max_segment_length_ms*.

    :returns: (total_length_ms, list_of_file_paths); the original path is
        returned unchanged when the audio already fits in one segment.
    """
    audio = AudioSegment.from_file(file_path)
    total_ms = len(audio)
    if total_ms <= max_segment_length_ms:
        return total_ms, [file_path]
    dot = file_path.rindex(".")
    prefix, ext = file_path[:dot], file_path[dot + 1:]
    paths = []
    # Segment files are named <prefix>_1.<ext>, <prefix>_2.<ext>, ...
    for index, start in enumerate(range(0, total_ms, max_segment_length_ms), start=1):
        piece = audio[start:start + max_segment_length_ms]
        out_path = f"{prefix}_{index}.{ext}"
        piece.export(out_path, format=ext)
        paths.append(out_path)
    return total_ms, paths
9,820 |
class BaiduVoice(Voice):
    """Voice engine backed by Baidu AipSpeech: ASR (voiceToText) and TTS (textToVoice)."""

    def __init__(self):
        # Best-effort init: any failure (missing config/keys) is logged and ignored.
        try:
            curdir = os.path.dirname(__file__)
            config_path = os.path.join(curdir, "config.json")
            bconf = None
            if not os.path.exists(config_path):  # create a local config file with defaults if none exists
                bconf = {"lang": "zh", "ctp": 1, "spd": 5, "pit": 5, "vol": 5, "per": 0}
                with open(config_path, "w") as fw:
                    json.dump(bconf, fw, indent=4)
            else:
                with open(config_path, "r") as fr:
                    bconf = json.load(fr)
            # Credentials come from the global conf(); synthesis knobs from config.json.
            self.app_id = str(conf().get("baidu_app_id"))
            self.api_key = str(conf().get("baidu_api_key"))
            self.secret_key = str(conf().get("baidu_secret_key"))
            self.dev_id = conf().get("baidu_dev_pid")
            self.lang = bconf["lang"]
            self.ctp = bconf["ctp"]
            # spd/pit/vol/per are passed straight to synthesis() below —
            # presumably speed/pitch/volume/persona; confirm against Baidu TTS docs.
            self.spd = bconf["spd"]
            self.pit = bconf["pit"]
            self.vol = bconf["vol"]
            self.per = bconf["per"]
            self.client = AipSpeech(self.app_id, self.api_key, self.secret_key)
        except Exception as e:
            logger.warn("BaiduVoice init failed: %s, ignore " % e)

    def voiceToText(self, voice_file):
        """Recognize a local wav file; return a TEXT Reply on success, ERROR Reply otherwise."""
        # Recognize a local file; the PCM is sent as 16 kHz to the ASR endpoint.
        logger.debug("[Baidu] voice file name={}".format(voice_file))
        pcm = get_pcm_from_wav(voice_file)
        res = self.client.asr(pcm, "pcm", 16000, {"dev_pid": self.dev_id})
        if res["err_no"] == 0:
            logger.info("百度语音识别到了:{}".format(res["result"]))
            text = "".join(res["result"])
            reply = Reply(ReplyType.TEXT, text)
        else:
            logger.info("百度语音识别出错了: {}".format(res["err_msg"]))
            if res["err_msg"] == "request pv too much":
                logger.info(" 出现这个原因很可能是你的百度语音服务调用量超出限制,或未开通付费")
            reply = Reply(ReplyType.ERROR, "百度语音识别出错了;{0}".format(res["err_msg"]))
        return reply

    def textToVoice(self, text):
        """Synthesize *text* to an mp3 file; return a VOICE Reply on success, ERROR Reply otherwise."""
        result = self.client.synthesis(
            text,
            self.lang,
            self.ctp,
            {"spd": self.spd, "pit": self.pit, "vol": self.vol, "per": self.per},
        )
        # A non-dict result is treated as the raw audio payload; a dict signals an error.
        if not isinstance(result, dict):
            # Avoid the same filename under multithreading
            fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
            with open(fileName, "wb") as f:
                f.write(result)
            logger.info("[Baidu] textToVoice text={} voice file name={}".format(text, fileName))
            reply = Reply(ReplyType.VOICE, fileName)
        else:
            logger.error("[Baidu] textToVoice error={}".format(result))
            reply = Reply(ReplyType.ERROR, "抱歉,语音合成失败")
        return reply
class GoogleVoice(Voice):
    """Voice engine backed by Google speech recognition (STT) and gTTS (TTS)."""

    recognizer = speech_recognition.Recognizer()

    def __init__(self):
        pass

    def voiceToText(self, voice_file):
        """Transcribe *voice_file* (zh-CN); return a TEXT Reply or an ERROR Reply."""
        with speech_recognition.AudioFile(voice_file) as source:
            audio = self.recognizer.record(source)
        try:
            text = self.recognizer.recognize_google(audio, language="zh-CN")
            logger.info("[Google] voiceToText text={} voice file name={}".format(text, voice_file))
            reply = Reply(ReplyType.TEXT, text)
        except speech_recognition.UnknownValueError:
            reply = Reply(ReplyType.ERROR, "抱歉,我听不懂")
        except speech_recognition.RequestError as e:
            reply = Reply(ReplyType.ERROR, "抱歉,无法连接到 Google 语音识别服务;{0}".format(e))
        # NOTE: the original used `finally: return reply`, which silently swallowed
        # unexpected exceptions and raised NameError when `reply` was never bound.
        return reply

    def textToVoice(self, text):
        """Synthesize *text* to an mp3 file; return a VOICE Reply or an ERROR Reply."""
        try:
            # Timestamp + text hash avoids filename collisions under multithreading.
            mp3File = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
            tts = gTTS(text=text, lang="zh")
            tts.save(mp3File)
            logger.info("[Google] textToVoice text={} voice file name={}".format(text, mp3File))
            reply = Reply(ReplyType.VOICE, mp3File)
        except Exception as e:
            reply = Reply(ReplyType.ERROR, str(e))
        return reply
class OpenaiVoice(Voice):
    """Voice engine backed by OpenAI whisper (STT) and the /audio/speech endpoint (TTS)."""

    def __init__(self):
        openai.api_key = conf().get("open_ai_api_key")

    def voiceToText(self, voice_file):
        """Transcribe *voice_file* with whisper-1; return a TEXT Reply or an ERROR Reply."""
        logger.debug("[Openai] voice file name={}".format(voice_file))
        try:
            # Context manager closes the file handle (the original leaked it).
            with open(voice_file, "rb") as file:
                result = openai.Audio.transcribe("whisper-1", file)
            text = result["text"]
            reply = Reply(ReplyType.TEXT, text)
            logger.info("[Openai] voiceToText text={} voice file name={}".format(text, voice_file))
        except Exception as e:
            reply = Reply(ReplyType.ERROR, "我暂时还无法听清您的语音,请稍后再试吧~")
        # NOTE: the original used `finally: return reply`, which suppressed exceptions;
        # the broad except above already guarantees `reply` is bound.
        return reply

    def textToVoice(self, text):
        """Synthesize *text* via the OpenAI TTS endpoint; return a VOICE Reply or an ERROR Reply."""
        try:
            api_base = conf().get("open_ai_api_base") or "https://api.openai.com/v1"
            url = f'{api_base}/audio/speech'
            headers = {
                'Authorization': 'Bearer ' + conf().get("open_ai_api_key"),
                'Content-Type': 'application/json'
            }
            data = {
                'model': conf().get("text_to_voice_model") or const.TTS_1,
                'input': text,
                'voice': conf().get("tts_voice_id") or "alloy"
            }
            response = requests.post(url, headers=headers, json=data)
            # Timestamp + random suffix avoids filename collisions under multithreading.
            file_name = "tmp/" + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + str(random.randint(0, 1000)) + ".mp3"
            logger.debug(f"[OPENAI] text_to_Voice file_name={file_name}, input={text}")
            with open(file_name, 'wb') as f:
                f.write(response.content)
            logger.info(f"[OPENAI] text_to_Voice success")
            reply = Reply(ReplyType.VOICE, file_name)
        except Exception as e:
            logger.error(e)
            reply = Reply(ReplyType.ERROR, "遇到了一点小问题,请稍后再问我吧")
        return reply
class PyttsVoice(Voice):
    """Offline TTS engine backed by pyttsx3 (no speech recognition support)."""

    # Shared class-level engine: pyttsx3 is initialized once per process.
    engine = pyttsx3.init()

    def __init__(self):
        # speech rate
        self.engine.setProperty("rate", 125)
        # volume
        self.engine.setProperty("volume", 1.0)
        if sys.platform == "win32":
            # On Windows, pick the first installed voice whose name mentions Chinese.
            for voice in self.engine.getProperty("voices"):
                if "Chinese" in voice.name:
                    self.engine.setProperty("voice", voice.id)
        else:
            self.engine.setProperty("voice", "zh")
            # If the problem of espeak is fixed, using runAndWait() and remove this startLoop()
            # TODO: check if this is work on win32
            self.engine.startLoop(useDriverLoop=False)

    def textToVoice(self, text):
        """Synthesize *text* to a wav file; return a VOICE Reply (ERROR Reply on failure)."""
        try:
            # Avoid the same filename under multithreading
            wavFileName = "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".wav"
            wavFile = TmpDir().path() + wavFileName
            logger.info("[Pytts] textToVoice text={} voice file name={}".format(text, wavFile))
            self.engine.save_to_file(text, wavFile)
            if sys.platform == "win32":
                self.engine.runAndWait()
            else:
                # In ubuntu, runAndWait do not really wait until the file created.
                # It will return once the task queue is empty, but the task is still running in coroutine.
                # And if you call runAndWait() and time.sleep() twice, it will stuck, so do not use this.
                # If you want to fix this, add self._proxy.setBusy(True) in line 127 in espeak.py, at the beginning of the function save_to_file.
                # self.engine.runAndWait()
                # Before espeak fix this problem, we iterate the generator and control the waiting by ourself.
                # But this is not the canonical way to use it, for example if the file already exists it also cannot wait.
                self.engine.iterate()
                while self.engine.isBusy() or wavFileName not in os.listdir(TmpDir().path()):
                    time.sleep(0.1)
            reply = Reply(ReplyType.VOICE, wavFile)
        except Exception as e:
            reply = Reply(ReplyType.ERROR, str(e))
        finally:
            # NOTE(review): `return` in finally suppresses any unexpected exception;
            # kept as-is since callers rely on always receiving a Reply.
            return reply
class AzureVoice(Voice):
    """Voice engine backed by Azure Cognitive Services Speech (STT + TTS)."""

    def __init__(self):
        # Best-effort init: failures are logged and ignored.
        try:
            curdir = os.path.dirname(__file__)
            config_path = os.path.join(curdir, "config.json")
            config = None
            if not os.path.exists(config_path):  # create a local config file with defaults if none exists
                config = {
                    "speech_synthesis_voice_name": "zh-CN-XiaoxiaoNeural",  # default voice when the language is not recognized
                    "auto_detect": True,  # whether to auto-detect the language of the text
                    "speech_synthesis_zh": "zh-CN-XiaozhenNeural",
                    "speech_synthesis_en": "en-US-JacobNeural",
                    "speech_synthesis_ja": "ja-JP-AoiNeural",
                    "speech_synthesis_ko": "ko-KR-SoonBokNeural",
                    "speech_synthesis_de": "de-DE-LouisaNeural",
                    "speech_synthesis_fr": "fr-FR-BrigitteNeural",
                    "speech_synthesis_es": "es-ES-LaiaNeural",
                    "speech_recognition_language": "zh-CN",
                }
                with open(config_path, "w") as fw:
                    json.dump(config, fw, indent=4)
            else:
                with open(config_path, "r") as fr:
                    config = json.load(fr)
            self.config = config
            self.api_key = conf().get("azure_voice_api_key")
            self.api_region = conf().get("azure_voice_region")
            self.speech_config = speechsdk.SpeechConfig(subscription=self.api_key, region=self.api_region)
            self.speech_config.speech_synthesis_voice_name = self.config["speech_synthesis_voice_name"]
            self.speech_config.speech_recognition_language = self.config["speech_recognition_language"]
        except Exception as e:
            logger.warn("AzureVoice init failed: %s, ignore " % e)

    def voiceToText(self, voice_file):
        """Recognize a single utterance from *voice_file*; return a TEXT or ERROR Reply."""
        audio_config = speechsdk.AudioConfig(filename=voice_file)
        speech_recognizer = speechsdk.SpeechRecognizer(speech_config=self.speech_config, audio_config=audio_config)
        result = speech_recognizer.recognize_once()
        if result.reason == speechsdk.ResultReason.RecognizedSpeech:
            logger.info("[Azure] voiceToText voice file name={} text={}".format(voice_file, result.text))
            reply = Reply(ReplyType.TEXT, result.text)
        else:
            cancel_details = result.cancellation_details
            logger.error("[Azure] voiceToText error, result={}, errordetails={}".format(result, cancel_details.error_details))
            reply = Reply(ReplyType.ERROR, "抱歉,语音识别失败")
        return reply

    def textToVoice(self, text):
        """Synthesize *text* to a wav file; return a VOICE or ERROR Reply."""
        if self.config.get("auto_detect"):
            # Pick a per-language voice from config keyed on the detected language
            # (classify() presumably comes from langid — confirm the import).
            lang = classify(text)[0]
            key = "speech_synthesis_" + lang
            if key in self.config:
                logger.info("[Azure] textToVoice auto detect language={}, voice={}".format(lang, self.config[key]))
                self.speech_config.speech_synthesis_voice_name = self.config[key]
            else:
                # No voice configured for this language: fall back to the default voice.
                self.speech_config.speech_synthesis_voice_name = self.config["speech_synthesis_voice_name"]
        else:
            self.speech_config.speech_synthesis_voice_name = self.config["speech_synthesis_voice_name"]
        # Avoid the same filename under multithreading
        fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".wav"
        audio_config = speechsdk.AudioConfig(filename=fileName)
        speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=self.speech_config, audio_config=audio_config)
        result = speech_synthesizer.speak_text(text)
        if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
            logger.info("[Azure] textToVoice text={} voice file name={}".format(text, fileName))
            reply = Reply(ReplyType.VOICE, fileName)
        else:
            cancel_details = result.cancellation_details
            logger.error("[Azure] textToVoice error, result={}, errordetails={}".format(result, cancel_details.error_details))
            reply = Reply(ReplyType.ERROR, "抱歉,语音合成失败")
        return reply
class ElevenLabsVoice(Voice):
    """Voice engine backed by ElevenLabs (TTS only; speech recognition is not implemented)."""

    def __init__(self):
        pass

    def voiceToText(self, voice_file):
        # Speech-to-text is not supported by this engine.
        pass

    def textToVoice(self, text):
        """Synthesize *text* to an mp3 file and return a VOICE Reply."""
        # NOTE(review): `name` is not defined in this class — presumably a
        # module-level voice id set where this class is declared; confirm,
        # otherwise this raises NameError at runtime.
        audio = generate(
            text=text,
            voice=name,
            model='eleven_multilingual_v1'
        )
        # Timestamp + text hash avoids filename collisions under multithreading.
        fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
        with open(fileName, "wb") as f:
            f.write(audio)
        logger.info("[ElevenLabs] textToVoice text={} voice file name={}".format(text, fileName))
        return Reply(ReplyType.VOICE, fileName)
class LinkAIVoice(Voice):
    """Voice engine backed by the LinkAI API (whisper-compatible STT and TTS)."""

    def __init__(self):
        pass

    def voiceToText(self, voice_file):
        """Transcribe *voice_file* via LinkAI; return a TEXT Reply, or None on failure."""
        logger.debug("[LinkVoice] voice file name={}".format(voice_file))
        try:
            url = conf().get("linkai_api_base", "https://api.link-ai.chat") + "/v1/audio/transcriptions"
            headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
            model = None
            if not conf().get("text_to_voice") or conf().get("voice_to_text") == "openai":
                model = const.WHISPER_1
            if voice_file.endswith(".amr"):
                # The backend cannot ingest amr; best-effort transcode to mp3 first.
                try:
                    mp3_file = os.path.splitext(voice_file)[0] + ".mp3"
                    audio_convert.any_to_mp3(voice_file, mp3_file)
                    voice_file = mp3_file
                except Exception as e:
                    logger.warn(f"[LinkVoice] amr file transfer failed, directly send amr voice file: {format(e)}")
            # Context manager keeps the file open for the upload and then
            # closes it (the original leaked the handle).
            with open(voice_file, "rb") as file:
                file_body = {
                    "file": file
                }
                data = {
                    "model": model
                }
                res = requests.post(url, files=file_body, headers=headers, data=data, timeout=(5, 60))
            if res.status_code == 200:
                text = res.json().get("text")
            else:
                res_json = res.json()
                logger.error(f"[LinkVoice] voiceToText error, status_code={res.status_code}, msg={res_json.get('message')}")
                return None
            reply = Reply(ReplyType.TEXT, text)
            logger.info(f"[LinkVoice] voiceToText success, text={text}, file name={voice_file}")
        except Exception as e:
            logger.error(e)
            return None
        return reply

    def textToVoice(self, text):
        """Synthesize *text* via LinkAI TTS; return a VOICE Reply, or None on failure."""
        try:
            url = conf().get("linkai_api_base", "https://api.link-ai.chat") + "/v1/audio/speech"
            headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
            model = const.TTS_1
            if not conf().get("text_to_voice") or conf().get("text_to_voice") in ["openai", const.TTS_1, const.TTS_1_HD]:
                model = conf().get("text_to_voice_model") or const.TTS_1
            data = {
                "model": model,
                "input": text,
                "voice": conf().get("tts_voice_id"),
                "app_code": conf().get("linkai_app_code")
            }
            res = requests.post(url, headers=headers, json=data, timeout=(5, 120))
            if res.status_code == 200:
                # Timestamp + random suffix avoids filename collisions under multithreading.
                tmp_file_name = "tmp/" + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + str(random.randint(0, 1000)) + ".mp3"
                with open(tmp_file_name, 'wb') as f:
                    f.write(res.content)
                reply = Reply(ReplyType.VOICE, tmp_file_name)
                logger.info(f"[LinkVoice] textToVoice success, input={text}, model={model}, voice_id={data.get('voice')}")
                return reply
            else:
                res_json = res.json()
                logger.error(f"[LinkVoice] textToVoice error, status_code={res.status_code}, msg={res_json.get('message')}")
                return None
        except Exception as e:
            logger.error(e)
            return None
class AliVoice(Voice):
    """TTS engine backed by Aliyun's speech service (token-authenticated HTTP API)."""

    def __init__(self):
        """Initialize AliVoice, loading required settings from the local config file."""
        try:
            curdir = os.path.dirname(__file__)
            config_path = os.path.join(curdir, "config.json")
            with open(config_path, "r") as fr:
                config = json.load(fr)
            self.token = None  # cached access token
            self.token_expire_time = 0  # epoch seconds after which the cached token is stale
            # By default reuse the Aliyun Qwen access_key and access_secret.
            self.api_url = config.get("api_url")
            self.app_key = config.get("app_key")
            self.access_key_id = conf().get("qwen_access_key_id") or config.get("access_key_id")
            self.access_key_secret = conf().get("qwen_access_key_secret") or config.get("access_key_secret")
        except Exception as e:
            logger.warn("AliVoice init failed: %s, ignore " % e)

    def textToVoice(self, text):
        """
        Convert text to a speech file.

        :param text: text to synthesize.
        :return: a Reply containing the generated voice file, or an error message.
        """
        # Strip characters outside CJK / Japanese / Korean / accented-Latin
        # letters, digits and basic punctuation before synthesis.
        text = re.sub(r'[^\u4e00-\u9fa5\u3040-\u30FF\uAC00-\uD7AFa-zA-Z0-9'
                      r'äöüÄÖÜáéíóúÁÉÍÓÚàèìòùÀÈÌÒÙâêîôûÂÊÎÔÛçÇñÑ,。!?,.]', '', text)
        # Obtain a valid (possibly cached) token.
        token_id = self.get_valid_token()
        fileName = text_to_speech_aliyun(self.api_url, text, self.app_key, token_id)
        if fileName:
            logger.info("[Ali] textToVoice text={} voice file name={}".format(text, fileName))
            reply = Reply(ReplyType.VOICE, fileName)
        else:
            reply = Reply(ReplyType.ERROR, "抱歉,语音合成失败")
        return reply

    def get_valid_token(self):
        """
        Return a valid Aliyun access token, refreshing the cached one when expired.

        :return: the token string.
        """
        current_time = time.time()
        if self.token is None or current_time >= self.token_expire_time:
            get_token = AliyunTokenGenerator(self.access_key_id, self.access_key_secret)
            token_str = get_token.get_token()
            token_data = json.loads(token_str)
            self.token = token_data["Token"]["Id"]
            # Expire the cached token a little early (5 minutes) to avoid
            # using a token right at its expiry boundary.
            self.token_expire_time = token_data["Token"]["ExpireTime"] - 300
            logger.debug(f"新获取的阿里云token:{self.token}")
        else:
            logger.debug("使用缓存的token")
        return self.token
The provided code snippet includes necessary dependencies for implementing the `create_voice` function. Write a Python function `def create_voice(voice_type)` to solve the following problem:
create a voice instance :param voice_type: voice type code :return: voice instance
Here is the function:
def create_voice(voice_type):
    """
    Create a voice instance.

    :param voice_type: voice type code (e.g. "baidu", "google", "openai")
    :return: voice instance
    :raises RuntimeError: if *voice_type* is not a known engine
    """
    # Imports are deferred so only the selected engine's dependencies are loaded.
    if voice_type == "baidu":
        from voice.baidu.baidu_voice import BaiduVoice

        return BaiduVoice()
    elif voice_type == "google":
        from voice.google.google_voice import GoogleVoice

        return GoogleVoice()
    elif voice_type == "openai":
        from voice.openai.openai_voice import OpenaiVoice

        return OpenaiVoice()
    elif voice_type == "pytts":
        from voice.pytts.pytts_voice import PyttsVoice

        return PyttsVoice()
    elif voice_type == "azure":
        from voice.azure.azure_voice import AzureVoice

        return AzureVoice()
    elif voice_type == "elevenlabs":
        from voice.elevent.elevent_voice import ElevenLabsVoice

        return ElevenLabsVoice()
    elif voice_type == "linkai":
        from voice.linkai.linkai_voice import LinkAIVoice

        return LinkAIVoice()
    elif voice_type == "ali":
        from voice.ali.ali_voice import AliVoice

        return AliVoice()
    # The original raised a bare RuntimeError with no diagnostic message.
    raise RuntimeError("unknown voice_type: {}".format(voice_type))
9,821 | import json
import time
import requests
import datetime
import hashlib
import hmac
import base64
import urllib.parse
import uuid
from common.log import logger
from common.tmp_dir import TmpDir
# (extraction artifact: a module-level assignment was truncated here)
class TmpDir(object):
    """A temporary directory that is deleted when the object is destroyed."""

    tmpFilePath = pathlib.Path("./tmp/")

    def __init__(self):
        # Create the directory lazily on first use.
        if not os.path.exists(self.tmpFilePath):
            os.makedirs(self.tmpFilePath)

    def path(self):
        # Trailing separator so callers can append file names directly.
        return str(self.tmpFilePath) + "/"
The provided code snippet includes necessary dependencies for implementing the `text_to_speech_aliyun` function. Write a Python function `def text_to_speech_aliyun(url, text, appkey, token)` to solve the following problem:
使用阿里云的文本转语音服务将文本转换为语音。 参数: - url (str): 阿里云文本转语音服务的端点URL。 - text (str): 要转换为语音的文本。 - appkey (str): 您的阿里云appkey。 - token (str): 阿里云API的认证令牌。 返回值: - str: 成功时输出音频文件的路径,否则为None。
Here is the function:
def text_to_speech_aliyun(url, text, appkey, token):
    """
    Convert text to speech with Aliyun's text-to-speech service.

    Parameters:
    - url (str): endpoint URL of the Aliyun TTS service.
    - text (str): the text to synthesize.
    - appkey (str): your Aliyun appkey.
    - token (str): authentication token for the Aliyun API.

    Returns:
    - str: path of the output audio file on success, otherwise None.
    """
    headers = {
        "Content-Type": "application/json",
    }
    # request body per the Aliyun TTS REST API; wav output is requested
    data = {
        "text": text,
        "appkey": appkey,
        "token": token,
        "format": "wav"
    }
    response = requests.post(url, headers=headers, data=json.dumps(data))
    # NOTE(review): success requires Content-Type 'audio/mpeg' although
    # "wav" is requested above — presumably that is what the service
    # actually returns; confirm against the Aliyun TTS documentation
    if response.status_code == 200 and response.headers['Content-Type'] == 'audio/mpeg':
        # unique-ish name: epoch seconds plus a non-negative hash of the text
        output_file = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".wav"
        with open(output_file, 'wb') as file:
            file.write(response.content)
        logger.debug(f"音频文件保存成功,文件名:{output_file}")
    else:
        # log status and body for diagnosis, then signal failure with None
        logger.debug("响应状态码: {}".format(response.status_code))
        logger.debug("响应内容: {}".format(response.text))
        output_file = None
    return output_file
9,822 | import io
import os
from urllib.parse import urlparse
from PIL import Image
def split_string_by_utf8_length(string, max_length, max_split=0):
    """Split *string* into chunks of at most *max_length* UTF-8 bytes.

    Cut points are moved backwards so a multi-byte character is never split.
    If max_split > 0, at most max_split + 1 chunks are produced; the final
    chunk then holds the whole remainder regardless of its byte length.
    """
    data = string.encode("utf-8")
    total = len(data)
    chunks = []
    cursor = 0
    while cursor < total:
        if max_split > 0 and len(chunks) >= max_split:
            # split budget exhausted: the rest goes into one final chunk
            chunks.append(data[cursor:].decode("utf-8"))
            break
        cut = min(cursor + max_length, total)
        # back up while the cut lands on a UTF-8 continuation byte (10xxxxxx)
        while cut < total and (data[cut] & 0b11000000) == 0b10000000:
            cut -= 1
        chunks.append(data[cursor:cut].decode("utf-8"))
        cursor = cut
    return chunks
9,823 | import io
import os
from urllib.parse import urlparse
from PIL import Image
def get_path_suffix(path):
    """Return the file extension of a path or URL, without the leading dot.

    Query strings and fragments are ignored because only the URL's path
    component is inspected. Returns "" when there is no extension.
    """
    path_part = urlparse(path).path
    extension = os.path.splitext(path_part)[-1]
    return extension.lstrip('.')
9,824 |
def singleton(cls):
    """Class decorator that makes *cls* produce one shared instance.

    The first call constructs the instance with the given arguments; every
    later call returns that same object (later arguments are ignored).
    functools.wraps keeps the original class metadata (__name__, __doc__,
    __module__) on the decorated name so introspection and repr stay useful.
    """
    import functools

    instances = {}

    @functools.wraps(cls, updated=[])
    def get_instance(*args, **kwargs):
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]

    return get_instance
9,825 | import time
import pip
from pip._internal import main as pipmain
from common.log import _reset_logger, logger
def _reset_logger(log):
for handler in log.handlers:
handler.close()
log.removeHandler(handler)
del handler
log.handlers.clear()
log.propagate = False
console_handle = logging.StreamHandler(sys.stdout)
console_handle.setFormatter(
logging.Formatter(
"[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
file_handle = logging.FileHandler("run.log", encoding="utf-8")
file_handle.setFormatter(
logging.Formatter(
"[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
log.addHandler(file_handle)
log.addHandler(console_handle)
=
def install_requirements(file):
    """Install/upgrade every package listed in the given requirements file.

    pip's own logging setup clobbers our handlers, so the global logger is
    re-initialized afterwards.
    """
    pipmain(["install", "-r", file, "--upgrade"])
    _reset_logger(logger)
9,826 | import time
import pip
from pip._internal import main as pipmain
from common.log import _reset_logger, logger
def install(package):
    """Install a single package with pip's internal main entry point."""
    pipmain(["install", package])
def check_dulwich():
    """Ensure the ``dulwich`` package is importable, installing it if needed.

    Makes up to two install attempts, sleeping 3 seconds before a retry
    (pip can fail transiently).

    Raises:
        ImportError: if dulwich is still missing after the attempts.
    """
    needwait = False
    for i in range(2):
        if needwait:
            time.sleep(3)
            needwait = False
        try:
            import dulwich
            return
        except ImportError:
            try:
                install("dulwich")
            # pip raises many error types; retry once, but never swallow
            # KeyboardInterrupt/SystemExit like the previous bare except did
            except Exception:
                needwait = True
    try:
        import dulwich
    except ImportError:
        raise ImportError("Unable to import dulwich")
9,827 | import hashlib
import re
import time
import config
from common.log import logger
=
def time_checker(f):
    """Decorator that only lets the wrapped handler run inside the configured
    service hours.

    Reads chat_time_module / chat_start_time / chat_stop_time from config.
    Outside the service window the message is dropped, except for the
    "#更新配置" (reload config) command which is always allowed through.
    """
    def _time_checker(self, *args, **kwargs):
        _config = config.conf()
        chat_time_module = _config.get("chat_time_module", False)
        if not chat_time_module:
            # time module disabled: always answer
            return f(self, *args, **kwargs)
        chat_start_time = _config.get("chat_start_time", "00:00")
        chat_stop_time = _config.get("chat_stop_time", "24:00")
        # HH:MM, allowing 24:00 as "end of day"
        time_regex = re.compile(r"^([01]?[0-9]|2[0-4])(:)([0-5][0-9])$")
        starttime_format_check = time_regex.match(chat_start_time)
        stoptime_format_check = time_regex.match(chat_stop_time)
        chat_time_check = chat_start_time < chat_stop_time
        if not (starttime_format_check and stoptime_format_check and chat_time_check):
            # log the configured strings themselves — formatting the match
            # objects (as before) hid the offending values from the warning
            logger.warn("时间格式不正确,请在config.json中修改您的CHAT_START_TIME/CHAT_STOP_TIME,否则可能会影响您正常使用,开始({})-结束({})".format(chat_start_time, chat_stop_time))
        if chat_start_time > "23:59":
            logger.error("启动时间可能存在问题,请修改!")
        # lexicographic HH:MM comparison is chronologically correct
        now_time = time.strftime("%H:%M", time.localtime())
        if chat_start_time <= now_time <= chat_stop_time:
            # inside service hours: answer normally (propagate the result)
            return f(self, *args, **kwargs)
        if args[0]["Content"] == "#更新配置":
            # config reload is allowed even outside service hours
            return f(self, *args, **kwargs)
        logger.info("非服务时间内,不接受访问")
        return None
    return _time_checker
9,828 | import logging
import sys
# NOTE(review): this looks like an extraction artifact — _get_logger is
# nested inside an empty _reset_logger shell, and the real _reset_logger
# body is missing here; confirm against common/log.py before relying on it
def _reset_logger(log):
    def _get_logger():
        # build the module-wide "log" logger, reset its handlers and
        # default the level to INFO
        log = logging.getLogger("log")
        _reset_logger(log)
        log.setLevel(logging.INFO)
        return log
9,829 | from enum import Enum
from config import conf
from common.log import logger
import requests
import threading
import time
from bridge.reply import Reply, ReplyType
import asyncio
from bridge.context import ContextType
from plugins import EventContext, EventAction
from .utils import Util
=
class Reply:
    """Container pairing a ReplyType with its payload (text, path or URL)."""
    def __init__(self, type: ReplyType = None, content=None):
        # type: kind of reply (see ReplyType); content: the payload itself
        self.type = type
        self.content = content
    def __str__(self):
        return "Reply(type={}, content={})".format(self.type, self.content)
def _send(channel, reply: Reply, context, retry_cnt=0):
    """Send a reply through the channel, retrying up to 2 times on failure.

    Waits 3s before the first retry and 6s before the second.
    NotImplementedError aborts immediately: retrying an unsupported
    operation cannot succeed.
    """
    try:
        channel.send(reply, context)
    except Exception as e:
        logger.error("[WX] sendMsg error: {}".format(str(e)))
        if isinstance(e, NotImplementedError):
            return
        logger.exception(e)
        if retry_cnt < 2:
            time.sleep(3 + 3 * retry_cnt)
            # recurse through _send so the retry is also error-guarded;
            # the previous direct channel.send(reply, context, retry_cnt + 1)
            # call crashed on a second failure and never honored the budget
            _send(channel, reply, context, retry_cnt + 1)
9,830 | from enum import Enum
from config import conf
from common.log import logger
import requests
import threading
import time
from bridge.reply import Reply, ReplyType
import asyncio
from bridge.context import ContextType
from plugins import EventContext, EventAction
from .utils import Util
def check_prefix(content, prefix_list):
    """Return the first prefix in *prefix_list* that *content* starts with.

    Returns None when the list is empty/None or when nothing matches.
    """
    if not prefix_list:
        return None
    return next(
        (candidate for candidate in prefix_list if content.startswith(candidate)),
        None,
    )
9,831 | import plugins
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from plugins import *
from .midjourney import MJBot
from .summary import LinkSummary
from bridge import bridge
from common.expired_dict import ExpiredDict
from common import const
import os
from .utils import Util
class ReplyType(Enum):
    """Kinds of replies a channel can deliver."""
    TEXT = 1  # plain text
    VOICE = 2  # audio file
    IMAGE = 3  # image file
    IMAGE_URL = 4  # image URL
    VIDEO_URL = 5  # video URL
    FILE = 6  # file
    CARD = 7  # WeChat contact card, only supported by ntchat
    InviteRoom = 8  # invite a friend into a group chat
    INFO = 9
    ERROR = 10
    TEXT_ = 11  # forced text
    VIDEO = 12
    MINIAPP = 13  # mini program
    def __str__(self):
        return self.name
class Reply:
    """Container pairing a ReplyType with its payload (text, path or URL)."""
    def __init__(self, type: ReplyType = None, content=None):
        # type: kind of reply (see ReplyType); content: the payload itself
        self.type = type
        self.content = content
    def __str__(self):
        return "Reply(type={}, content={})".format(self.type, self.content)
def _send_info(e_context: EventContext, content: str):
    """Send *content* as an immediate plain-text message through the event's
    channel, without touching the event context's pending reply."""
    reply = Reply(ReplyType.TEXT, content)
    channel = e_context["channel"]
    channel.send(reply, e_context["context"])
9,832 | import plugins
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from plugins import *
from .midjourney import MJBot
from .summary import LinkSummary
from bridge import bridge
from common.expired_dict import ExpiredDict
from common import const
import os
from .utils import Util
class ReplyType(Enum):
    """Kinds of replies a channel can deliver."""
    TEXT = 1  # plain text
    VOICE = 2  # audio file
    IMAGE = 3  # image file
    IMAGE_URL = 4  # image URL
    VIDEO_URL = 5  # video URL
    FILE = 6  # file
    CARD = 7  # WeChat contact card, only supported by ntchat
    InviteRoom = 8  # invite a friend into a group chat
    INFO = 9
    ERROR = 10
    TEXT_ = 11  # forced text
    VIDEO = 12
    MINIAPP = 13  # mini program
    def __str__(self):
        return self.name
class Reply:
    """Container pairing a ReplyType with its payload (text, path or URL)."""
    def __init__(self, type: ReplyType = None, content=None):
        # type: kind of reply (see ReplyType); content: the payload itself
        self.type = type
        self.content = content
    def __str__(self):
        return "Reply(type={}, content={})".format(self.type, self.content)
def _set_reply_text(content: str, e_context: EventContext, level: ReplyType = ReplyType.ERROR):
    """Set the event's reply to *content* (type *level*, default ERROR) and
    stop further plugin processing via BREAK_PASS."""
    reply = Reply(level, content)
    e_context["reply"] = reply
    e_context.action = EventAction.BREAK_PASS
9,833 | import plugins
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from plugins import *
from .midjourney import MJBot
from .summary import LinkSummary
from bridge import bridge
from common.expired_dict import ExpiredDict
from common import const
import os
from .utils import Util
def _get_trigger_prefix():
    """Return the configured plugin trigger prefix, defaulting to "$"."""
    return conf().get("plugin_trigger_prefix", "$")
9,834 | import plugins
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from plugins import *
from .midjourney import MJBot
from .summary import LinkSummary
from bridge import bridge
from common.expired_dict import ExpiredDict
from common import const
import os
from .utils import Util
def _find_user_id(context):
    """Return the id of the real sender: the actual group member for group
    messages, otherwise the direct receiver."""
    if context["isgroup"]:
        return context.kwargs.get("msg").actual_user_id
    else:
        return context["receiver"]
# per-user state keyed by "<user_id>-sum_id"/"<user_id>-file_id"; entries
# expire after the configured session lifetime (default 30 minutes)
USER_FILE_MAP = ExpiredDict(conf().get("expires_in_seconds") or 60 * 30)
def _find_sum_id(context):
    """Return the stored summary id for the message's sender, or None."""
    return USER_FILE_MAP.get(_find_user_id(context) + "-sum_id")
9,835 | import plugins
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from plugins import *
from .midjourney import MJBot
from .summary import LinkSummary
from bridge import bridge
from common.expired_dict import ExpiredDict
from common import const
import os
from .utils import Util
def _find_user_id(context):
    """Return the id of the real sender: the actual group member for group
    messages, otherwise the direct receiver."""
    if context["isgroup"]:
        return context.kwargs.get("msg").actual_user_id
    else:
        return context["receiver"]
# per-user state keyed by "<user_id>-sum_id"/"<user_id>-file_id"; entries
# expire after the configured session lifetime (default 30 minutes)
USER_FILE_MAP = ExpiredDict(conf().get("expires_in_seconds") or 60 * 30)
def _find_file_id(context):
    """Return the stored file id for the message's sender, or None.

    Also returns None (implicitly) when no user id can be determined.
    """
    user_id = _find_user_id(context)
    if user_id:
        return USER_FILE_MAP.get(user_id + "-file_id")
9,836 | import json
import os
import random
import string
import logging
from typing import Tuple
import bridge.bridge
import plugins
from bridge.bridge import Bridge
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common import const
from config import conf, load_config, global_config
from plugins import *
# Admin-only console commands. Maps command id -> {
#   "alias": accepted spellings (the first one is shown in help text),
#   "args":  positional argument names (optional),
#   "desc":  human-readable description }
# This table is rendered into the "#help" reply for authenticated admins.
ADMIN_COMMANDS = {
    "resume": {
        "alias": ["resume", "恢复服务"],
        "desc": "恢复服务",
    },
    "stop": {
        "alias": ["stop", "暂停服务"],
        "desc": "暂停服务",
    },
    "reconf": {
        "alias": ["reconf", "重载配置"],
        "desc": "重载配置(不包含插件配置)",
    },
    "resetall": {
        "alias": ["resetall", "重置所有会话"],
        "desc": "重置所有会话",
    },
    "scanp": {
        "alias": ["scanp", "扫描插件"],
        "desc": "扫描插件目录是否有新插件",
    },
    "plist": {
        "alias": ["plist", "插件"],
        "desc": "打印当前插件列表",
    },
    "setpri": {
        "alias": ["setpri", "设置插件优先级"],
        "args": ["插件名", "优先级"],
        "desc": "设置指定插件的优先级,越大越优先",
    },
    "reloadp": {
        "alias": ["reloadp", "重载插件"],
        "args": ["插件名"],
        "desc": "重载指定插件配置",
    },
    "enablep": {
        "alias": ["enablep", "启用插件"],
        "args": ["插件名"],
        "desc": "启用指定插件",
    },
    "disablep": {
        "alias": ["disablep", "禁用插件"],
        "args": ["插件名"],
        "desc": "禁用指定插件",
    },
    "installp": {
        "alias": ["installp", "安装插件"],
        "args": ["仓库地址或插件名"],
        "desc": "安装指定插件",
    },
    "uninstallp": {
        "alias": ["uninstallp", "卸载插件"],
        "args": ["插件名"],
        "desc": "卸载指定插件",
    },
    "updatep": {
        "alias": ["updatep", "更新插件"],
        "args": ["插件名"],
        "desc": "更新指定插件",
    },
    "debug": {
        "alias": ["debug", "调试模式", "DEBUG"],
        "desc": "开启机器调试日志",
    },
}
help_text = "通用指令\n"
for cmd, info in COMMANDS.items():
if cmd in ["auth", "set_openai_api_key", "reset_openai_api_key", "set_gpt_model", "reset_gpt_model", "gpt_model"]: # 不显示帮助指令
continue
if cmd == "id" and conf().get("channel_type", "wx") not in ["wxy", "wechatmp"]:
continue
alias = ["#" + a for a in info["alias"][:1]]
help_text += f"{','.join(alias)} "
if "args" in info:
args = [a for a in info["args"]]
help_text += f"{' '.join(args)}"
help_text += f": {info['desc']}\n"
gins = PluginManager().list_plugins()
help_text += "\n可用插件"
for plugin in plugins:
if plugins[plugin].enabled and not plugins[plugin].hidden:
namecn = plugins[plugin].namecn
help_text += "\n%s:" % namecn
help_text += PluginManager().instances[plugin].get_help_text(verbose=False).strip()
if ADMIN_COMMANDS and isadmin:
help_text += "\n\n管理员指令:\n"
for cmd, info in ADMIN_COMMANDS.items():
alias = ["#" + a for a in info["alias"][:1]]
help_text += f"{','.join(alias)} "
if "args" in info:
args = [a for a in info["args"]]
help_text += f"{' '.join(args)}"
help_text += f": {info['desc']}\n"
return help_text
hidden=True,
# openai apibase,当use_azure_chatgpt为true时,需要设置对应的api base
"open_ai_api_base": "https://api.openai.com/v1",
"proxy": "", # openai使用的代理
# chatgpt模型, 当use_azure_chatgpt为true时,其名称为Azure上model deployment名称
"model": "gpt-3.5-turbo", # 还支持 gpt-4, gpt-4-turbo, wenxin, xunfei, qwen
"use_azure_chatgpt": False, # 是否使用azure的chatgpt
"azure_deployment_id": "", # azure 模型部署名称
"azure_api_version": "", # azure api版本
# Bot触发配置
"single_chat_prefix": ["bot", "@bot"], # 私聊时文本需要包含该前缀才能触发机器人回复
"single_chat_reply_prefix": "[bot] ", # 私聊时自动回复的前缀,用于区分真人
"single_chat_reply_suffix": "", # 私聊时自动回复的后缀,\n 可以换行
"group_chat_prefix": ["@bot"], # 群聊时包含该前缀则会触发机器人回复
"group_chat_reply_prefix": "", # 群聊时自动回复的前缀
"group_chat_reply_suffix": "", # 群聊时自动回复的后缀,\n 可以换行
"group_chat_keyword": [], # 群聊时包含该关键词则会触发机器人回复
"group_at_off": False, # 是否关闭群聊时@bot的触发
"group_name_white_list": ["ChatGPT测试群", "ChatGPT测试群2"], # 开启自动回复的群名称列表
"group_name_keyword_white_list": [], # 开启自动回复的群名称关键词列表
"group_chat_in_one_session": ["ChatGPT测试群"], # 支持会话上下文共享的群名称
"nick_name_black_list": [], # 用户昵称黑名单
"group_welcome_msg": "", # 配置新人进群固定欢迎语,不配置则使用随机风格欢迎
"trigger_by_self": False, # 是否允许机器人触发
"text_to_image": "dall-e-2", # 图片生成模型,可选 dall-e-2, dall-e-3
"image_proxy": True, # 是否需要图片代理,国内访问LinkAI时需要
"image_create_prefix": ["画", "看", "找"], # 开启图片回复的前缀
"concurrency_in_session": 1, # 同一会话最多有多少条消息在处理中,大于1可能乱序
"image_create_size": "256x256", # 图片大小,可选有 256x256, 512x512, 1024x1024 (dall-e-3默认为1024x1024)
"group_chat_exit_group": False,
# chatgpt会话参数
"expires_in_seconds": 3600, # 无操作会话的过期时间
# 人格描述
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",
"conversation_max_tokens": 1000, # 支持上下文记忆的最多字符数
# chatgpt限流配置
"rate_limit_chatgpt": 20, # chatgpt的调用频率限制
"rate_limit_dalle": 50, # openai dalle的调用频率限制
# chatgpt api参数 参考https://platform.openai.com/docs/api-reference/chat/create
"temperature": 0.9,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"request_timeout": 180, # chatgpt请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": 120, # chatgpt重试超时时间,在这个时间内,将会自动重试
# Baidu 文心一言参数
"baidu_wenxin_model": "eb-instant", # 默认使用ERNIE-Bot-turbo模型
"baidu_wenxin_api_key": "", # Baidu api key
"baidu_wenxin_secret_key": "", # Baidu secret key
# 讯飞星火API
"xunfei_app_id": "", # 讯飞应用ID
"xunfei_api_key": "", # 讯飞 API key
"xunfei_api_secret": "", # 讯飞 API secret
# claude 配置
"claude_api_cookie": "",
"claude_uuid": "",
# 通义千问API, 获取方式查看文档 https://help.aliyun.com/document_detail/2587494.html
"qwen_access_key_id": "",
"qwen_access_key_secret": "",
"qwen_agent_key": "",
"qwen_app_id": "",
"qwen_node_id": "", # 流程编排模型用到的id,如果没有用到qwen_node_id,请务必保持为空字符串
# Google Gemini Api Key
"gemini_api_key": "",
# wework的通用配置
"wework_smart": True, # 配置wework是否使用已登录的企业微信,False为多开
# 语音设置
"speech_recognition": True, # 是否开启语音识别
"group_speech_recognition": False, # 是否开启群组语音识别
"voice_reply_voice": False, # 是否使用语音回复语音,需要设置对应语音合成引擎的api key
"always_reply_voice": False, # 是否一直使用语音回复
"voice_to_text": "openai", # 语音识别引擎,支持openai,baidu,google,azure
"text_to_voice": "openai", # 语音合成引擎,支持openai,baidu,google,pytts(offline),azure,elevenlabs
"text_to_voice_model": "tts-1",
"tts_voice_id": "alloy",
# baidu 语音api配置, 使用百度语音识别和语音合成时需要
"baidu_app_id": "",
"baidu_api_key": "",
"baidu_secret_key": "",
# 1536普通话(支持简单的英文识别) 1737英语 1637粤语 1837四川话 1936普通话远场
"baidu_dev_pid": "1536",
# azure 语音api配置, 使用azure语音识别和语音合成时需要
"azure_voice_api_key": "",
"azure_voice_region": "japaneast",
# elevenlabs 语音api配置
"xi_api_key": "", #获取ap的方法可以参考https://docs.elevenlabs.io/api-reference/quick-start/authentication
"xi_voice_id": "", #ElevenLabs提供了9种英式、美式等英语发音id,分别是“Adam/Antoni/Arnold/Bella/Domi/Elli/Josh/Rachel/Sam”
# 服务时间限制,目前支持itchat
"chat_time_module": False, # 是否开启服务时间限制
"chat_start_time": "00:00", # 服务开始时间
"chat_stop_time": "24:00", # 服务结束时间
# 翻译api
"translate": "baidu", # 翻译api,支持baidu
# baidu翻译api的配置
"baidu_translate_app_id": "", # 百度翻译api的appid
"baidu_translate_app_key": "", # 百度翻译api的秘钥
# itchat的配置
"hot_reload": False, # 是否开启热重载
# wechaty的配置
"wechaty_puppet_service_token": "", # wechaty的token
# wechatmp的配置
"wechatmp_token": "", # 微信公众平台的Token
"wechatmp_port": 8080, # 微信公众平台的端口,需要端口转发到80或443
"wechatmp_app_id": "", # 微信公众平台的appID
"wechatmp_app_secret": "", # 微信公众平台的appsecret
"wechatmp_aes_key": "", # 微信公众平台的EncodingAESKey,加密模式需要
# wechatcom的通用配置
"wechatcom_corp_id": "", # 企业微信公司的corpID
# wechatcomapp的配置
"wechatcomapp_token": "", # 企业微信app的token
"wechatcomapp_port": 9898, # 企业微信app的服务端口,不需要端口转发
"wechatcomapp_secret": "", # 企业微信app的secret
"wechatcomapp_agent_id": "", # 企业微信app的agent_id
"wechatcomapp_aes_key": "", # 企业微信app的aes_key
# 飞书配置
"feishu_port": 80, # 飞书bot监听端口
"feishu_app_id": "", # 飞书机器人应用APP Id
"feishu_app_secret": "", # 飞书机器人APP secret
"feishu_token": "", # 飞书 verification token
"feishu_bot_name": "", # 飞书机器人的名字
# 钉钉配置
"dingtalk_client_id": "", # 钉钉机器人Client ID
"dingtalk_client_secret": "", # 钉钉机器人Client Secret
# chatgpt指令自定义触发词
"clear_memory_commands": ["#清除记忆"], # 重置会话指令,必须以#开头
# channel配置
"channel_type": "wx", # 通道类型,支持:{wx,wxy,terminal,wechatmp,wechatmp_service,wechatcom_app}
"subscribe_msg": "", # 订阅消息, 支持: wechatmp, wechatmp_service, wechatcom_app
"debug": False, # 是否开启debug模式,开启后会打印更多日志
"appdata_dir": "", # 数据目录
# 插件配置
"plugin_trigger_prefix": "$", # 规范插件提供聊天相关指令的前缀,建议不要和管理员指令前缀"#"冲突
# 是否使用全局插件配置
"use_global_plugin_config": False,
"max_media_send_count": 3, # 单次最大发送媒体资源的个数
"media_send_interval": 1, # 发送图片的事件间隔,单位秒
# 智谱AI 平台配置
"zhipu_ai_api_key": "",
"zhipu_ai_api_base": "https://open.bigmodel.cn/api/paas/v4",
# LinkAI平台配置
"use_linkai": False,
"linkai_api_key": "",
"linkai_app_code": "",
"linkai_api_base": "https://api.link-ai.chat", # linkAI服务地址,若国内无法访问或延迟较高可改为 https://api.link-ai.tech
}
def conf():
    """Return the global configuration object."""
    return config
[]
}
def get_help_text(isadmin, isgroup):
    """Build the help reply: general commands, summaries of enabled plugins
    and, when *isadmin* is True, the admin command table.

    *isgroup* is accepted but not used here — kept for caller compatibility.
    """
    help_text = "通用指令\n"
    for cmd, info in COMMANDS.items():
        if cmd in ["auth", "set_openai_api_key", "reset_openai_api_key", "set_gpt_model", "reset_gpt_model", "gpt_model"]:  # commands hidden from help
            continue
        if cmd == "id" and conf().get("channel_type", "wx") not in ["wxy", "wechatmp"]:
            continue
        # show only the first alias, prefixed with "#"
        alias = ["#" + a for a in info["alias"][:1]]
        help_text += f"{','.join(alias)} "
        if "args" in info:
            args = [a for a in info["args"]]
            help_text += f"{' '.join(args)}"
        help_text += f": {info['desc']}\n"
    # plugin commands
    plugins = PluginManager().list_plugins()
    help_text += "\n可用插件"
    for plugin in plugins:
        if plugins[plugin].enabled and not plugins[plugin].hidden:
            namecn = plugins[plugin].namecn
            help_text += "\n%s:" % namecn
            help_text += PluginManager().instances[plugin].get_help_text(verbose=False).strip()
    if ADMIN_COMMANDS and isadmin:
        help_text += "\n\n管理员指令:\n"
        for cmd, info in ADMIN_COMMANDS.items():
            alias = ["#" + a for a in info["alias"][:1]]
            help_text += f"{','.join(alias)} "
            if "args" in info:
                args = [a for a in info["args"]]
                help_text += f"{' '.join(args)}"
            help_text += f": {info['desc']}\n"
    return help_text
9,837 | import glob
import os
import re
import subprocess
from os.path import basename, splitext, join
from setuptools import setup
from setuptools.command.install import install
The provided code snippet includes necessary dependencies for implementing the `get_packages` function. Write a Python function `def get_packages(base="inputremapper")` to solve the following problem:
Return all modules used in input-remapper. For example 'inputremapper.gui' or 'inputremapper.injection.mapping_handlers'
Here is the function:
def get_packages(base="inputremapper"):
    """Return all modules used in input-remapper.

    For example 'inputremapper.gui' or 'inputremapper.injection.mapping_handlers'
    """
    if not os.path.exists(os.path.join(base, "__init__.py")):
        # not a python package
        return []
    packages = [base.replace("/", ".")]
    for entry in os.listdir(base):
        subdir = os.path.join(base, entry)
        if not os.path.isdir(subdir) or entry == "__pycache__":
            continue
        # recurse into candidate sub-packages
        packages.extend(get_packages(subdir))
    return packages
9,838 | import glob
import os
import re
import subprocess
from os.path import basename, splitext, join
from setuptools import setup
from setuptools.command.install import install
PO_FILES = "po/*.po"
for po_file in glob.glob(PO_FILES):
lang = splitext(basename(po_file))[0]
lang_data.append(
(
f"/usr/share/input-remapper/lang/{lang}/LC_MESSAGES",
[f"mo/{lang}/input-remapper.mo"],
)
)
The provided code snippet includes necessary dependencies for implementing the `make_lang` function. Write a Python function `def make_lang()` to solve the following problem:
Build po files into mo/.
Here is the function:
def make_lang():
    """Build po files into mo/."""
    os.makedirs("mo", exist_ok=True)
    for po_file in glob.glob(PO_FILES):
        # language code is the .po file's base name, e.g. "de" from po/de.po
        lang = splitext(basename(po_file))[0]
        os.makedirs(join("mo", lang), exist_ok=True)
        print(f"generating translation for {lang}")
        # msgfmt compiles the .po catalog into the binary .mo format;
        # check=True aborts the build if compilation fails
        subprocess.run(
            ["msgfmt", "-o", join("mo", lang, "input-remapper.mo"), str(po_file)],
            check=True,
        )
9,839 | import sys
from hashlib import md5
from typing import Optional
import evdev
def is_service() -> bool:
    """Return True when the current process was started as the background
    service executable rather than the GUI or CLI."""
    executable = sys.argv[0]
    return executable.endswith("input-remapper-service")
9,840 | import sys
from hashlib import md5
from typing import Optional
import evdev
DeviceHash = str
The provided code snippet includes necessary dependencies for implementing the `get_device_hash` function. Write a Python function `def get_device_hash(device: evdev.InputDevice) -> DeviceHash` to solve the following problem:
get a unique hash for the given device
Here is the function:
def get_device_hash(device: evdev.InputDevice) -> DeviceHash:
    """Get a unique, deterministic hash for the given device."""
    # the builtin hash() cannot be used because it is randomly seeded at
    # python startup; md5 is deterministic across runs and fast enough here
    # (there is no faster non-cryptographic hash in the standard library)
    fingerprint = str(device.capabilities(absinfo=False)) + device.name
    return md5(fingerprint.encode()).hexdigest().lower()
9,841 | from __future__ import annotations
import asyncio
import copy
import math
import re
from typing import List, Callable, Awaitable, Tuple, Optional, Union, Any
from evdev.ecodes import (
ecodes,
EV_KEY,
EV_REL,
REL_X,
REL_Y,
REL_WHEEL_HI_RES,
REL_HWHEEL_HI_RES,
REL_WHEEL,
REL_HWHEEL,
)
from inputremapper.configs.system_mapping import system_mapping
from inputremapper.configs.validation_errors import (
SymbolNotAvailableInTargetError,
MacroParsingError,
)
from inputremapper.injection.global_uinputs import can_default_uinput_emit
from inputremapper.ipc.shared_dict import SharedDict
from inputremapper.logger import logger
class MacroParsingError(ValueError):
    """Raised for macro syntax errors."""

    def __init__(self, symbol: Optional[str] = None, msg="Error while parsing a macro"):
        # keep the offending piece of macro code around so callers can
        # point at it; the message itself goes to ValueError
        self.symbol = symbol
        super().__init__(msg)
The provided code snippet includes necessary dependencies for implementing the `_type_check_variablename` function. Write a Python function `def _type_check_variablename(name: str)` to solve the following problem:
Check if this is a legit variable name. Because they could clash with language features. If the macro is able to be parsed at all due to a problematic choice of a variable name. Allowed examples: "foo", "Foo1234_", "_foo_1234" Not allowed: "1_foo", "foo=blub", "$foo", "foo,1234", "foo()"
Here is the function:
def _type_check_variablename(name: str):
"""Check if this is a legit variable name.
Because they could clash with language features. If the macro is able to be
parsed at all due to a problematic choice of a variable name.
Allowed examples: "foo", "Foo1234_", "_foo_1234"
Not allowed: "1_foo", "foo=blub", "$foo", "foo,1234", "foo()"
"""
if not isinstance(name, str) or not re.match(r"^[A-Za-z_][A-Za-z_0-9]*$", name):
raise MacroParsingError(msg=f'"{name}" is not a legit variable name') | Check if this is a legit variable name. Because they could clash with language features. If the macro is able to be parsed at all due to a problematic choice of a variable name. Allowed examples: "foo", "Foo1234_", "_foo_1234" Not allowed: "1_foo", "foo=blub", "$foo", "foo,1234", "foo()" |
9,842 | from __future__ import annotations
import asyncio
import copy
import math
import re
from typing import List, Callable, Awaitable, Tuple, Optional, Union, Any
from evdev.ecodes import (
ecodes,
EV_KEY,
EV_REL,
REL_X,
REL_Y,
REL_WHEEL_HI_RES,
REL_HWHEEL_HI_RES,
REL_WHEEL,
REL_HWHEEL,
)
from inputremapper.configs.system_mapping import system_mapping
from inputremapper.configs.validation_errors import (
SymbolNotAvailableInTargetError,
MacroParsingError,
)
from inputremapper.injection.global_uinputs import can_default_uinput_emit
from inputremapper.ipc.shared_dict import SharedDict
from inputremapper.logger import logger
class Variable:
    """A placeholder usable as a parameter in the various add_... functions.

    Parsed from strings like `$foo` in `repeat($foo, k(KEY_A))`.

    The value is unknown while the macro is being constructed; it is looked
    up from shared memory at runtime, after a `set` macro has stored it.
    """

    def __init__(self, name: str):
        self.name = name

    def resolve(self):
        """Look the variable's current value up from memory."""
        return macro_variables.get(self.name)

    def __repr__(self):
        return '<Variable "{}" at {}>'.format(self.name, hex(id(self)))
def _type_check(value: Any, allowed_types, display_name=None, position=None) -> Any:
    """Validate a parameter used in a macro.

    If the value is a Variable, it will be returned and should be resolved
    during runtime with _resolve.

    Raises MacroParsingError when the value fits none of *allowed_types*;
    the message mentions *display_name*/*position* when both are given.
    """
    if isinstance(value, Variable):
        # it is a variable and will be read at runtime
        return value
    for allowed_type in allowed_types:
        if allowed_type is None:
            if value is None:
                return value
            continue
        # try to parse "1" as 1 if possible
        if allowed_type != Macro:
            # the macro constructor with a single argument always succeeds,
            # but will definitely not result in the correct macro
            try:
                return allowed_type(value)
            except (TypeError, ValueError):
                pass
        if isinstance(value, allowed_type):
            return value
    if display_name is not None and position is not None:
        raise MacroParsingError(
            msg=f"Expected parameter {position} for {display_name} to be "
            f"one of {allowed_types}, but got {value}"
        )
    raise MacroParsingError(
        msg=f"Expected parameter to be one of {allowed_types}, but got {value}"
    )
logger = cast(Logger, logging.getLogger("input-remapper"))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
The provided code snippet includes necessary dependencies for implementing the `_resolve` function. Write a Python function `def _resolve(argument, allowed_types=None)` to solve the following problem:
If the argument is a variable, figure out its value and cast it. Variables are prefixed with `$` in the syntax. Use this just-in-time when you need the actual value of the variable during runtime.
Here is the function:
def _resolve(argument, allowed_types=None):
    """If the argument is a variable, figure out its value and cast it.

    Variables are prefixed with `$` in the syntax. Non-variables pass
    through untouched.

    Use this just-in-time when you need the actual value of the variable
    during runtime.
    """
    if not isinstance(argument, Variable):
        return argument
    value = argument.resolve()
    logger.debug('"%s" is "%s"', argument, value)
    if allowed_types:
        return _type_check(value, allowed_types)
    return value
9,843 | import inspect
import re
from typing import Optional, Any
from inputremapper.configs.validation_errors import MacroParsingError
from inputremapper.injection.macros.macro import Macro, Variable
from inputremapper.logger import logger
def _parse_recurse(
    code: str,
    context,
    mapping,
    verbose: bool,
    macro_instance: Optional[Macro] = None,
    depth: int = 0,
):
    """Handle a subset of the macro, e.g. one parameter or function call.

    Not using eval for security reasons.

    Parameters
    ----------
    code
        Just like parse. A single parameter or the complete macro as string.
        Comments and redundant whitespace characters are expected to be removed already.
        TODO add some examples.
          Are all of "foo(1);bar(2)" "foo(1)" and "1" valid inputs?
    context : Context
    macro_instance
        A macro instance to add tasks to. This is the output of the parser, and is
        organized like a tree.
    depth
        For logging purposes
    """
    assert isinstance(code, str)
    assert isinstance(depth, int)
    def debug(*args, **kwargs):
        # only log when the caller asked for verbose parsing
        if verbose:
            logger.debug(*args, **kwargs)
    space = " " * depth
    code = code.strip()
    if code == "" or code == "None":
        # A function parameter probably
        # I think "" is the deprecated alternative to "None"
        return None
    if code.startswith('"'):
        # TODO and endswith check, if endswith fails throw error?
        #  what is currently the error if only one quote is set?
        # a string, don't parse. remove quotes
        string = code[1:-1]
        debug("%sstring %s", space, string)
        return string
    if code.startswith("$"):
        # will be resolved during the macros runtime
        return Variable(code.split("$", 1)[1])
    if _is_number(code):
        # numeric literal: int when possible, float when it has a dot
        if "." in code:
            code = float(code)
        else:
            code = int(code)
        debug("%snumber %s", space, code)
        return code
    # is it another macro?
    call_match = re.match(r"^(\w+)\(", code)
    call = call_match[1] if call_match else None
    if call is not None:
        if macro_instance is None:
            # start a new chain
            macro_instance = Macro(code, context, mapping)
        else:
            # chain this call to the existing instance
            assert isinstance(macro_instance, Macro)
        task_factory = TASK_FACTORIES.get(call)
        if task_factory is None:
            raise MacroParsingError(code, f"Unknown function {call}")
        # get all the stuff inbetween
        closing_bracket_position = _count_brackets(code) - 1
        inner = code[code.index("(") + 1 : closing_bracket_position]
        debug("%scalls %s with %s", space, call, inner)
        # split "3, foo=a(2, k(a).w(10))" into arguments
        raw_string_args = _extract_args(inner)
        # parse and sort the params
        positional_args = []
        keyword_args = {}
        for param in raw_string_args:
            key, value = _split_keyword_arg(param)
            parsed = _parse_recurse(
                value.strip(), context, mapping, verbose, None, depth + 1
            )
            if key is None:
                if len(keyword_args) > 0:
                    msg = f'Positional argument "{key}" follows keyword argument'
                    raise MacroParsingError(code, msg)
                positional_args.append(parsed)
            else:
                if key in keyword_args:
                    raise MacroParsingError(
                        code, f'The "{key}" argument was specified twice'
                    )
                keyword_args[key] = parsed
        debug(
            "%sadd call to %s with %s, %s",
            space,
            call,
            positional_args,
            keyword_args,
        )
        # arity check before invoking the factory
        min_args, max_args = get_num_parameters(task_factory)
        num_provided_args = len(raw_string_args)
        if num_provided_args < min_args or num_provided_args > max_args:
            if min_args != max_args:
                msg = (
                    f"{call} takes between {min_args} and {max_args}, "
                    f"not {num_provided_args} parameters"
                )
            else:
                msg = f"{call} takes {min_args}, not {num_provided_args} parameters"
            raise MacroParsingError(code, msg)
        use_safe_argument_names(keyword_args)
        try:
            task_factory(macro_instance, *positional_args, **keyword_args)
        except TypeError as exception:
            raise MacroParsingError(msg=str(exception)) from exception
        # is after this another call? Chain it to the macro_instance
        more_code_exists = len(code) > closing_bracket_position + 1
        if more_code_exists:
            next_char = code[closing_bracket_position + 1]
            statement_closed = next_char == "."
            if statement_closed:
                # skip over the ")."
                chain = code[closing_bracket_position + 2 :]
                debug("%sfollowed by %s", space, chain)
                _parse_recurse(chain, context, mapping, verbose, macro_instance, depth)
            elif re.match(r"[a-zA-Z_]", next_char):
                # something like foo()bar
                raise MacroParsingError(
                    code,
                    f'Expected a "." to follow after '
                    f"{code[:closing_bracket_position + 1]}",
                )
        return macro_instance
    # It is probably either a key name like KEY_A or a variable name as in `set(var,1)`,
    # both won't contain special characters that can break macro syntax so they don't
    # have to be wrapped in quotes.
    debug("%sstring %s", space, code)
    return code
def handle_plus_syntax(macro):
    """Transform a + b + c to hold_keys(a,b,c).

    Parameters
    ----------
    macro
        the raw macro code

    Raises
    ------
    MacroParsingError
        if "+" is mixed with parentheses, or a chunk between "+" is empty
    """
    if "+" not in macro:
        return macro

    if "(" in macro or ")" in macro:
        raise MacroParsingError(
            macro, f'Mixing "+" and macros is unsupported: "{ macro}"'
        )

    chunks = [chunk.strip() for chunk in macro.split("+")]

    if "" in chunks:
        # pass the offending code as `symbol` and the message as `msg`;
        # previously the message was passed as `symbol`, so str(error)
        # showed only the generic default text
        raise MacroParsingError(macro, f'Invalid syntax for "{macro}"')

    output = f"hold_keys({','.join(chunks)})"

    logger.debug('Transformed "%s" to "%s"', macro, output)
    return output
def clean(code):
    """Strip comments and irrelevant whitespace from macro code."""
    without_comments = remove_comments(code)
    return remove_whitespaces(without_comments, '"')
class MacroParsingError(ValueError):
    """Raised when macro code contains a syntax error."""

    def __init__(self, symbol: Optional[str] = None, msg="Error while parsing a macro"):
        # remember which piece of code caused the error, e.g. for the GUI
        self.symbol = symbol
        super().__init__(msg)
class Macro:
"""Supports chaining and preparing actions.
Calling functions like keycode on Macro doesn't inject any events yet,
it means that once .run is used it will be executed along with all other
queued tasks.
Those functions need to construct an asyncio coroutine and append it to
self.tasks. This makes parameter checking during compile time possible, as long
as they are not variables that are resolved during runtime. Coroutines receive a
handler as argument, which is a function that can be used to inject input events
into the system.
1. A few parameters of any type are thrown into a macro function like `repeat`
2. `Macro.repeat` will verify the parameter types if possible using `_type_check`
(it can't for $variables). This helps debugging macros before the injection
starts, but is not mandatory to make things work.
3. `Macro.repeat`
- adds a task to self.tasks. This task resolves any variables with `_resolve`
and does what the macro is supposed to do once `macro.run` is called.
- also adds the child macro to self.child_macros.
- adds the used keys to the capabilities
4. `Macro.run` will run all tasks in self.tasks
"""
def __init__(
    self,
    code: Optional[str],
    context=None,
    mapping=None,
):
    """Create a macro instance that can be populated with tasks.

    Parameters
    ----------
    code
        The original parsed code, for logging purposes.
    context : Context
    mapping : UIMapping
    """
    self.code = code
    self.context = context
    self.mapping = mapping
    # TODO check if mapping is ever none by throwing an error

    # List of coroutines that will be called sequentially.
    # This is the compiled code
    self.tasks: List[MacroTask] = []

    # can be used to wait for the release of the event
    self._trigger_release_event = asyncio.Event()
    self._trigger_press_event = asyncio.Event()
    # released by default
    self._trigger_release_event.set()
    self._trigger_press_event.clear()

    # True while `run` is executing; guards against double execution
    self.running = False

    # macros nested inside this one (hold, repeat, modify, if_*, ...)
    self.child_macros: List[Macro] = []

    # sleep between keystrokes, read from the mapping when `run` starts
    self.keystroke_sleep_ms = None
def is_holding(self):
    """Whether the macro's trigger key is currently pressed down."""
    # holding means the release event has not fired yet
    holding = not self._trigger_release_event.is_set()
    return holding
def get_capabilities(self):
    """Get the merged capabilities of the macro and its children.

    Returns a dict mapping event type to a set of event codes.
    """
    # NOTE(review): self.capabilities is not assigned in the __init__
    # visible here — presumably another part of the file sets it; verify
    # before relying on this method.
    capabilities = copy.deepcopy(self.capabilities)

    # merge in every child macro's capabilities, per event type
    for macro in self.child_macros:
        macro_capabilities = macro.get_capabilities()
        for type_ in macro_capabilities:
            if type_ not in capabilities:
                capabilities[type_] = set()
            capabilities[type_].update(macro_capabilities[type_])

    return capabilities
async def run(self, handler: Callable):
    """Run the macro.

    Parameters
    ----------
    handler
        Will receive int type, code and value for an event to write
    """
    if not callable(handler):
        raise ValueError("handler is not callable")

    if self.running:
        logger.error('Tried to run already running macro "%s"', self.code)
        return

    # read the configured keystroke sleep each time the macro starts
    self.keystroke_sleep_ms = self.mapping.macro_key_sleep_ms

    self.running = True
    try:
        # the tasks are the compiled macro; run them in order
        for task in self.tasks:
            coroutine = task(handler)
            if asyncio.iscoroutine(coroutine):
                await coroutine
    finally:
        # the previous `except Exception: raise` was a no-op; the finally
        # block alone guarantees the flag is cleared even if a task raised
        self.running = False
def press_trigger(self):
    """Notify the macro that the user pressed the trigger key down."""
    if self.is_holding():
        # the previous press was never released
        logger.error("Already holding")
        return

    self._trigger_release_event.clear()
    self._trigger_press_event.set()

    # propagate to nested macros as well
    for child in self.child_macros:
        child.press_trigger()
def release_trigger(self):
    """Notify the macro that the user released the trigger key."""
    self._trigger_release_event.set()
    self._trigger_press_event.clear()

    # propagate to nested macros as well
    for child in self.child_macros:
        child.release_trigger()
async def _keycode_pause(self, _=None):
    """Sleep between keystrokes.

    This was needed at some point because it appeared that injecting keys too
    fast will prevent them from working. It probably depends on the environment.
    """
    delay_seconds = self.keystroke_sleep_ms / 1000
    await asyncio.sleep(delay_seconds)
def __repr__(self):
    # include the original code so log output identifies the macro
    return f'<Macro "{self.code}" at {hex(id(self))}>'

# The bare string below is a no-op expression statement, apparently used
# as a visual section divider inside the class body.
"""Functions that prepare the macro."""
def add_key(self, symbol: str):
    """Press and release the symbol once."""
    # This is done to figure out if the macro is broken at compile time, because
    # if KEY_A was unknown we can show this in the gui before the injection starts.
    self._type_check_symbol(symbol)

    async def task(handler: Callable):
        # if the code is $foo, figure out the correct code now.
        resolved_symbol = _resolve(symbol, [str])
        code = self._type_check_symbol(resolved_symbol)
        resolved_code = _resolve(code, [int])
        handler(EV_KEY, resolved_code, 1)  # key down
        await self._keycode_pause()
        handler(EV_KEY, resolved_code, 0)  # key up
        await self._keycode_pause()

    self.tasks.append(task)
def add_key_down(self, symbol: str):
    """Press the symbol without releasing it."""
    # fail at compile time already if the symbol is unknown
    self._type_check_symbol(symbol)

    async def task(handler: Callable):
        # symbols like $foo are resolved at runtime
        name = _resolve(symbol, [str])
        key_code = _resolve(self._type_check_symbol(name), [int])
        handler(EV_KEY, key_code, 1)

    self.tasks.append(task)
def add_key_up(self, symbol: str):
    """Release the symbol."""
    # fail at compile time already if the symbol is unknown
    self._type_check_symbol(symbol)

    async def task(handler: Callable):
        # symbols like $foo are resolved at runtime
        name = _resolve(symbol, [str])
        key_code = _resolve(self._type_check_symbol(name), [int])
        handler(EV_KEY, key_code, 0)

    self.tasks.append(task)
def add_hold(self, macro=None):
    """Loops the execution until key release.

    Parameters
    ----------
    macro
        None: just wait for the trigger release.
        str: a key name that is held down while the trigger is held.
        Macro: a macro that repeats while the trigger is held.
    """
    _type_check(macro, [Macro, str, None], "hold", 1)

    if macro is None:
        # no argument: simply block until the trigger key is released
        self.tasks.append(lambda _: self._trigger_release_event.wait())
        return

    if not isinstance(macro, Macro):
        # if macro is a key name, hold down the key while the
        # keyboard key is physically held down
        symbol = macro
        self._type_check_symbol(symbol)

        async def task(handler: Callable):
            resolved_symbol = _resolve(symbol, [str])
            code = self._type_check_symbol(resolved_symbol)
            resolved_code = _resolve(code, [int])
            handler(EV_KEY, resolved_code, 1)
            await self._trigger_release_event.wait()
            handler(EV_KEY, resolved_code, 0)

        self.tasks.append(task)

    if isinstance(macro, Macro):
        # repeat the macro forever while the key is held down
        async def task(handler: Callable):
            while self.is_holding():
                # run the child macro completely to avoid
                # not-releasing any key
                await macro.run(handler)
                # give some other code a chance to run
                await asyncio.sleep(1 / 1000)

        self.tasks.append(task)
        self.child_macros.append(macro)
def add_modify(self, modifier: str, macro: Macro):
    """Run the child macro while a modifier key is held down.

    Parameters
    ----------
    modifier
        name of the modifier key
    macro
        the macro to run while the modifier is pressed
    """
    _type_check(macro, [Macro], "modify", 2)
    self._type_check_symbol(modifier)

    self.child_macros.append(macro)

    async def task(handler: Callable):
        # TODO test var
        resolved_modifier = _resolve(modifier, [str])
        code = self._type_check_symbol(resolved_modifier)
        # press the modifier, run the macro, release the modifier
        handler(EV_KEY, code, 1)
        await self._keycode_pause()
        await macro.run(handler)
        handler(EV_KEY, code, 0)
        await self._keycode_pause()

    self.tasks.append(task)
def add_hold_keys(self, *symbols):
    """Hold down multiple keys, equivalent to `a + b + c + ...`."""
    # validate all symbols at compile time
    for symbol in symbols:
        self._type_check_symbol(symbol)

    async def task(handler: Callable):
        resolved_symbols = [_resolve(symbol, [str]) for symbol in symbols]
        codes = [self._type_check_symbol(symbol) for symbol in resolved_symbols]

        # press the keys in the given order
        for code in codes:
            handler(EV_KEY, code, 1)
            await self._keycode_pause()

        await self._trigger_release_event.wait()

        # release them in reverse order
        for code in codes[::-1]:
            handler(EV_KEY, code, 0)
            await self._keycode_pause()

    self.tasks.append(task)
def add_repeat(self, repeats: Union[str, int], macro: Macro):
    """Run the child macro a fixed number of times."""
    repeats = _type_check(repeats, [int], "repeat", 1)
    _type_check(macro, [Macro], "repeat", 2)

    async def task(handler: Callable):
        # `repeats` may be a variable, so resolve it at runtime
        count = _resolve(repeats, [int])
        for _ in range(count):
            await macro.run(handler)

    self.tasks.append(task)
    self.child_macros.append(macro)
def add_event(self, type_: Union[str, int], code: Union[str, int], value: int):
    """Write any event.

    Parameters
    ----------
    type_
        examples: 2, 'EV_KEY'
    code
        examples: 52, 'KEY_A'
    value
    """
    type_ = _type_check(type_, [int, str], "event", 1)
    code = _type_check(code, [int, str], "event", 2)
    value = _type_check(value, [int, str], "event", 3)

    # translate names like 'EV_KEY' or 'KEY_A' to their numeric codes
    if isinstance(type_, str):
        type_ = ecodes[type_.upper()]
    if isinstance(code, str):
        code = ecodes[code.upper()]

    # two tasks: write the event, then pause between keystrokes
    self.tasks.append(lambda handler: handler(type_, code, value))
    self.tasks.append(self._keycode_pause)
def add_mouse(self, direction: str, speed: int):
    """Move the mouse cursor while the trigger key is held down.

    Parameters
    ----------
    direction
        one of "up", "down", "left", "right" (case-insensitive);
        anything else raises KeyError
    speed
        movement per emitted event, in REL units
    """
    _type_check(direction, [str], "mouse", 1)
    speed = _type_check(speed, [int], "mouse", 2)

    # map the direction to a REL axis and a sign
    code, value = {
        "up": (REL_Y, -1),
        "down": (REL_Y, 1),
        "left": (REL_X, -1),
        "right": (REL_X, 1),
    }[direction.lower()]

    async def task(handler: Callable):
        resolved_speed = value * _resolve(speed, [int])
        # keep emitting REL events at the configured rate until released
        while self.is_holding():
            handler(EV_REL, code, resolved_speed)
            await asyncio.sleep(1 / self.mapping.rel_rate)

    self.tasks.append(task)
def add_wheel(self, direction: str, speed: int):
    """Move the scroll wheel while the trigger key is held down.

    Parameters
    ----------
    direction
        one of "up", "down", "left", "right" (case-insensitive);
        anything else raises KeyError
    speed
        scroll speed per emitted event
    """
    _type_check(direction, [str], "wheel", 1)
    speed = _type_check(speed, [int], "wheel", 2)

    # each direction maps to [regular, hi-res] codes with their per-event
    # values; the regular wheel moves in 1/120ths of the hi-res wheel
    code, value = {
        "up": ([REL_WHEEL, REL_WHEEL_HI_RES], [1 / 120, 1]),
        "down": ([REL_WHEEL, REL_WHEEL_HI_RES], [-1 / 120, -1]),
        "left": ([REL_HWHEEL, REL_HWHEEL_HI_RES], [1 / 120, 1]),
        "right": ([REL_HWHEEL, REL_HWHEEL_HI_RES], [-1 / 120, -1]),
    }[direction.lower()]

    async def task(handler: Callable):
        resolved_speed = _resolve(speed, [int])
        # carry the fractional part over to the next iteration, so slow
        # regular-wheel scrolling is not lost to integer truncation
        remainder = [0.0, 0.0]
        while self.is_holding():
            for i in range(0, 2):
                float_value = value[i] * resolved_speed + remainder[i]
                remainder[i] = math.fmod(float_value, 1)
                if abs(float_value) >= 1:
                    handler(EV_REL, code[i], int(float_value))
            await asyncio.sleep(1 / self.mapping.rel_rate)

    self.tasks.append(task)
def add_wait(self, time: Union[int, float]):
    """Pause the macro for the given number of milliseconds."""
    time = _type_check(time, [int, float], "wait", 1)

    async def task(_):
        # `time` may be a variable, resolve it when the macro runs
        milliseconds = _resolve(time, [int, float])
        await asyncio.sleep(milliseconds / 1000)

    self.tasks.append(task)
def add_set(self, variable: str, value):
    """Set a variable to a certain value."""
    _type_check_variablename(variable)

    async def task(_):
        # can also copy with set(a, $b)
        resolved_value = _resolve(value)
        logger.debug('"%s" set to "%s"', variable, resolved_value)
        # store the resolved value, not the raw input — previously the raw
        # `value` was stored, so `set(a, $b)` stored the Variable object
        # instead of b's current value (contradicting the log line above)
        macro_variables[variable] = resolved_value

    self.tasks.append(task)
def add_add(self, variable: str, value: Union[int, float]):
    """Add a number to a variable."""
    _type_check_variablename(variable)
    _type_check(value, [int, float], "value", 1)

    async def task(_):
        current = macro_variables[variable]
        if current is None:
            # uninitialized variables start at 0
            logger.debug('"%s" initialized with 0', variable)
            macro_variables[variable] = 0
            current = 0

        resolved_value = _resolve(value)
        if not isinstance(resolved_value, (int, float)):
            logger.error('Expected delta "%s" to be a number', resolved_value)
            return

        if not isinstance(current, (int, float)):
            logger.error(
                'Expected variable "%s" to contain a number, but got "%s"',
                variable,
                current,
            )
            return

        logger.debug('"%s" += "%s"', variable, resolved_value)
        # add the resolved number — previously the raw `value` was added,
        # bypassing the numeric check performed on `resolved_value` above
        macro_variables[variable] += resolved_value

    self.tasks.append(task)
def add_ifeq(self, variable, value, then=None, else_=None):
    """Old version of if_eq, kept for compatibility reasons.

    This can't support a comparison like ifeq("foo", $blub) with blub containing
    "foo" without breaking old functionality, because "foo" is treated as a
    variable name.
    """
    _type_check(then, [Macro, None], "ifeq", 3)
    _type_check(else_, [Macro, None], "ifeq", 4)

    async def task(handler: Callable):
        # compare the stored variable against the literal value
        set_value = macro_variables.get(variable)
        logger.debug('"%s" is "%s"', variable, set_value)
        if set_value == value:
            if then is not None:
                await then.run(handler)
        elif else_ is not None:
            await else_.run(handler)

    # register branches as children so trigger events propagate to them
    if isinstance(then, Macro):
        self.child_macros.append(then)
    if isinstance(else_, Macro):
        self.child_macros.append(else_)

    self.tasks.append(task)
def add_if_eq(self, value_1, value_2, then=None, else_=None):
    """Compare two values and run `then` or `else_` accordingly.

    Unlike the legacy `ifeq`, both sides may be literals or $variables.
    """
    _type_check(then, [Macro, None], "if_eq", 3)
    _type_check(else_, [Macro, None], "if_eq", 4)

    async def task(handler: Callable):
        # resolve both sides at runtime before comparing
        resolved_value_1 = _resolve(value_1)
        resolved_value_2 = _resolve(value_2)
        if resolved_value_1 == resolved_value_2:
            if then is not None:
                await then.run(handler)
        elif else_ is not None:
            await else_.run(handler)

    # register branches as children so trigger events propagate to them
    if isinstance(then, Macro):
        self.child_macros.append(then)
    if isinstance(else_, Macro):
        self.child_macros.append(else_)

    self.tasks.append(task)
def add_if_tap(self, then=None, else_=None, timeout=300):
    """If a key was pressed quickly, run `then`, otherwise `else_`.

    macro key pressed -> if_tap starts -> key released -> then

    macro key pressed -> released (does other stuff in the meantime)
    -> if_tap starts -> pressed -> released -> then

    Parameters
    ----------
    timeout
        the tap window in milliseconds
    """
    _type_check(then, [Macro, None], "if_tap", 1)
    _type_check(else_, [Macro, None], "if_tap", 2)
    timeout = _type_check(timeout, [int, float], "if_tap", 3)

    # register branches as children so trigger events propagate to them
    if isinstance(then, Macro):
        self.child_macros.append(then)
    if isinstance(else_, Macro):
        self.child_macros.append(else_)

    async def wait():
        """Wait for a release, or if nothing pressed yet, a press and release."""
        if self.is_holding():
            await self._trigger_release_event.wait()
        else:
            await self._trigger_press_event.wait()
            await self._trigger_release_event.wait()

    async def task(handler: Callable):
        resolved_timeout = _resolve(timeout, [int, float]) / 1000
        try:
            # run `then` only if the press/release happened within the window
            await asyncio.wait_for(wait(), resolved_timeout)
            if then:
                await then.run(handler)
        except asyncio.TimeoutError:
            if else_:
                await else_.run(handler)

    self.tasks.append(task)
def add_if_single(self, then, else_, timeout=None):
    """If a key was pressed without combining it with another key.

    Parameters
    ----------
    timeout
        optional timeout in milliseconds; None waits indefinitely
    """
    _type_check(then, [Macro, None], "if_single", 1)
    _type_check(else_, [Macro, None], "if_single", 2)

    # register branches as children so trigger events propagate to them
    if isinstance(then, Macro):
        self.child_macros.append(then)
    if isinstance(else_, Macro):
        self.child_macros.append(else_)

    async def task(handler: Callable):
        listener_done = asyncio.Event()

        async def listener(event):
            if event.type != EV_KEY:
                # ignore anything that is not a key
                return

            if event.value == 1:
                # another key was pressed, trigger else
                listener_done.set()
                return

        self.context.listeners.add(listener)

        resolved_timeout = _resolve(timeout, allowed_types=[int, float, None])

        # wait until either another key is pressed, the trigger is
        # released, or the timeout expires — whichever happens first
        await asyncio.wait(
            [
                asyncio.Task(listener_done.wait()),
                asyncio.Task(self._trigger_release_event.wait()),
            ],
            timeout=resolved_timeout / 1000 if resolved_timeout else None,
            return_when=asyncio.FIRST_COMPLETED,
        )

        self.context.listeners.remove(listener)

        if not listener_done.is_set() and self._trigger_release_event.is_set():
            if then:
                await then.run(handler)  # was trigger release
        else:
            if else_:
                await else_.run(handler)

    self.tasks.append(task)
def _type_check_symbol(self, keyname: Union[str, Variable]) -> Union[Variable, int]:
    """Same as _type_check, but checks if the key-name is valid.

    Returns the Variable unchanged (it is resolved at runtime), or the
    numeric key code for a known symbol.

    Raises
    ------
    MacroParsingError
        if the symbol is unknown
    SymbolNotAvailableInTargetError
        if the mapping's target uinput cannot emit the key
    """
    if isinstance(keyname, Variable):
        # it is a variable and will be read at runtime
        return keyname

    symbol = str(keyname)
    code = system_mapping.get(symbol)

    if code is None:
        raise MacroParsingError(msg=f'Unknown key "{symbol}"')

    if self.mapping is not None:
        target = self.mapping.target_uinput
        if target is not None and not can_default_uinput_emit(target, EV_KEY, code):
            raise SymbolNotAvailableInTargetError(symbol, target)

    return code
# Application-wide logger. NOTE(review): `handler` must be defined earlier
# in the file (not visible here) — presumably a logging.Handler; confirm.
logger = cast(Logger, logging.getLogger("input-remapper"))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
The provided code snippet includes necessary dependencies for implementing the `parse` function. Write a Python function `def parse(macro: str, context=None, mapping=None, verbose: bool = True)` to solve the following problem:
Parse and generate a Macro that can be run as often as you want. Parameters ---------- macro "repeat(3, key(a).wait(10))" "repeat(2, key(a).key(KEY_A)).key(b)" "wait(1000).modify(Shift_L, repeat(2, k(a))).wait(10, 20).key(b)" context : Context, or None for use in Frontend mapping the mapping for the macro, or None for use in Frontend verbose log the parsing True by default
Here is the function:
def parse(macro: str, context=None, mapping=None, verbose: bool = True):
    """Parse and generate a Macro that can be run as often as you want.

    Parameters
    ----------
    macro
        "repeat(3, key(a).wait(10))"
        "repeat(2, key(a).key(KEY_A)).key(b)"
        "wait(1000).modify(Shift_L, repeat(2, k(a))).wait(10, 20).key(b)"
    context : Context, or None for use in Frontend
    mapping
        the mapping for the macro, or None for use in Frontend
    verbose
        log the parsing, True by default
    """
    # TODO pass mapping in frontend and do the target check for keys?
    logger.debug("parsing macro %s", macro.replace("\n", ""))

    # normalize the code before compiling it
    cleaned = handle_plus_syntax(clean(macro))

    result = _parse_recurse(cleaned, context, mapping, verbose)
    if isinstance(result, Macro):
        return result

    raise MacroParsingError(cleaned, "The provided code was not a macro")
import re
import subprocess
from inputremapper.logger import logger
def is_numlock_on():
    """Get the current state of the numlock.

    Returns True or False, or None when no X display is available.
    """
    try:
        output = subprocess.check_output(
            ["xset", "q"],
            stderr=subprocess.STDOUT,
        ).decode()
    except (FileNotFoundError, subprocess.CalledProcessError):
        # tty
        return None

    match = re.search(r"Num Lock:\s+(.+?)\s", output)
    if match is None:
        return False

    return match[1] == "on"
def set_numlock(state):
    """Set the numlock to a given state of True or False.

    A state of None is a no-op.
    """
    if state is None:
        return

    value = {True: "on", False: "off"}[state]

    try:
        subprocess.check_output(["numlockx", value])
    except subprocess.CalledProcessError:
        # might be in a tty
        pass
    except FileNotFoundError:
        # doesn't seem to be installed everywhere
        logger.debug("numlockx not found")
The provided code snippet includes necessary dependencies for implementing the `ensure_numlock` function. Write a Python function `def ensure_numlock(func)` to solve the following problem:
Decorator to reset the numlock to its initial state afterwards.
Here is the function:
def ensure_numlock(func):
    """Decorator to reset the numlock to its initial state afterwards."""
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name and docstring
    def wrapped(*args, **kwargs):
        # for some reason, grabbing a device can modify the num lock state.
        # remember it and apply back later
        numlock_before = is_numlock_on()
        try:
            return func(*args, **kwargs)
        finally:
            # restore the numlock even if func raised
            set_numlock(numlock_before)

    return wrapped
from typing import Dict, Union, Tuple, Optional, List
import evdev
import inputremapper.exceptions
import inputremapper.utils
from inputremapper.logger import logger
# Capabilities of the uinputs that input-remapper offers by default,
# keyed by uinput name; each value maps event type -> supported codes.
DEFAULT_UINPUTS = {
    # for event codes see linux/input-event-codes.h
    "keyboard": {
        evdev.ecodes.EV_KEY: list(evdev.ecodes.KEY.keys() & evdev.ecodes.keys.keys())
    },
    "gamepad": {
        evdev.ecodes.EV_KEY: [*range(0x130, 0x13F)],  # BTN_SOUTH - BTN_THUMBR
        evdev.ecodes.EV_ABS: [
            # NOTE(review): MIN_ABS/MAX_ABS are defined elsewhere in the
            # file (not visible here) — presumably the ABS value range
            *(
                (i, evdev.AbsInfo(0, MIN_ABS, MAX_ABS, 0, 0, 0))
                for i in range(0x00, 0x06)
            ),
            *((i, evdev.AbsInfo(0, -1, 1, 0, 0, 0)) for i in range(0x10, 0x12)),
        ],  # 6-axis and 1 hat switch
    },
    "mouse": {
        evdev.ecodes.EV_KEY: [*range(0x110, 0x118)],  # BTN_LEFT - BTN_TASK
        evdev.ecodes.EV_REL: [*range(0x00, 0x0D)],  # all REL axis
    },
}
# the combined device merges the keyboard and mouse capabilities above
DEFAULT_UINPUTS["keyboard + mouse"] = {
    evdev.ecodes.EV_KEY: [
        *DEFAULT_UINPUTS["keyboard"][evdev.ecodes.EV_KEY],
        *DEFAULT_UINPUTS["mouse"][evdev.ecodes.EV_KEY],
    ],
    evdev.ecodes.EV_REL: [
        *DEFAULT_UINPUTS["mouse"][evdev.ecodes.EV_REL],
    ],
}
The provided code snippet includes necessary dependencies for implementing the `can_default_uinput_emit` function. Write a Python function `def can_default_uinput_emit(target: str, type_: int, code: int) -> bool` to solve the following problem:
Check if the uinput with the target name is capable of the event.
Here is the function:
def can_default_uinput_emit(target: str, type_: int, code: int) -> bool:
    """Check if the uinput with the target name is capable of the event."""
    target_capabilities = DEFAULT_UINPUTS.get(target, {})
    codes = target_capabilities.get(type_)
    if codes is None:
        # unknown target, or it does not support this event type at all
        return False
    return code in codes
from typing import Dict, Union, Tuple, Optional, List
import evdev
import inputremapper.exceptions
import inputremapper.utils
from inputremapper.logger import logger
# Capabilities of the uinputs that input-remapper offers by default,
# keyed by uinput name; each value maps event type -> supported codes.
DEFAULT_UINPUTS = {
    # for event codes see linux/input-event-codes.h
    "keyboard": {
        evdev.ecodes.EV_KEY: list(evdev.ecodes.KEY.keys() & evdev.ecodes.keys.keys())
    },
    "gamepad": {
        evdev.ecodes.EV_KEY: [*range(0x130, 0x13F)],  # BTN_SOUTH - BTN_THUMBR
        evdev.ecodes.EV_ABS: [
            # NOTE(review): MIN_ABS/MAX_ABS are defined elsewhere in the
            # file (not visible here) — presumably the ABS value range
            *(
                (i, evdev.AbsInfo(0, MIN_ABS, MAX_ABS, 0, 0, 0))
                for i in range(0x00, 0x06)
            ),
            *((i, evdev.AbsInfo(0, -1, 1, 0, 0, 0)) for i in range(0x10, 0x12)),
        ],  # 6-axis and 1 hat switch
    },
    "mouse": {
        evdev.ecodes.EV_KEY: [*range(0x110, 0x118)],  # BTN_LEFT - BTN_TASK
        evdev.ecodes.EV_REL: [*range(0x00, 0x0D)],  # all REL axis
    },
}
# the combined device merges the keyboard and mouse capabilities above
DEFAULT_UINPUTS["keyboard + mouse"] = {
    evdev.ecodes.EV_KEY: [
        *DEFAULT_UINPUTS["keyboard"][evdev.ecodes.EV_KEY],
        *DEFAULT_UINPUTS["mouse"][evdev.ecodes.EV_KEY],
    ],
    evdev.ecodes.EV_REL: [
        *DEFAULT_UINPUTS["mouse"][evdev.ecodes.EV_REL],
    ],
}
The provided code snippet includes necessary dependencies for implementing the `find_fitting_default_uinputs` function. Write a Python function `def find_fitting_default_uinputs(type_: int, code: int) -> List[str]` to solve the following problem:
Find the names of default uinputs that are able to emit this event.
Here is the function:
def find_fitting_default_uinputs(type_: int, code: int) -> List[str]:
    """Find the names of default uinputs that are able to emit this event."""
    fitting = []
    for name, capabilities in DEFAULT_UINPUTS.items():
        if code in capabilities.get(type_, []):
            fitting.append(name)
    return fitting
import math
from typing import Dict
import evdev
from evdev.ecodes import (
EV_REL,
REL_WHEEL,
REL_HWHEEL,
REL_WHEEL_HI_RES,
REL_HWHEEL_HI_RES,
)
from inputremapper.configs.input_config import InputCombination, InputConfig
from inputremapper import exceptions
from inputremapper.configs.mapping import (
Mapping,
REL_XY_SCALING,
WHEEL_SCALING,
WHEEL_HI_RES_SCALING,
)
from inputremapper.injection.global_uinputs import global_uinputs
from inputremapper.injection.mapping_handlers.axis_transform import Transformation
from inputremapper.injection.mapping_handlers.mapping_handler import (
MappingHandler,
HandlerEnums,
InputEventHandler,
)
from inputremapper.input_event import InputEvent
from inputremapper.logger import logger
def is_wheel(event) -> bool:
    """Whether the event is a vertical or horizontal scroll-wheel event."""
    if event.type != EV_REL:
        return False
    return event.code in (REL_WHEEL, REL_HWHEEL)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.