| id (int64, 0-190k) | prompt (string, 21-13.4M chars) | docstring (string, 1-12k chars, nullable) |
|---|---|---|
188,834 | from mmengine import Registry
from mmdeploy.utils.config_utils import Backend
def __build_backend_wrapper_class(backend: Backend, registry: Registry):
return registry.module_dict[backend.value] | null |
188,835 | from mmengine import Registry
from mmdeploy.utils.config_utils import Backend
def get_backend_wrapper_class(backend: Backend) -> type:
"""Get the backend wrapper class from the registry.
Args:
backend (Backend): The backend enum type.
Returns:
type: The backend wrapper class
"""
retu... | null |
188,836 | from typing import Optional, Sequence
from mmdeploy.utils.device import parse_cuda_device_id
from .utils import create_runtime, register_engines
def parse_cuda_device_id(device: str) -> int:
"""Parse cuda device index from a string.
Args:
device (str): The typical style of string specifying cuda devic... | Convert ONNX to PPLNN. PPLNN is capable of optimizing onnx model. The optimized algorithm is saved into `algo_file` in json format. Note that `input_shapes` actually require multiple shapes of inputs in its original design. But in the pipeline of our codebase, we only pass one input shape which can be modified by users... |
188,837 | import os
import os.path as osp
import tempfile
from subprocess import call
from typing import List, Optional, Union
import onnx
from .init_plugins import get_onnx2ncnn_path
def mkdir_or_exist(dir_name, mode=0o777):
if dir_name == '':
return
dir_name = osp.expanduser(dir_name)
os.makedirs(dir_name, ... | Returns the path to the .param, .bin file with export result. Args: onnx_path (str): The path of the onnx model. work_dir (str|None): The path of the directory for saving the results. Defaults to `None`, which means using the directory of onnx_path. Returns: List[str]: The path of the files where the export result will... |
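For illustration, a minimal sketch of the kind of helper this row's docstring describes: given an onnx path and an optional work directory, return the ncnn `.param`/`.bin` output paths. The function name and the exact join logic are assumptions (the prompt is truncated); only the default-to-onnx-directory behavior and the return type come from the docstring.

```python
import os.path as osp
from typing import List, Optional


def get_output_model_file(onnx_path: str,
                          work_dir: Optional[str] = None) -> List[str]:
    """Sketch: derive ncnn .param/.bin output paths from the onnx path.

    If `work_dir` is None, the directory of `onnx_path` is used, as the
    docstring states. The function name is assumed, not taken from the
    truncated prompt.
    """
    if work_dir is None:
        work_dir = osp.dirname(onnx_path)
    file_name = osp.splitext(osp.basename(onnx_path))[0]
    return [
        osp.join(work_dir, file_name + '.param'),
        osp.join(work_dir, file_name + '.bin'),
    ]
```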
188,838 | import os
import os.path as osp
import tempfile
from subprocess import call
from typing import List, Optional, Union
import onnx
from .init_plugins import get_onnx2ncnn_path
def get_onnx2ncnn_path() -> str:
"""Get mmdeploy_onnx2ncnn path.
Returns:
str: A path of mmdeploy_onnx2ncnn tool.
"""
ca... | Convert ONNX to ncnn. The inputs of ncnn include a model file and a weight file. We need to use an executable program to convert the `.onnx` file to a `.param` file and a `.bin` file. The output files will save to work_dir. Example: >>> from mmdeploy.apis.ncnn import from_onnx >>> onnx_path = 'work_dir/end2end.onnx' >>... |
188,839 | import os
import shutil
from mmdeploy.utils import get_file_path
import os
if os.path.exists(ops_path):
cdll.LoadLibrary(ops_path)
The provided code snippet includes necessary dependencies for implementing the `get_ops_path` function. Write a Python function `def get_ops_path() -> str` to solve the following p... | Get ncnn custom ops library path. Returns: str: The library path of ncnn custom ops. |
188,840 | import os.path as osp
from subprocess import call
from typing import List
import mmengine
from .init_plugins import get_ncnn2int8_path
The provided code snippet includes necessary dependencies for implementing the `get_quant_model_file` function. Write a Python function `def get_quant_model_file(onnx_path: str, work_d... | Returns the path to quant onnx and table with export result. Args: onnx_path (str): The path to the fp32 onnx model. work_dir (str): The path to the directory for saving the results. Returns: List[str]: The path to the files where the export result will be located. |
188,841 | import os.path as osp
from subprocess import call
from typing import List
import mmengine
from .init_plugins import get_ncnn2int8_path
def get_ncnn2int8_path() -> str:
"""Get onnx2int8 path.
Returns:
str: A path of ncnn2int8 tool.
"""
ncnn2int8_path = shutil.which('ncnn2int8')
if ncnn2int8... | Convert ncnn float model to quantized model. The inputs of ncnn include float model and weight file. We need to use a executable program to convert the float model to int8 model with calibration table. Example: >>> from mmdeploy.backend.ncnn.quant import ncnn2int8 >>> param = 'work_dir/end2end.param' >>> bin = 'work_di... |
188,842 | import glob
import logging
import os
import sys
import traceback
from typing import Callable, Optional, Union
from mmdeploy.utils.logging import get_logger
import logging
The provided code snippet includes necessary dependencies for implementing the `target_wrapper` function. Write a Python function `def target_wrapp... | The wrapper used to start a new subprocess. Args: target (Callable): The target function to be wrapped. log_level (int): Log level for logging. ret_value (mp.Value): The success flag of target. Return: Any: The return of target. |
188,843 | import glob
import logging
import os
import sys
import traceback
from typing import Callable, Optional, Union
from mmdeploy.utils.logging import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO) -> logging.Logger:
"""Get root logger.
Args:
log_file (str, optional): File path of log. ... | Deprecate a function or a class. Args: status (str, optional): The status of the function or class. Defaults to future. dst_obj (str, object, optional): The object that will replace the original one. Defaults to None. msg (str): Additional message to be printed. Examples: >>> from math import ceil >>> from mmdeploy.uti... |
188,844 | import glob
import logging
import os
import sys
import traceback
from typing import Callable, Optional, Union
from mmdeploy.utils.logging import get_logger
The provided code snippet includes necessary dependencies for implementing the `get_file_path` function. Write a Python function `def get_file_path(prefix, candida... | Search for file in candidates. Args: prefix (str): Prefix of the paths. candidates (str): Candidate paths Returns: str: file path or '' if not found |
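An illustrative sketch consistent with this `get_file_path` docstring ("first matching candidate, or '' if not found"). Joining the prefix with each candidate via `os.path.join` is an assumption about how the prefix is applied; the docstring only names the two arguments and the fallback value.

```python
import os.path as osp
from typing import Sequence


def get_file_path(prefix: str, candidates: Sequence[str]) -> str:
    """Sketch: return the first existing candidate path, or '' if none exists."""
    for candidate in candidates:
        path = osp.join(prefix, candidate)
        if osp.exists(path):
            return path
    return ''
```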
188,845 | from typing import Dict, List, Optional, Union
import mmengine
from .constants import Backend, Codebase, Task
from .utils import deprecate, get_root_logger
def get_ir_config(deploy_cfg: Union[str, mmengine.Config]) -> Dict:
"""Get the IR parameters in export() from config.
Args:
deploy_cfg (str | mmengi... | Get the input shape for static exporting. Args: deploy_cfg (str | mmengine.Config): The path or content of config. Returns: List[int]: The input shape for backend model (axis 2 and 3), e.g [512, 512]. |
188,846 | from typing import Dict, List, Optional, Union
import mmengine
from .constants import Backend, Codebase, Task
from .utils import deprecate, get_root_logger
The provided code snippet includes necessary dependencies for implementing the `cfg_apply_marks` function. Write a Python function `def cfg_apply_marks(deploy_cfg:... | Check if the model needs to be partitioned by checking if the config contains 'apply_marks'. Args: deploy_cfg (str | mmengine.Config): The path or content of config. Returns: bool or None: Whether config contains 'apply_marks'. |
188,847 | import importlib
from mmdeploy.utils import Codebase
def get_library_version(lib):
"""Try to get the version of a library if it has been installed.
Args:
lib (str): The name of library.
Returns:
None | str: If the library has been installed, return version.
"""
try:
lib = imp... | Get the version dictionary of all supported codebases. Returns: Dict: The name and the version of supported codebases. |
188,848 | import importlib
from mmdeploy.utils import Codebase
def get_library_version(lib):
"""Try to get the version of a library if it has been installed.
Args:
lib (str): The name of library.
Returns:
None | str: If the library has been installed, return version.
"""
try:
lib = imp... | Get the version dictionary of some supported backend. Returns: Dict: The name and the version of some supported backend. |
188,849 | import re
from typing import Optional
def parse_cuda_device_id(device: str) -> int:
"""Parse cuda device index from a string.
Args:
device (str): The typical style of string specifying cuda device,
e.g.: 'cuda:0'.
Returns:
int: The parsed device id, defaults to `0`.
"""
m... | Parse device index from a string. Args: device (str): The typical style of string specifying device, e.g.: 'cuda:0', 'cpu'. Returns: Optional[int]: The return value depends on the type of device. If device is 'cuda': cuda device index, defaults to `0`. If device is 'cpu': `-1`. Otherwise, `None` will be returned. |
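A hedged sketch of the behavior this docstring describes ('cuda' strings yield an index defaulting to 0, 'cpu' yields -1, anything else yields None). The regex is an assumption modeled on the `parse_cuda_device_id` snippet shown in the prompt; the function name is hypothetical.

```python
import re
from typing import Optional


def parse_device_id(device: str) -> Optional[int]:
    """Sketch: parse a device index from strings such as 'cuda:0' or 'cpu'."""
    if device == 'cpu':
        return -1
    # Accept 'cuda' or 'cuda:<n>'; any other device string returns None.
    match = re.match(r'^cuda(:(?P<device_id>[0-9]+))?$', device)
    if match is None:
        return None
    device_id = match.group('device_id')
    return 0 if device_id is None else int(device_id)
```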
188,850 | import re
from typing import Optional
The provided code snippet includes necessary dependencies for implementing the `parse_device_type` function. Write a Python function `def parse_device_type(device: str) -> str` to solve the following problem:
Parse device type from a string. Args: device (str): The typical style o... | Parse device type from a string. Args: device (str): The typical style of string specifying cuda device, e.g.: 'cuda:0', 'cpu', 'npu'. Returns: str: The parsed device type such as 'cuda', 'cpu', 'npu'. |
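Likewise, a minimal sketch matching the `parse_device_type` docstring; splitting on ':' is an assumption, since the docstring only gives input/output examples such as 'cuda:0' -> 'cuda'.

```python
def parse_device_type(device: str) -> str:
    """Sketch: 'cuda:0' -> 'cuda', 'cpu' -> 'cpu', 'npu' -> 'npu'."""
    return device.split(':')[0]
```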
188,851 | from torch.utils.data import Dataset
The provided code snippet includes necessary dependencies for implementing the `is_can_sort_dataset` function. Write a Python function `def is_can_sort_dataset(dataset: Dataset) -> bool` to solve the following problem:
Checking for the possibility of sorting the dataset by fields '... | Checking for the possibility of sorting the dataset by fields 'height' and 'width'. Args: dataset (Dataset): The dataset. Returns: bool: Is it possible or not to sort the dataset. |
188,852 | from torch.utils.data import Dataset
The provided code snippet includes necessary dependencies for implementing the `sort_dataset` function. Write a Python function `def sort_dataset(dataset: Dataset) -> Dataset` to solve the following problem:
Sorts the dataset by image height and width. Args: dataset (Dataset): The ... | Sorts the dataset by image height and width. Args: dataset (Dataset): The dataset. Returns: Dataset: Sorted dataset. |
188,853 | import asyncio
import os
import shutil
import torch
from pyppeteer import launch
from torchvision.models import resnet18
from mmdeploy.core import FUNCTION_REWRITER, RewriterContext, patch_model
from mmdeploy.utils import get_root_logger
The provided code snippet includes necessary dependencies for implementing the `f... | Rewrite the forward implementation of resnet. Early return the feature map after two down-sampling steps. |
188,854 | import asyncio
import os
import shutil
import torch
from pyppeteer import launch
from torchvision.models import resnet18
from mmdeploy.core import FUNCTION_REWRITER, RewriterContext, patch_model
from mmdeploy.utils import get_root_logger
def rewrite_resnet18(original_path: str, rewritten_path: str):
# prepare inpu... | null |
188,855 | import asyncio
import os
import shutil
import torch
from pyppeteer import launch
from torchvision.models import resnet18
from mmdeploy.core import FUNCTION_REWRITER, RewriterContext, patch_model
from mmdeploy.utils import get_root_logger
def screen_size():
async def visualize(original_path: str, rewritten_path: str):
... | null |
188,856 | import argparse
import math
import cv2
from mmdeploy_runtime import Detector
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'model_path',
he... | null |
188,857 | import argparse
from math import cos, sin
import cv2
import numpy as np
from mmdeploy_runtime import RotatedDetector
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_ar... | null |
188,858 | import argparse
import cv2
from mmdeploy_runtime import TextDetector, TextRecognizer
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument('image_path', help='path ... | null |
188,859 | import argparse
import cv2
from mmdeploy_runtime import VideoRecognizer
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'model_path',
help='p... | null |
188,860 | import argparse
import cv2
from mmdeploy_runtime import VideoRecognizer
def SampleFrames(cap, clip_len, frame_interval, num_clips):
if not cap.isOpened():
print('failed to load video')
exit(-1)
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
ori_clip_len = clip_len * frame_interval
... | null |
188,861 | import argparse
import cv2
import numpy as np
from mmdeploy_runtime import Segmentor
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'model_path',
... | null |
188,862 | import argparse
import cv2
import numpy as np
from mmdeploy_runtime import Segmentor
def get_palette(num_classes=256):
state = np.random.get_state()
# random color
np.random.seed(42)
palette = np.random.randint(0, 256, size=(num_classes, 3))
np.random.set_state(state)
return [tuple(c) for c in ... | null |
188,863 | import argparse
import os
import cv2
import numpy as np
from mmdeploy_runtime import PoseTracker
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use SDK Python API')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'det... | null |
188,864 | import argparse
import os
import cv2
import numpy as np
from mmdeploy_runtime import PoseTracker
VISUALIZATION_CFG = dict(
coco=dict(
skeleton=[(15, 13), (13, 11), (16, 14), (14, 12), (11, 12), (5, 11),
(6, 12), (5, 6), (5, 7), (6, 8), (7, 9), (8, 10), (1, 2),
(0, 1), (0,... | null |
188,865 | import argparse
import cv2
import numpy as np
from mmdeploy_runtime import PoseDetector
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'model_path',... | null |
188,866 | import argparse
import cv2
from mmdeploy_runtime import Restorer
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'model_path', help='path of SDK mode... | null |
188,867 | import argparse
import cv2
import numpy as np
from mmdeploy_runtime import Detector, PoseDetector
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use SDK Python API')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'de... | null |
188,868 | import argparse
import cv2
import numpy as np
from mmdeploy_runtime import Detector, PoseDetector
def visualize(frame, keypoints, filename, thr=0.5, resize=1280):
skeleton = [(15, 13), (13, 11), (16, 14), (14, 12), (11, 12), (5, 11),
(6, 12), (5, 6), (5, 7), (6, 8), (7, 9), (8, 10), (1, 2),
... | null |
188,869 | import argparse
import cv2
from mmdeploy_runtime import Classifier
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'model_path',
help='path o... | null |
188,870 | import argparse
import json
import cv2
from mmdeploy_runtime import Context, Device, Model, Pipeline
def parse_args():
parser = argparse.ArgumentParser(
description='Demo of MMDeploy SDK pipeline API')
parser.add_argument('device', help='name of device, cuda or cpu')
parser.add_argument('det_model_... | null |
188,871 | import grpc
import inference_pb2 as inference__pb2
class Inference(object):
"""The inference service definition."""
def Echo(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=Non... | null |
188,872 | import argparse
import logging
from mmdeploy.backend.tensorrt import from_onnx
from mmdeploy.backend.tensorrt.utils import get_trt_log_level
from mmdeploy.utils import (get_common_config, get_model_inputs,
get_root_logger, load_config)
def parse_args():
parser = argparse.ArgumentParser(... | null |
188,873 | import argparse
import numpy as np
from texttable import Texttable
def parse_args():
parser = argparse.ArgumentParser(
description='Analyze sdk profiler file tool.')
parser.add_argument('profile_file', help='SDK profile file path')
args = parser.parse_args()
return args | null |
188,874 | import argparse
import numpy as np
from texttable import Texttable
def get_name(addr, prev, addr2name, used_addr, depth, skip):
node_name = addr2name[addr] if not skip else ''
if addr not in prev:
return ' ' * depth * 4 + node_name
prev_addr = prev[addr]
if prev_addr in used_addr:
depth... | null |
188,875 | import argparse
import os
import os.path as osp
import yaml
from mmengine import Config
from mmdeploy.utils import get_backend, get_task_type, load_config
def parse_args():
parser = argparse.ArgumentParser(
description='from yaml export markdown table')
parser.add_argument('yml_file', help='input yml c... | null |
188,876 | import argparse
import logging
from copy import deepcopy
from mmengine import Config
from torch.utils.data import DataLoader
from mmdeploy.apis.utils import build_task_processor
from mmdeploy.utils import get_root_logger, load_config
class QuantizationImageDataset(Dataset):
def __init__(
self,
pat... | null |
188,877 | import argparse
import logging
from copy import deepcopy
from mmengine import Config
from torch.utils.data import DataLoader
from mmdeploy.apis.utils import build_task_processor
from mmdeploy.utils import get_root_logger, load_config
def parse_args():
parser = argparse.ArgumentParser(
description='Generate... | null |
188,878 | import argparse
import logging
from mmdeploy.apis.ncnn import from_onnx
from mmdeploy.utils import get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Convert ONNX to ncnn.')
parser.add_argument('onnx_path', help='ONNX model path')
parser.add_argument('output_prefix', help='outp... | null |
188,879 | import argparse
import logging
from mmdeploy.apis.snpe import from_onnx
from mmdeploy.utils import get_root_logger
def parse_args():
parser = argparse.ArgumentParser(
description='Convert ONNX to snpe dlc format.')
parser.add_argument('onnx_path', help='ONNX model path')
parser.add_argument('output... | null |
188,880 | from mmcv.utils import collect_env as collect_base_env
from mmengine.utils import get_git_hash
import mmdeploy
from mmdeploy.utils import get_codebase_version, get_root_logger
The provided code snippet includes necessary dependencies for implementing the `collect_env` function. Write a Python function `def collect_env... | Collect the information of the running environments. |
188,881 | from mmcv.utils import collect_env as collect_base_env
from mmengine.utils import get_git_hash
import mmdeploy
from mmdeploy.utils import get_codebase_version, get_root_logger
def check_backend():
from mmdeploy.backend.base import get_backend_manager
from mmdeploy.utils import Backend
exclude_backend_lists... | null |
188,882 | from mmcv.utils import collect_env as collect_base_env
from mmengine.utils import get_git_hash
import mmdeploy
from mmdeploy.utils import get_codebase_version, get_root_logger
def check_codebase():
codebase_versions = get_codebase_version()
for k, v in codebase_versions.items():
logger.info(f'{k}:\t{v}... | null |
188,883 | import argparse
import logging
import os
import os.path as osp
from functools import partial
import mmengine
import torch.multiprocessing as mp
from torch.multiprocessing import Process, set_start_method
from mmdeploy.apis import (create_calib_input_data, extract_model,
get_predefined_partiti... | null |
188,884 | import argparse
import logging
import os
import os.path as osp
from functools import partial
import mmengine
import torch.multiprocessing as mp
from torch.multiprocessing import Process, set_start_method
from mmdeploy.apis import (create_calib_input_data, extract_model,
get_predefined_partiti... | null |
188,885 | import argparse
import logging
import os
import os.path as osp
from functools import partial
import mmengine
import torch.multiprocessing as mp
from torch.multiprocessing import Process, set_start_method
from mmdeploy.apis import (create_calib_input_data, extract_model,
get_predefined_partiti... | Return the conversion function from torch to the intermediate representation. Args: ir_type (IR): The type of the intermediate representation. |
188,886 | import argparse
import logging
import os
import os.path as osp
from mmdeploy.apis import (extract_model, get_predefined_partition_cfg,
torch2onnx)
from mmdeploy.utils import (get_ir_config, get_partition_config,
get_root_logger, load_config)
def parse_args():
... | null |
188,887 | import argparse
import logging
import os.path as osp
from copy import deepcopy
from typing import Optional, Sequence
import h5py
import tqdm
from mmengine import Config
from mmdeploy.apis.utils import build_task_processor
from mmdeploy.utils import get_root_logger, load_config
def get_tensor_func(model, input_data):
... | null |
188,888 | import argparse
import logging
import os.path as osp
from copy import deepcopy
from typing import Optional, Sequence
import h5py
import tqdm
from mmengine import Config
from mmdeploy.apis.utils import build_task_processor
from mmdeploy.utils import get_root_logger, load_config
def parse_args():
parser = argparse.A... | null |
188,889 | import argparse
import logging
import os.path as osp
import onnx
import onnx.helper
from mmdeploy.apis.onnx import extract_partition
from mmdeploy.utils import get_root_logger
def parse_args():
parser = argparse.ArgumentParser(
description='Extract model based on markers.')
parser.add_argument('input_m... | null |
188,890 | import argparse
import logging
import os.path as osp
import onnx
import onnx.helper
from mmdeploy.apis.onnx import extract_partition
from mmdeploy.utils import get_root_logger
def collect_avaiable_marks(model):
marks = []
for node in model.graph.node:
if node.op_type == 'Mark':
for attr in ... | null |
188,891 | import argparse
import os
import os.path as osp
import pathlib
import shutil
import subprocess
from glob import glob
import mmcv
import yaml
from mmdeploy.backend.sdk.export_info import (get_preprocess,
get_transform_static)
from mmdeploy.utils import get_root_logger, load_... | null |
188,892 | import argparse
import os
import os.path as osp
import pathlib
import shutil
import subprocess
from glob import glob
import mmcv
import yaml
from mmdeploy.backend.sdk.export_info import (get_preprocess,
get_transform_static)
from mmdeploy.utils import get_root_logger, load_... | null |
188,893 | import argparse
import collections
import logging
from mmdeploy.apis.pplnn import from_onnx
from mmdeploy.utils import get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Convert ONNX to PPLNN.')
parser.add_argument('onnx_path', help='ONNX model path')
parser.add_argument(
... | null |
188,894 | import argparse
import glob
import os.path as osp
import numpy as np
import torch
from mmengine import DictAction
from prettytable import PrettyTable
from mmdeploy.apis import build_task_processor
from mmdeploy.utils import get_root_logger
from mmdeploy.utils.config_utils import (Backend, get_backend, get_input_shape,
... | null |
188,895 | import argparse
import glob
import os.path as osp
import numpy as np
import torch
from mmengine import DictAction
from prettytable import PrettyTable
from mmdeploy.apis import build_task_processor
from mmdeploy.utils import get_root_logger
from mmdeploy.utils.config_utils import (Backend, get_backend, get_input_shape,
... | null |
188,896 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
def install_pplcv(dep_dir, build_cuda):
print('-' * 10 + 'install pplcv' + '-' * 10)
time.sleep(2)
os.chdir(dep_dir)
pplcv_dir = os.path.join(dep_dir, 'ppl.cv')
# git clone
... | null |
188,897 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
def install_pplnn(dep_dir, build_cuda):
print('-' * 10 + 'install pplnn' + '-' * 10)
time.sleep(2)
# generate unzip and build dir
os.chdir(dep_dir)
pplnn_dir = os.path.join(dep_... | null |
188,898 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
g_jobs = 2
def install_mmdeploy(work_dir, pplnn_cmake_dir, pplcv_cmake_dir, build_cuda):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
os.chdir(work_dir)
os... | null |
188,899 | import os
import os.path as osp
import sys
import time
from ubuntu_utils import cmd_result, ensure_base_env, get_job
def cmd_result(txt: str):
def install_llvm(dep_dir):
print('-' * 10 + 'install llvm' + '-' * 10)
os.chdir(dep_dir)
os.system(
'wget --no-check-certificate -O - https://apt.llvm.org... | null |
188,900 | import os
import os.path as osp
import sys
import time
from ubuntu_utils import cmd_result, ensure_base_env, get_job
def install_tvm(dep_dir):
print('-' * 10 + 'build and install tvm' + '-' * 10)
time.sleep(2)
os.system('sudo apt-get update')
os.system(
'sudo apt-get install -y python3 python3... | null |
188,901 | import os
import os.path as osp
import sys
import time
from ubuntu_utils import cmd_result, ensure_base_env, get_job
def install_mmdeploy(work_dir, tvm_dir):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
os.chdir(work_dir)
os.system('git submodule init')
os.system('git su... | null |
188,902 | import os
import re
import time
def cmd_result(txt: str):
cmd = os.popen(txt)
return cmd.read().rstrip().lstrip()
def get_job(argv) -> int:
# get nprocs, if user not specified, use max(1, nproc-2)
job = 2
if len(argv) <= 1:
print('your can use `python3 {} N` to set make -j [N]'.format(argv[... | null |
188,903 | import os
import re
import time
def version_minor(txt: str) -> int:
return int(txt.split('.')[1]) | null |
188,904 | import os
import re
import time
def cmd_result(txt: str):
def version_major(txt: str) -> int:
def simple_check_install(bin: str, sudo: str) -> str:
def ensure_base_env(work_dir, dep_dir):
description = """
check python, root, pytorch version, auto install these binary:
* make
* g++
* git
* wge... | null |
188,905 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import ensure_base_env, get_job
def install_ort(dep_dir):
print('-' * 10 + 'install ort' + '-' * 10)
time.sleep(2)
# generate unzip and build dir
os.chdir(dep_dir)
# install python onnxruntime
os.system('python3 -m pi... | null |
188,906 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import ensure_base_env, get_job
g_jobs = 2
def install_mmdeploy(work_dir, ort_dir):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
os.chdir(work_dir)
os.system('git submodule init')
os.system('git ... | null |
188,907 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import (cmd_result, cu_version_name, ensure_base_env,
get_job, pytorch_version)
def pytorch_version():
def cmd_result(txt: str):
def cu_version_name(version: str) -> str:
def install_libtorch(dep_dir):
... | null |
188,908 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import (cmd_result, cu_version_name, ensure_base_env,
get_job, pytorch_version)
g_jobs = 2
def install_mmdeploy(work_dir, libtorch_dir):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
... | null |
188,909 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
g_jobs = 2
def cmd_result(txt: str):
cmd = os.popen(txt)
return cmd.read().rstrip().lstrip()
The provided code snippet includes necessary dependencies for implementing the `install_protobuf`... | build and install protobuf. protobuf seems not support repeated install, so clean build first. Args: wor_dir (_type_): _description_ Returns: : _description_ |
188,910 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
g_jobs = 2
def install_pyncnn(dep_dir):
print('-' * 10 + 'build and install pyncnn' + '-' * 10)
time.sleep(2)
# generate unzip and build dir
os.chdir(dep_dir)
# git clone
if... | null |
188,911 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
g_jobs = 2
def install_mmdeploy(work_dir, dep_dir, ncnn_cmake_dir):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
os.chdir(work_dir)
os.system('git submodul... | null |
188,912 | import argparse
import os
import sys
from distutils.util import get_platform
import yaml
def parse_arguments():
parser = argparse.ArgumentParser(
description='MMDeploy create build config')
parser.add_argument(
'--backend',
required=True,
type=str,
help='target backend. ... | null |
188,913 | import argparse
import os
import sys
from distutils.util import get_platform
import yaml
def generate_config(args):
config = {}
cmake_cfg = {}
# wheel platform tag
if args.system in ['linux']:
config['PLATFORM_TAG'] = 'manylinux2014_x86_64'
elif args.system in ['jetson']:
config['P... | null |
188,914 | import argparse
import copy
import logging
import os
import os.path as osp
import platform
import re
import shutil
import sys
from glob import glob
from subprocess import check_output, run
from typing import Dict
import yaml
from packaging import version
def create_mmdeploy(cfg: Dict, work_dir: str):
def create_mmdeplo... | null |
188,915 | import argparse
import copy
import logging
import os
import os.path as osp
import platform
import re
import shutil
import sys
from glob import glob
from subprocess import check_output, run
from typing import Dict
import yaml
from packaging import version
def parse_args():
parser = argparse.ArgumentParser(descripti... | null |
188,916 | import argparse
import copy
import logging
import os
import os.path as osp
import platform
import re
import shutil
import sys
from glob import glob
from subprocess import check_output, run
from typing import Dict
import yaml
from packaging import version
def parse_configs(cfg_path: str):
with open(cfg_path, mode='... | null |
188,917 | import os
import os.path as osp
import platform
import sys
version_file = osp.join(CURDIR, 'mmdeploy_runtime', 'version.py')
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__'] | null |
188,918 | import os
import os.path as osp
import platform
import sys
def get_platform_name():
return platform.machine() | null |
188,919 | import os
import os.path as osp
import platform
import sys
def parse_arg_remove_boolean(argv, arg_name):
arg_value = False
if arg_name in sys.argv:
arg_value = True
argv.remove(arg_name)
return arg_value | null |
188,920 | import config
import tiktoken
import openai
openai.api_key = config.openai_api_key
async def is_content_acceptable(prompt):
r = await openai.Moderation.acreate(input=prompt)
return not all(r.results[0].categories.values()) | null |
188,921 | import io
import logging
import asyncio
import traceback
import html
import json
from datetime import datetime
import openai
import telegram
from telegram import (
Update,
User,
InlineKeyboardButton,
InlineKeyboardMarkup,
BotCommand
)
from telegram.ext import (
Application,
ApplicationBuilde... | null |
188,922 | import os
from typing import Dict, List, Optional, Tuple, Union
import torch
from fastchat.conversation import (compute_skip_echo_len,
get_default_conv_template)
from fastchat.serve.inference import load_model as load_fastchat_model
from langchain.llms.base import LLM
from langchain.l... | null |
188,923 | import os
from typing import Dict, List, Optional, Tuple, Union
import torch
from fastchat.conversation import (compute_skip_echo_len,
get_default_conv_template)
from fastchat.serve.inference import load_model as load_fastchat_model
from langchain.llms.base import LLM
from langchain.l... | null |
188,924 | import os
import gradio as gr
import nltk
import torch
from chatglm_llm import ChatGLM
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.prompts import PromptTemplate
from la... | null |
188,925 | import os
import gradio as gr
import nltk
import torch
from chatglm_llm import ChatGLM
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.prompts import PromptTemplate
from la... | null |
188,926 | import os
from typing import List, Optional
import torch
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
CUDA_DEVICE = f"{DEVICE}:{DEVICE_ID}" if DEVICE_ID else DEVICE
def torch_gc():
if t... | null |
188,927 | import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingf... | null |
188,928 | import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingf... | null |
188,929 | import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingf... | null |
188,930 | import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingf... | null |
188,931 | import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingf... | null |
188,932 | import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingf... | null |
188,933 | import os
import gradio as gr
import nltk
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import FAISS
from paddle_embedding import PaddleNL... | null |