input | output |
|---|---|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.19.1'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version... | # Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.19.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version... |
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
_delete_=True,
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
... | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
_delete_=True,
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
... |
from __future__ import annotations
import os
from copy import deepcopy
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, StaticEmbedding, Transformer
from sentence_transformers.util import is_datas... | from __future__ import annotations
import os
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, StaticEmbedding, Transformer
from sentence_transformers.util import is_datasets_available
from tests.u... |
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_A... | import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_A... |
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
if name == "create_pandas_dataframe_agent":
# Get directory of langchain package
HERE = Path(__file__).parents[3]
here = as_impo... | from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
if name == "create_pandas_dataframe_agent":
# Get directory of langchain package
HERE = Path(__file__).parents[3]
here = as_impo... |
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from sklearn import metrics
from sklearn.ensemble import StackingClassifier, StackingRegressor
from sklearn.utils._testing import assert_docstring_consistency, skip_if_no_numpydoc
CLASS_DOCSTRING_CONSISTENCY_CASES = [
{
... | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from sklearn import metrics
from sklearn.ensemble import StackingClassifier, StackingRegressor
from sklearn.utils._testing import assert_docstring_consistency, skip_if_no_numpydoc
CLASS_DOCSTRING_CONSISTENCY_CASES = [
{
... |
import hashlib
import os
from pathlib import Path
from typing import List
from urllib.parse import quote, urlencode
import requests
from docutils import nodes
from docutils.parsers.rst.directives.images import Image
from sphinx.util.docutils import SphinxDirective
_THIS_DIR = Path(__file__).parent
# Color palette f... | import hashlib
from pathlib import Path
from typing import List
from urllib.parse import quote, urlencode
import requests
from docutils import nodes
from docutils.parsers.rst.directives.images import Image
_THIS_DIR = Path(__file__).parent
# Color palette from PyTorch Developer Day 2021 Presentation Template
YELLOW... |
"""Tracers that call listeners."""
from collections.abc import Awaitable
from typing import TYPE_CHECKING, Callable, Optional, Union
from langchain_core.runnables.config import (
RunnableConfig,
acall_func_with_variable_args,
call_func_with_variable_args,
)
from langchain_core.tracers.base import AsyncBas... | from collections.abc import Awaitable
from typing import TYPE_CHECKING, Callable, Optional, Union
from langchain_core.runnables.config import (
RunnableConfig,
acall_func_with_variable_args,
call_func_with_variable_args,
)
from langchain_core.tracers.base import AsyncBaseTracer, BaseTracer
from langchain_c... |
_base_ = 'solov2_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 768), (1333, 736)... | _base_ = 'solov2_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
# TODO: Update after mmcv.RandomChoiceResize finish refactor
type='Rando... |
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
file_to_skip = ['fastAPI', 'jina']
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a... | import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
... |
# Copyright (c) OpenMMLab. All rights reserved.
import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose:
"""Compose multiple transforms sequentially.
Args:
transforms (Sequence[dict | callable]): Sequence of transform objec... | # Copyright (c) OpenMMLab. All rights reserved.
import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose:
"""Compose multiple transforms sequentially.
Args:
transforms (Sequence[dict | callable]): Sequence of transform objec... |
import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
imag... | import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
imag... |
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
# `mean` and `to_rgb` should be the same with the `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, rat... | _base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pip... |
import torch
from docarray import BaseDoc
from docarray.typing import TorchEmbedding, TorchTensor
def test_set_torch_tensor():
class MyDocument(BaseDoc):
tensor: TorchTensor
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor... | import torch
from docarray import BaseDocument
from docarray.typing import TorchEmbedding, TorchTensor
def test_set_torch_tensor():
class MyDocument(BaseDocument):
tensor: TorchTensor
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstanc... |
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting imp... | # Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting imp... |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
from jina import Document, DocumentArray, Executor
from pdf_segmenter import PDFSegmenter
from PIL import Image
@pytest.fixture()
def executor():
return PDFSegmenter()
@pytest.fix... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from pathlib import Path
from jina import Executor
from jina.executors import BaseExecutor
from PIL import Image
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'conf... |
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
... | _base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_c... |
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook
from .memory_profiler_hook import MemoryProfilerHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .sync_random_si... | # Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .sync_random_size_hook import SyncRandomSizeHook
from .yolox_lrupdat... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(ty... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(ty... |
from typing import Union
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
... | from typing import Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
i... |
"""Argparser module for the export API"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import _chf
def set_export_parser(parser=None):
"""Set the parser for exporting
:param parser: the parser configure
:return: the parser
"""
if not parser:
parser = set_base_pa... | """Argparser module for the export API"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import _chf
def set_export_parser(parser=None):
"""Set the parser for exporting
:param parser: the parser configure
:return: the parser
"""
if not parser:
parser = set_base_pa... |
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class Sparse... | from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: Spars... |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_ping_parser(parser=None):
"""Set the parser for `ping`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
... | """Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_ping_parser(parser=None):
"""Set the parser for `ping`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
... |
from docarray.typing.id import ID
from docarray.typing.tensor import NdArray, Tensor, TorchEmbedding, TorchTensor
from docarray.typing.tensor.embedding import Embedding
from docarray.typing.url import AnyUrl, ImageUrl, TextUrl
__all__ = [
'TorchTensor',
'NdArray',
'Embedding',
'ImageUrl',
'TextUrl'... | from docarray.typing.id import ID
from docarray.typing.tensor import NdArray, Tensor, TorchTensor
from docarray.typing.tensor.embedding import Embedding
from docarray.typing.url import AnyUrl, ImageUrl, TextUrl
__all__ = [
'TorchTensor',
'NdArray',
'Embedding',
'ImageUrl',
'TextUrl',
'AnyUrl',
... |
_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
init_cfg=dict(
type='Pretra... | _base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
init_cfg=dict(
type='Pretra... |
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='SOLO',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
... | _base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
# model settings
model = dict(
type='SOLO',
prep... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import HybridTaskCascadeRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import HybridTaskCascadeRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_... |
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(
num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2,
... | _base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(
num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2,
0.9))))
# dataset settings
dat... |
import csv
import os
from pathlib import Path
from typing import Dict, List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
def load_commonvoice_item(
line: List[str], header: List[str], path: str, folder_audio: str, ext_audio: str
) -> Tuple[Tensor, int, Dict[str, s... | import csv
import os
from pathlib import Path
from typing import Dict, List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
def load_commonvoice_item(
line: List[str], header: List[str], path: str, folder_audio: str, ext_audio: str
) -> Tuple[Tensor, int, Dict[str, s... |
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas... | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
... |
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
bbox_head=dict(
num_classes=601,
anchor_generator=dict(basesize_ratio_range=(0.2, 0.9))))
# dataset settings
dataset_typ... | _base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
bbox_head=dict(
num_classes=601,
anchor_generator=dict(basesize_ratio_range=(0.2, 0.9))))
# dataset settings
dataset_typ... |
import os
import urllib.parse
import urllib.request
from contextlib import nullcontext
def _uri_to_blob(uri: str, **kwargs) -> bytes:
"""Convert uri to blob
Internally it reads uri into blob.
:param uri: the uri of Document
:param kwargs: keyword arguments to pass to `urlopen` such as timeout
:ret... | import os
import urllib.parse
import urllib.request
from contextlib import nullcontext
def _uri_to_blob(uri: str) -> bytes:
"""Convert uri to blob
Internally it reads uri into blob.
:param uri: the uri of Document
:return: blob bytes.
"""
if urllib.parse.urlparse(uri).scheme in {'http', 'http... |
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
type='RPN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(... | # model settings
model = dict(
type='RPN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='P... |
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
... | from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
... |
"""Argparser module for Flow"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.base import mixin_essential_parser
from jina.parsers.logging import mixin_suppress_root_logging_parser
def mixin_flow_features_parser(parser):
... | """Argparser module for Flow"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.base import mixin_essential_parser
def mixin_flow_features_parser(parser):
"""Add the arguments for the Flow features to the parser
:param... |
from typing import cast
from pydantic import SecretStr
from langchain_community.embeddings import MiniMaxEmbeddings
def test_initialization_with_alias() -> None:
"""Test minimax embedding model initialization with alias."""
api_key = "your-api-key"
group_id = "your-group-id"
embeddings = MiniMaxEmb... | from typing import cast
from pydantic import SecretStr
from langchain_community.embeddings import MiniMaxEmbeddings
def test_initialization_with_alias() -> None:
"""Test minimax embedding model initialization with alias."""
api_key = "your-api-key"
group_id = "your-group-id"
embeddings = MiniMaxEmb... |
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import construct_toy_data, create_full_masks, create_random_bboxes
__all__ = ['create_random_bboxes', 'create_full_masks', 'construct_toy_data']
| # Copyright (c) OpenMMLab. All rights reserved.
from .utils import create_random_bboxes
__all__ = ['create_random_bboxes']
|
"""Test text splitting functionality using NLTK and Spacy based sentence splitters."""
from typing import Any
import nltk
import pytest
from langchain_core.documents import Document
from langchain_text_splitters.nltk import NLTKTextSplitter
from langchain_text_splitters.spacy import SpacyTextSplitter
def setup_mod... | """Test text splitting functionality using NLTK and Spacy based sentence splitters."""
from typing import Any
import nltk
import pytest
from langchain_core.documents import Document
from langchain_text_splitters.nltk import NLTKTextSplitter
from langchain_text_splitters.spacy import SpacyTextSplitter
def setup_mod... |
from typing import Any, Dict, Optional, Union
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (datapoints.Bound... | from typing import Any, Dict, Optional, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (features.BoundingBox,)
def __init__(self, format: U... |
"""AgentQL Web Reader."""
import httpx
from typing import Optional, List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
import logging
logging.getLogger("root").setLevel(logging.INFO)
QUERY_DATA_ENDPOINT = "https://api.agentql.com/v1/query-data"
API_TIMEOUT... | """AgentQL Web Reader."""
import httpx
from typing import Optional, List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
import logging
logging.getLogger("root").setLevel(logging.INFO)
QUERY_DATA_ENDPOINT = "https://api.agentql.com/v1/query-data"
API_TIMEOUT... |
from __future__ import annotations
import asyncio
import threading
from enum import Enum
from typing import TYPE_CHECKING, Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.ainetwork.utils i... | from __future__ import annotations
import asyncio
import threading
from enum import Enum
from typing import TYPE_CHECKING, Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.ainetwork.utils i... |
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
class MSELoss(nn.Module):
def __init__(self, model):
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new la... | from torch import nn, Tensor
from typing import Iterable, Dict
class MSELoss(nn.Module):
def __init__(self, model):
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as de... |
# Copyright (c) OpenMMLab. All rights reserved.
from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
get_classes, imagenet_det_classes,
imagenet_vid_classes, voc_classes)
from .eval_hooks import DistEvalHook, EvalHook
from .mean_ap import avera... | from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
get_classes, imagenet_det_classes,
imagenet_vid_classes, voc_classes)
from .eval_hooks import DistEvalHook, EvalHook
from .mean_ap import average_precision, eval_map, print_map_summary
from .... |
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pip... | _base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pip... |
from .bifpn import BiFPN
from .efficientdet import EfficientDet
from .efficientdet_head import EfficientDetSepBNHead
from .huber_loss import HuberLoss
from .tensorflow.anchor_generator import YXYXAnchorGenerator
from .tensorflow.coco_90class import Coco90Dataset
from .tensorflow.coco_90metric import Coco90Metric
from .... | from .anchor_generator import YXYXAnchorGenerator
from .bifpn import BiFPN
from .coco_90class import Coco90Dataset
from .coco_90metric import Coco90Metric
from .efficientdet import EfficientDet
from .efficientdet_head import EfficientDetSepBNHead
from .trans_max_iou_assigner import TransMaxIoUAssigner
from .yxyx_bbox_c... |
_base_ = 'ssd300_voc0712.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
... | _base_ = 'ssd300_voc0712.py'
input_size = 512
model = dict(
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
input_size=input_size,
strides=[8, 16, 32, 64, 128, 256, 512],
basesize_ratio_range=(0.15, 0.9),
ratios... |
import logging
import pathlib
from postmarker.core import PostmarkClient
from postmarker.models.emails import EmailManager
from prisma.enums import NotificationType
from pydantic import BaseModel
from backend.data.notifications import (
NotificationEventModel,
NotificationTypeOverride,
T_co,
)
from backen... | import logging
import pathlib
from postmarker.core import PostmarkClient
from postmarker.models.emails import EmailManager
from prisma.enums import NotificationType
from pydantic import BaseModel
from backend.data.notifications import (
NotificationEventModel,
NotificationTypeOverride,
T_co,
)
from backen... |
"""Init file of LlamaIndex."""
__version__ = "0.12.13"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.... | """Init file of LlamaIndex."""
__version__ = "0.12.12"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.... |
from typing import TYPE_CHECKING
import paddle
if TYPE_CHECKING:
from paddle import tensor
import numpy
def cosine(
x_mat: 'tensor', y_mat: 'tensor', eps: float = 1e-7, device: str = 'cpu'
) -> 'numpy.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: np... | from typing import TYPE_CHECKING
import paddle
if TYPE_CHECKING:
from paddle import tensor
import numpy
def cosine(
x_mat: 'tensor', y_mat: 'tensor', eps: float = 1e-7, device: str = 'cpu'
) -> 'numpy.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: np... |
"""Test the comparison chains."""
import re
import pytest
from langchain.evaluation.comparison.eval_chain import (
LabeledPairwiseStringEvalChain,
PairwiseStringEvalChain,
PairwiseStringResultOutputParser,
resolve_pairwise_criteria,
)
from langchain.evaluation.criteria.eval_chain import Criteria
from... | """Test the comparison chains."""
import re
import pytest
from langchain.evaluation.comparison.eval_chain import (
LabeledPairwiseStringEvalChain,
PairwiseStringEvalChain,
PairwiseStringResultOutputParser,
resolve_pairwise_criteria,
)
from langchain.evaluation.criteria.eval_chain import Criteria
from... |
from .depends import requires_admin_user, requires_user
from .jwt_utils import parse_jwt_token
from .middleware import APIKeyValidator, auth_middleware
from .models import User
__all__ = [
"parse_jwt_token",
"requires_user",
"requires_admin_user",
"APIKeyValidator",
"auth_middleware",
"User",
]... | from .config import Settings
from .depends import requires_admin_user, requires_user
from .jwt_utils import parse_jwt_token
from .middleware import APIKeyValidator, auth_middleware
from .models import User
__all__ = [
"Settings",
"parse_jwt_token",
"requires_user",
"requires_admin_user",
"APIKeyVal... |
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc... | # THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc... |
from __future__ import annotations
from collections.abc import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__... | from __future__ import annotations
from typing import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
... |
import os
from abc import abstractmethod
from typing import Union
from unittest import mock
import pytest
from langchain_core.tools import BaseTool
from pydantic import SecretStr
from langchain_tests.base import BaseStandardTests
class ToolsTests(BaseStandardTests):
""":private:
Base class for testing tools... | import os
from abc import abstractmethod
from typing import Union
from unittest import mock
import pytest
from langchain_core.tools import BaseTool
from pydantic import SecretStr
from langchain_tests.base import BaseStandardTests
class ToolsTests(BaseStandardTests):
"""
:private:
Base class for testing ... |
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import random
from typing import List, Optional, Tuple
import numpy as np
import torch
from mmengine.dist import get_rank, sync_random_seed
from mmengine.logging import print_log
from mmengine.utils import digit_version, is_list_of
from mmengine.utils.dl_... | # Copyright (c) OpenMMLab. All rights reserved.
import logging
import random
from typing import List, Optional, Tuple
import numpy as np
import torch
from mmengine.dist import get_rank, sync_random_seed
from mmengine.logging import print_log
from mmengine.utils import digit_version, is_list_of
from mmengine.utils.dl_... |
from typing import TYPE_CHECKING, Union, BinaryIO
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri, _get_file_context
if TYPE_CHECKING:
from docarray.typing import T
class BlobDataMixin:
"""Provide helper functions for :class:`Document` to handle binary data."""
def load_uri_to_blo... | from typing import TYPE_CHECKING, Union, BinaryIO
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri, _get_file_context
if TYPE_CHECKING:
from docarray.typing import T
class BlobDataMixin:
"""Provide helper functions for :class:`Document` to handle binary data."""
def load_uri_to_blo... |
import pytest
from langchain_core.agents import (
AgentActionMessageLog,
AgentFinish,
)
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import AIMessage, SystemMessage
from langchain.agents.output_parsers.openai_functions import (
OpenAIFunctionsAgentOutputParser,
)... | import pytest
from langchain_core.agents import (
AgentActionMessageLog,
AgentFinish,
)
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import AIMessage, SystemMessage
from langchain.agents.output_parsers.openai_functions import (
OpenAIFunctionsAgentOutputParser,
)... |
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from typing import Any, Callable
import torch
logger = logging.getLogger(__name__)
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {col... | from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Callable
import torch
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
... |
# dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dic... | # dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dic... |
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.prompts.prompt import PromptTemplate
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
templ1 = """You are a smart assistant desi... | # flake8: noqa
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.prompts.prompt import PromptTemplate
templ1 = """You are a smart ... |
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussia... | # Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussia... |
from pathlib import Path
import librosa
import pytest
from jina import Document, DocumentArray, Executor
from tensorflow.python.framework import ops
from ...vggish import vggish_input
from ...vggish_audio_encoder import VggishAudioEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2... | from pathlib import Path
import librosa
from jina import Document, DocumentArray, Executor
from tensorflow.python.framework import ops
from ...vggish import vggish_input
from ...vggish_audio_encoder import VggishAudioEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.ym... |
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.task_modules.coders import (DeltaXYWHBBoxCoder,
DeltaXYWHBBoxCoderForGLIP)
def test_delta_bbox_coder():
coder = DeltaXYWHBBoxCoder()
rois = torch.Tensor([[0., 0., 1., 1.... | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.task_modules.coders import DeltaXYWHBBoxCoder
def test_delta_bbox_coder():
coder = DeltaXYWHBBoxCoder()
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5.... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.cache import (
AstraDBCache,
AstraDBSemanticCache,
AzureCosmosDBSemanticCache,
CassandraCache,
CassandraSemanticCache,
FullLLMCache,
F... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.cache import (
AstraDBCache,
AstraDBSemanticCache,
AzureCosmosDBSemanticCache,
CassandraCache,
CassandraSemanticCache,
FullLLMCache,
F... |
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.us... | from datetime import datetime, timezone
import pytest
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.integrat... |
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_column_storage_init():
class InnerDoc(BaseDoc):
price: int
class MyDoc(BaseDoc):
tensor: AnyTen... | import numpy as np
from docarray import BaseDoc
from docarray.array import DocArrayStacked
from docarray.array.stacked.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_column_storage_init():
class InnerDoc(BaseDoc):
price: int
class MyDoc(BaseDoc):
tenso... |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import mmcv
import numpy as np
from mmengine.utils import is_str
def palette_val(palette: List[tuple]) -> List[tuple]:
"""Convert palette to matplotlib palette.
Args:
palette (List[tuple]): A list of color tuples.
... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import mmcv
import numpy as np
def palette_val(palette: List[tuple]) -> List[tuple]:
"""Convert palette to matplotlib palette.
Args:
palette (List[tuple]): A list of color tuples.
Returns:
List[tuple[f... |
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .compose import... | # Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .compose import... |
import logging
import os
from typing import Optional
from jina.importer import ImportExtensions
from jina.serve.runtimes.servers import BaseServer
class WebSocketServer(BaseServer):
"""WebSocket Server implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
... | import logging
import os
from typing import Optional
from jina.importer import ImportExtensions
from jina.serve.runtimes.servers import BaseServer
class WebSocketServer(BaseServer):
"""WebSocket Server implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
... |
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any
from sentence_transformers.model_card import (
SentenceTransformerModelCardCallback,
SentenceTransformerModelCardData,
)
from sentence_transformers.util imp... | from __future__ import annotations
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any
from huggingface_hub import ModelCard
from sentence_transformers.model_card import (
SentenceTransformerModelCardCallback,
SentenceTransformerModelCardData... |
from typing import TypeVar
from docarray.proto import NodeProto
from docarray.typing.tensor import NdArray
T = TypeVar('T', bound='Embedding')
class Embedding(NdArray):
def _to_node_protobuf(self: T, field: str = 'tensor') -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This functio... | from typing import TypeVar
from docarray.proto import NodeProto
from docarray.typing.tensor import Tensor
T = TypeVar('T', bound='Embedding')
class Embedding(Tensor):
def _to_node_protobuf(self: T, field: str = 'tensor') -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function ... |
# Backwards compatibility.
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import (
LLM,
BaseLLM,
)
__all__ = [
"LLM",
"BaseLLM",
"BaseLanguageModel",
]
| # Backwards compatibility.
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import (
LLM,
BaseLLM,
)
__all__ = [
"BaseLanguageModel",
"BaseLLM",
"LLM",
]
|
import json
import logging
import os
from typing import Dict, List
import torch
from torch import Tensor, nn
logger = logging.getLogger(__name__)
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: List[str], word_weights: Dict[... | import torch
from torch import Tensor
from torch import nn
from typing import List, Dict
import os
import json
import logging
logger = logging.getLogger(__name__)
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: List[str], wo... |
from typing import List
from torch.utils.data import Dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.readers.InputExample import InputExample
class SentencesDataset(Dataset):
"""
DEPRECATED: This class is no longer used. Instead of wrapping your List of InputExamples... | from torch.utils.data import Dataset
from typing import List
import torch
from .. import SentenceTransformer
from ..readers.InputExample import InputExample
class SentencesDataset(Dataset):
"""
DEPRECATED: This class is no longer used. Instead of wrapping your List of InputExamples in a SentencesDataset
an... |
from typing import Iterator
from typing import Tuple
import torch
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.ops.operation import Operation
class TorchLayer(torch.nn.Module):
def _post_build(self):
# Do not track variables when in a stateless scope.
# ... | from typing import Iterator
from typing import Tuple
import torch
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.ops.operation import Operation
class TorchLayer(torch.nn.Module):
def _post_build(self):
# Do not track variables when in a stateless scope.
# ... |
"""Bing Search tool spec."""
from typing import List, Optional
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
ENDPOINT_BASE_URL = "https://api.bing.microsoft.com/v7.0/"
class BingSearchToolSpec(BaseToolSpec):
"""Bing Search tool spec."""
spec_functions = ["bing_news_search"... | """Bing Search tool spec."""
from typing import List, Optional
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
ENDPOINT_BASE_URL = "https://api.bing.microsoft.com/v7.0/"
class BingSearchToolSpec(BaseToolSpec):
"""Bing Search tool spec."""
spec_functions = ["bing_news_search"... |
from .Asym import Asym
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Normalize import Normalize
from .Pooling import Pooling
from .Transformer import Transformer
from .Weighte... | from .Transformer import Transformer
from .Asym import Asym
from .BoW import BoW
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Normalize import Normalize
from .Pooling import Pooling
from .WeightedLayerPooling import WeightedLaye... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections import Sequence
from pathlib import Path
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.bu... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections import Sequence
from pathlib import Path
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.bu... |
_base_ = './cascade-mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pyt... | _base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pyt... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_bbox_coder import BaseBBoxCoder
from .bucketing_bbox_coder import BucketingBBoxCoder
from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder
from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder
from .pseudo_bbox_coder import PseudoBBoxCoder
from .tb... | from .base_bbox_coder import BaseBBoxCoder
from .bucketing_bbox_coder import BucketingBBoxCoder
from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder
from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder
from .pseudo_bbox_coder import PseudoBBoxCoder
from .tblr_bbox_coder import TBLRBBoxCoder
from .yolo_bb... |
from .custom_image_torch_encoder import CustomImageTorchEncoder
| from .custom_image_torch_encoder import CustomImageTorchEncoder |
from unittest import TestCase
from datasets import List, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
def _create_example_records(self):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
... | from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
def _create_example_records(self):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"}... |
from typing import Dict, List
import torch
from torchaudio._internal import module_utils as _mod_utils
@_mod_utils.requires_sox()
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
seed (int): seed value. valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
t... | from typing import List, Dict
import torch
from torchaudio._internal import module_utils as _mod_utils
@_mod_utils.requires_sox()
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
seed (int): seed value. valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
t... |
"""Tests using Scikit-Learn's bundled estimator_checks."""
from contextlib import contextmanager
import pytest
import sklearn
from packaging.version import parse as parse_version
from sklearn.utils.estimator_checks import parametrize_with_checks
import keras
from keras.src.backend import floatx
from keras.src.backen... | """Tests using Scikit-Learn's bundled estimator_checks."""
from contextlib import contextmanager
import pytest
import sklearn
from packaging.version import parse as parse_version
from sklearn.utils.estimator_checks import parametrize_with_checks
import keras
from keras.src.backend import floatx
from keras.src.backen... |
import numpy as np
from keras.src.api_export import keras_export
@keras_export(
[
"keras.utils.pad_sequences",
"keras.preprocessing.sequence.pad_sequences",
]
)
def pad_sequences(
sequences,
maxlen=None,
dtype="int32",
padding="pre",
truncating="pre",
value=0.0,
):
... | import numpy as np
from keras.src.api_export import keras_export
@keras_export(
[
"keras.utils.pad_sequences",
"keras.preprocessing.sequence.pad_sequences",
]
)
def pad_sequences(
sequences,
maxlen=None,
dtype="int32",
padding="pre",
truncating="pre",
value=0.0,
):
... |
from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_d... | from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_d... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information including virtual
memory, swap memory, and the memory of the current process.
Args:
interval (int... | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information: virtual memory, swap
memory and memory of current process.
Args:
interval (int): Checking interv... |
from pathlib import Path
from typing import Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document, ImageDocument
class ImageTabularChartReader(BaseReader):
"""
Image parser.
Extract tabular data from a chart or figure.
"""
def __... | from pathlib import Path
from typing import Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document, ImageDocument
class ImageTabularChartReader(BaseReader):
"""Image parser.
Extract tabular data from a chart or figure.
"""
def __init_... |
from typing import AsyncGenerator, Generator, Optional
import pytest
from jina import Client, Executor, requests
from jina._docarray import Document, DocumentArray
from jina.helper import random_port
class MyDocument(Document):
text: str
number: Optional[int]
class OutputDocument(Document):
text: str
... | from typing import Optional
import pytest
from jina import Client, Executor, requests
from jina._docarray import Document, DocumentArray
from jina.helper import random_port
class MyDocument(Document):
text: str
number: Optional[int]
class MyExecutor(Executor):
@requests(on='/hello')
async def task... |
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=True... | _base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=Tru... |
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | # flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
from keras.src.dtype_policies.dtype_policy import DTypePolicy as Policy
from keras.src.dtype_policies.dtype_policy import... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import DTypePolicy as Policy
from keras.src.dtype_policies.dtype_policy import dtype_policy
f... |
from argparse import ArgumentParser
from pathlib import Path
import mir_eval
import torch
from lightning_train import _get_dataloader, _get_model, sisdri_metric
def _eval(model, data_loader, device):
results = torch.zeros(4)
with torch.no_grad():
for _, batch in enumerate(data_loader):
mi... | from argparse import ArgumentParser
from pathlib import Path
import mir_eval
import torch
from lightning_train import _get_dataloader, _get_model, sisdri_metric
def _eval(model, data_loader, device):
results = torch.zeros(4)
with torch.no_grad():
for _, batch in enumerate(data_loader):
mi... |
from collections import Counter
from typing import Tuple, Dict, Union, Optional, TYPE_CHECKING
import numpy as np
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri
if TYPE_CHECKING:
from docarray.typing import T
class TextDataMixin:
"""Provide helper functions for :class:`Document` to s... | from collections import Counter
from typing import Tuple, Dict, Union, Optional, TYPE_CHECKING
import numpy as np
from .helper import _uri_to_blob, _to_datauri
if TYPE_CHECKING:
from ...typing import T
class TextDataMixin:
"""Provide helper functions for :class:`Document` to support text data."""
def ... |
from jina import Executor, requests
class MyExecutorToReload1(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests()
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'MyExecutorAfterReload'
| from jina import Executor, requests
class MyExecutorToReload1(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests()
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'MyExecutorAfterReload'
|
"""Copyright 2024, XGBoost contributors"""
import json
import os
import tempfile
from typing import Type, Union
import numpy as np
import pytest
import xgboost as xgb
pl = pytest.importorskip("polars")
@pytest.mark.parametrize("DMatrixT", [xgb.DMatrix, xgb.QuantileDMatrix])
def test_polars_basic(
DMatrixT: Un... | """Copyright 2024, XGBoost contributors"""
import json
import os
import tempfile
from typing import Type, Union
import numpy as np
import pytest
import xgboost as xgb
pl = pytest.importorskip("polars")
@pytest.mark.parametrize("DMatrixT", [xgb.DMatrix, xgb.QuantileDMatrix])
def test_polars_basic(
DMatrixT: Un... |
import re
import tempfile
import unittest
from pathlib import Path
from datasets.utils.metadata import DatasetMetadata
def _dedent(string: str) -> str:
indent_level = min(re.search("^ +", t).end() if t.startswith(" ") else 0 for t in string.splitlines())
return "\n".join([line[indent_level:] for line in stri... | import re
import tempfile
import unittest
from pathlib import Path
from datasets.utils.metadata import DatasetMetadata
def _dedent(string: str) -> str:
indent_level = min(re.search("^ +", t).end() if t.startswith(" ") else 0 for t in string.splitlines())
return "\n".join([line[indent_level:] for line in stri... |