input (string, lengths 33 to 5k characters)
output (string, lengths 32 to 5k characters)
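Each subsequent pair of lines is one example: the input string followed by the output string, both raw code snippets truncated for preview. A minimal sketch of consuming such records, assuming each row is exposed as a dict with "input" and "output" keys (the field names follow the header above; the inline sample record and the loading step are placeholders for illustration, not part of the dataset):

    # Hypothetical sketch: iterate over input/output code-string pairs.
    # The inline record is a placeholder; real rows come from the dataset itself.
    records = [
        {"input": 'VERSION = "0.3.64"', "output": 'VERSION = "0.3.63"'},
    ]

    for row in records:
        # Both fields are raw source strings, roughly 32 to 5k characters long.
        before, after = row["input"], row["output"]
        print(len(before), len(after))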
from backend.blocks.jina._auth import ( JinaCredentials, JinaCredentialsField, JinaCredentialsInput, ) from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request import requests class JinaEmbeddingBlock(Block): cla...
import requests from backend.blocks.jina._auth import ( JinaCredentials, JinaCredentialsField, JinaCredentialsInput, ) from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField class JinaEmbeddingBlock(Block): class Input(BlockSchema): ...
from typing import IO, TYPE_CHECKING, Callable, Optional from docarray.utils._internal.misc import import_library def _compress_bytes(data: bytes, algorithm: Optional[str] = None) -> bytes: if algorithm == 'lz4': if TYPE_CHECKING: from lz4 import frame else: lz4 = import_l...
from typing import IO, Callable, Optional def _compress_bytes(data: bytes, algorithm: Optional[str] = None) -> bytes: if algorithm == 'lz4': import lz4.frame # type: ignore data = lz4.frame.compress(data) elif algorithm == 'bz2': import bz2 data = bz2.compress(data) elif...
import copy import sqlite3 import warnings from dataclasses import dataclass, field from tempfile import NamedTemporaryFile from typing import Iterable, Dict, Optional, TYPE_CHECKING, Union from docarray.array.storage.sqlite.helper import initialize_table from docarray.array.storage.base.backend import BaseBackendMixi...
import sqlite3 import warnings from dataclasses import dataclass, field from tempfile import NamedTemporaryFile from typing import Iterable, Dict, Optional, TYPE_CHECKING, Union from docarray.array.storage.sqlite.helper import initialize_table from docarray.array.storage.base.backend import BaseBackendMixin from docar...
import contextlib from collections.abc import Iterable from pathlib import Path from typing import Any from tomlkit import dump, inline_table, load from tomlkit.items import InlineTable def _get_dep_inline_table(path: Path) -> InlineTable: dep = inline_table() dep.update({"path": str(path), "develop": True})...
from collections.abc import Iterable from pathlib import Path from typing import Any from tomlkit import dump, inline_table, load from tomlkit.items import InlineTable def _get_dep_inline_table(path: Path) -> InlineTable: dep = inline_table() dep.update({"path": str(path), "develop": True}) return dep ...
import sys from jina.parsers import set_gateway_parser from jina.parsers.helper import _update_gateway_args from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler def run(*args, **kwargs): runtime_args = set_gateway_parser().pars...
import sys from jina.parsers import set_gateway_parser from jina.parsers.helper import _update_gateway_args from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler def run(*args, **kwargs): runtime_args = set_gateway_parser().pars...
_base_ = 'faster-rcnn_r50_fpn_crop640-50e_coco.py' norm_cfg = dict(type='BN', requires_grad=True) model = dict( neck=dict(out_channels=128, inter_channels=128), rpn_head=dict(in_channels=128), roi_head=dict( bbox_roi_extractor=dict(out_channels=128), bbox_head=dict(in_channels=128)))
_base_ = 'faster_rcnn_r50_fpg_crop640_50e_coco.py' norm_cfg = dict(type='BN', requires_grad=True) model = dict( neck=dict(out_channels=128, inter_channels=128), rpn_head=dict(in_channels=128), roi_head=dict( bbox_roi_extractor=dict(out_channels=128), bbox_head=dict(in_channels=128)))
from unittest import TestCase import numpy as np from mmengine.testing import assert_allclose from mmdet.structures.mask import BitmapMasks, PolygonMasks class TestMaskStructures(TestCase): def test_bitmap_translate_same_size(self): mask_array = np.zeros((5, 10, 10), dtype=np.uint8) mask_array[...
from unittest import TestCase import numpy as np from mmengine.testing import assert_allclose from mmdet.structures.mask import BitmapMasks class TestMaskStructures(TestCase): def test_bitmap_translate_same_size(self): mask_array = np.zeros((5, 10, 10), dtype=np.uint8) mask_array[:, 0:5, 0:5] =...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet.structures import DetDataSample from mmdet.testing import demo_mm_inputs, get_detector_cfg from mmdet.utils import register_all_modules class TestRPN(TestCase...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet.structures import DetDataSample from mmdet.testing import demo_mm_inputs, get_detector_cfg from mmdet.utils import register_all_modules class TestRPN(TestCase...
""" This is a simple application for sentence embeddings: clustering Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied. """ from sklearn.cluster import AgglomerativeClustering from sentence_transformers import SentenceTransformer embedder = SentenceTransformer...
""" This is a simple application for sentence embeddings: clustering Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied. """ from sentence_transformers import SentenceTransformer from sklearn.cluster import AgglomerativeClustering embedder = SentenceTransformer(...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os from executor.torch_encoder import ImageTorchEncoder from pytest_mock import MockerFixture from torch import hub def test_load_from_url(tmpdir: str, mocker: MockerFixture) -> None: os.environ['TORCH_H...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os from pytest_mock import MockerFixture from torch import hub from ...torch_encoder import ImageTorchEncoder def test_load_from_url(tmpdir: str, mocker: MockerFixture) -> None: os.environ['TORCH_HOME']...
import sys from os import path from setuptools import find_packages from setuptools import setup if sys.version_info < (3, 7, 0): raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}') try: pkg_name = 'docarray' libinfo_py = path.join(pkg_name, '__init__.py') libinfo_content = o...
import sys from os import path from setuptools import find_packages from setuptools import setup if sys.version_info < (3, 7, 0): raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}') try: pkg_name = 'docarray' libinfo_py = path.join(pkg_name, '__init__.py') libinfo_content = o...
import pytest import random import string import logging from llama_index.core.schema import ( TextNode, RelatedNodeInfo, NodeRelationship, ) from llama_index.vector_stores.lindorm import ( LindormVectorStore, LindormVectorClient, ) from llama_index.core.vector_stores.types import ( VectorStoreQ...
import pytest import random import string import logging from llama_index.core.schema import ( TextNode, RelatedNodeInfo, NodeRelationship, ) from llama_index.vector_stores.lindorm import ( LindormVectorStore, LindormVectorClient, ) from llama_index.core.vector_stores.types import ( VectorStoreQ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.chat_loaders.langsmith import ( LangSmithDatasetChatLoader, LangSmithRunChatLoader, ) # Create a way to dynamically look up deprecated imports. # Used to consolidate log...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.chat_loaders.langsmith import ( LangSmithDatasetChatLoader, LangSmithRunChatLoader, ) # Create a way to dynamically look up deprecated imports. # Used to consolidate log...
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, auto_fp16, force_fp32 from mmdet.models.utils import ResLayer, SimplifiedBasicBlock from mmdet.registry import MODELS @MODELS.register_module() class GlobalContextHead(BaseModule)...
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, auto_fp16, force_fp32 from mmdet.models.builder import HEADS from mmdet.models.utils import ResLayer, SimplifiedBasicBlock @HEADS.register_module() class GlobalContextHead(BaseMod...
# Copyright (c) OpenMMLab. All rights reserved. from torch import Tensor from mmdet.data_elements import SampleList from mmdet.registry import MODELS from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig from .faster_rcnn import FasterRCNN @MODELS.register_module() class TridentFasterRCNN(FasterRCNN): ...
# Copyright (c) OpenMMLab. All rights reserved. from torch import Tensor from mmdet.core import SampleList from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .faster_rcnn import FasterRCNN @MODELS.register_module() class TridentFasterRCNN(FasterRCNN): ""...
from ._vggish_pipeline import VGGISH as _VGGISH, VGGishBundle from torchaudio._internal.module_utils import dropping_const_support VGGISH = dropping_const_support(_VGGISH, "VGGISH") __all__ = ["VGGISH", "VGGishBundle"]
from ._vggish_pipeline import VGGISH, VGGishBundle __all__ = ["VGGISH", "VGGishBundle"]
import os import torchaudio import torchvision from torch.utils.data import Dataset def _load_list(args, *filenames): output = [] length = [] for filename in filenames: filepath = os.path.join(args.root_dir, "labels", filename) for line in open(filepath).read().splitlines(): d...
import os import torchaudio import torchvision from torch.utils.data import Dataset def _load_list(args, *filenames): output = [] length = [] for filename in filenames: filepath = os.path.join(os.path.dirname(args.dataset_path), filename) for line in open(filepath).read().splitlines(): ...
import math from typing import List, Optional from llama_index.core.agent.react.types import ( BaseReasoningStep, ResponseReasoningStep, ) from llama_index.core.bridge.pydantic import Field, BaseModel from llama_index.core.prompts import PromptTemplate # taken from the paper DEFAULT_REFLECTION_PROMPT_STR = ""...
import math from typing import List, Optional from llama_index.core.agent.react.types import ( BaseReasoningStep, ResponseReasoningStep, ) from llama_index.core.bridge.pydantic import Field, BaseModel from llama_index.core.prompts import PromptTemplate # taken from the paper DEFAULT_REFLECTION_PROMPT_STR = ""...
from typing import TYPE_CHECKING from docarray.utils._internal.misc import import_library if TYPE_CHECKING: from google.protobuf import __version__ as __pb__version__ else: protobuf = import_library('google.protobuf', raise_error=True) __pb__version__ = protobuf.__version__ if __pb__version__.startswith...
from google.protobuf import __version__ as __pb__version__ if __pb__version__.startswith('4'): from docarray.proto.pb.docarray_pb2 import ( DictOfAnyProto, DocArrayStackedProto, DocumentArrayProto, DocumentProto, ListOfAnyProto, ListOfDocArrayProto, NdArrayPr...
from __future__ import annotations from typing import Any from langchain_text_splitters.base import TextSplitter class NLTKTextSplitter(TextSplitter): """Splitting text using NLTK package.""" def __init__( self, separator: str = "\n\n", language: str = "english", *, ...
from __future__ import annotations from typing import Any, List from langchain_text_splitters.base import TextSplitter class NLTKTextSplitter(TextSplitter): """Splitting text using NLTK package.""" def __init__( self, separator: str = "\n\n", language: str = "english", *, ...
import textwrap import pyarrow as pa import pytest from datasets import Features, Image from datasets.builder import InvalidConfigName from datasets.data_files import DataFilesList from datasets.packaged_modules.text.text import Text, TextConfig from ..utils import require_pil @pytest.fixture def text_file(tmp_pat...
import textwrap import pyarrow as pa import pytest from datasets import Features, Image from datasets.packaged_modules.text.text import Text from ..utils import require_pil @pytest.fixture def text_file(tmp_path): filename = tmp_path / "text.txt" data = textwrap.dedent( """\ Lorem ipsum dol...
from keras.src import ops from keras.src.api_export import keras_export from keras.src.backend.common.keras_tensor import KerasTensor from keras.src.layers.input_spec import InputSpec from keras.src.layers.layer import Layer @keras_export("keras.layers.Permute") class Permute(Layer): """Permutes the dimensions of...
from keras.src import ops from keras.src.api_export import keras_export from keras.src.backend.common.keras_tensor import KerasTensor from keras.src.layers.input_spec import InputSpec from keras.src.layers.layer import Layer @keras_export("keras.layers.Permute") class Permute(Layer): """Permutes the dimensions of...
from __future__ import annotations from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseTripletLoss(TripletLoss): def __init__( self, model: SparseEncoder, distance_metric=TripletDi...
from __future__ import annotations from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseTripletLoss(TripletLoss): def __init__( self, model: SparseEncoder, distance_metric=TripletDi...
from __future__ import annotations from enum import Enum from typing import Any, Mapping, Optional, Sequence, Tuple, Union import torch from torch.utils._pytree import tree_flatten from ._tv_tensor import TVTensor class BoundingBoxFormat(Enum): """Coordinate format of a bounding box. Available formats are...
from __future__ import annotations from enum import Enum from typing import Any, Mapping, Optional, Sequence, Tuple, Union import torch from torch.utils._pytree import tree_flatten from ._tv_tensor import TVTensor class BoundingBoxFormat(Enum): """[BETA] Coordinate format of a bounding box. Available form...
"""Argparser module for Deployment runtimes""" import argparse from jina.enums import DeploymentRoleType from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group from jina.parsers.orchestrate.runtimes.remote import _mixin_http_server_parser def mixin_base_deployment_parser(parser): """Add m...
"""Argparser module for Deployment runtimes""" import argparse from jina.enums import DeploymentRoleType from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group from jina.parsers.orchestrate.runtimes.remote import _mixin_http_server_parser def mixin_base_deployment_parser(parser): """Add mi...
import wave from typing import Union, BinaryIO, TYPE_CHECKING import numpy as np if TYPE_CHECKING: from docarray.typing import T class AudioDataMixin: """Provide helper functions for :class:`Document` to support audio data.""" def save_audio_tensor_to_file( self: 'T', file: Union[str, B...
import wave from typing import Union, BinaryIO, TYPE_CHECKING import numpy as np if TYPE_CHECKING: from ...typing import T class AudioDataMixin: """Provide helper functions for :class:`Document` to support audio data.""" def save_audio_tensor_to_file( self: 'T', file: Union[str, BinaryI...
from typing import TYPE_CHECKING, Dict, Iterable from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator if TYPE_CHECKING: from sentence_transformers.SentenceTransformer import SentenceTransformer class SequentialEvaluator(SentenceEvaluator): """ This evaluator allows that multi...
from typing import TYPE_CHECKING, Dict, Iterable from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator if TYPE_CHECKING: from sentence_transformers.SentenceTransformer import SentenceTransformer class SequentialEvaluator(SentenceEvaluator): """ This evaluator allows that multi...
from __future__ import annotations import pytest from torch.utils.data import BatchSampler, ConcatDataset, SequentialSampler from sentence_transformers.sampler import RoundRobinBatchSampler from sentence_transformers.util import is_datasets_available if is_datasets_available(): from datasets import Dataset else:...
from __future__ import annotations import pytest from datasets import Dataset from torch.utils.data import BatchSampler, ConcatDataset, SequentialSampler from sentence_transformers.sampler import RoundRobinBatchSampler DATASET_LENGTH = 25 @pytest.fixture def dummy_concat_dataset() -> ConcatDataset: """ Dum...
import pytest from docarray import DocumentArray from docarray.array.qdrant import DocumentArrayQdrant from docarray.array.sqlite import DocumentArraySqlite from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig from docarray.array.storage.qdrant import QdrantConfig from docarray.array.storage.weaviate...
import pytest from docarray import DocumentArray from docarray.array.qdrant import DocumentArrayQdrant from docarray.array.sqlite import DocumentArraySqlite from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig from docarray.array.storage.qdrant import QdrantConfig from docarray.array.storage.weaviate...
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from torch.utils.checkpoint import checkpoint from ..builder import NECKS @NECKS.register_module() class HRFPN(BaseModule): """HRFP...
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from torch.utils.checkpoint import checkpoint from ..builder import NECKS @NECKS.register_module() class HRFPN(BaseModule): """HRFPN (High Resolution Feature Pyramids) paper:...
from typing import Any, Dict, Optional from llama_index.core.storage.kvstore.types import BaseKVStore from llama_index.storage.kvstore.azurecosmosnosql import AzureCosmosNoSqlKVStore DEFAULT_INDEX_DATABASE = "IndexStoreDB" DEFAULT_INDEX_CONTAINER = "IndexStoreContainer" class AzureCosmosNoSqlIndexStore(BaseKVStore)...
from typing import Any, Dict, Optional from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore from llama_index.storage.kvstore.azurecosmosnosql import AzureCosmosNoSqlKVStore DEFAULT_INDEX_DATABASE = "IndexStoreDB" DEFAULT_INDEX_CONTAINER = "IndexStoreContainer" class AzureCosmosNoSqlIndex...
from .autograd_utils import use_deterministic_algorithms from .case_utils import ( disabledInCI, HttpServerMixin, PytorchTestCase, skipIfCudaSmallMemory, skipIfNoAudioDevice, skipIfNoCtcDecoder, skipIfNoCuCtcDecoder, skipIfNoCuda, skipIfNoExec, skipIfNoFFmpeg, skipIfNoHWAccel...
from .autograd_utils import use_deterministic_algorithms from .backend_utils import set_audio_backend from .case_utils import ( disabledInCI, HttpServerMixin, PytorchTestCase, skipIfCudaSmallMemory, skipIfNoAudioDevice, skipIfNoCtcDecoder, skipIfNoCuCtcDecoder, skipIfNoCuda, skipIfNo...
from pathlib import Path import dask.array as da import numpy as np from distributed import Client, LocalCluster from sklearn.datasets import load_svmlight_file import lightgbm as lgb if __name__ == "__main__": print("loading data") rank_example_dir = Path(__file__).absolute().parents[2] / "lambdarank" ...
from pathlib import Path import dask.array as da import numpy as np from distributed import Client, LocalCluster from sklearn.datasets import load_svmlight_file import lightgbm as lgb if __name__ == "__main__": print("loading data") rank_example_dir = Path(__file__).absolute().parents[2] / 'lambdarank' ...
"""**sys_info** prints information about the system and langchain packages for debugging purposes.""" # noqa: E501 from collections.abc import Sequence def _get_sub_deps(packages: Sequence[str]) -> list[str]: """Get any specified sub-dependencies.""" from importlib import metadata sub_deps = set() ...
"""**sys_info** prints information about the system and langchain packages for debugging purposes. """ from collections.abc import Sequence def _get_sub_deps(packages: Sequence[str]) -> list[str]: """Get any specified sub-dependencies.""" from importlib import metadata sub_deps = set() _underscored_...
""" This examples trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair as input and outputs a label. Here, it output a continuous labels 0...1 to indicate the similarity between the input pair. It does NOT produce a sentence embedding and does NOT work for individual sentences. Usage:...
""" This examples trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair as input and outputs a label. Here, it output a continuous labels 0...1 to indicate the similarity between the input pair. It does NOT produce a sentence embedding and does NOT work for individual sentences. Usage:...
from typing import TYPE_CHECKING, Union import numpy as np if TYPE_CHECKING: # pragma: no cover from docarray.typing import T import trimesh class Mesh: FILE_EXTENSIONS = [ 'glb', 'obj', 'ply', ] VERTICES = 'vertices' FACES = 'faces' class MeshDataMixin: """Pro...
from typing import TYPE_CHECKING, Union import numpy as np if TYPE_CHECKING: # pragma: no cover from docarray.typing import T import trimesh class Mesh: FILE_EXTENSIONS = [ 'glb', 'obj', 'ply', ] VERTICES = 'vertices' FACES = 'faces' class MeshDataMixin: """Pro...
# mypy: allow-untyped-defs from typing import Callable, Optional, Union import torch from .base_structured_sparsifier import BaseStructuredSparsifier __all__ = ["FPGMPruner"] class FPGMPruner(BaseStructuredSparsifier): r"""Filter Pruning via Geometric Median (FPGM) Structured Pruner This sparsifier prune ...
# mypy: allow-untyped-defs from typing import Callable, Optional, Union import torch from .base_structured_sparsifier import BaseStructuredSparsifier __all__ = ["FPGMPruner"] class FPGMPruner(BaseStructuredSparsifier): r"""Filter Pruning via Geometric Median (FPGM) Structured Pruner This sparsifier prune ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # hand...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # hand...
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applica...
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applica...
from dataclasses import dataclass, asdict, field from typing import ( Union, Dict, Optional, TYPE_CHECKING, Iterable, List, Tuple, ) import numpy as np from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap from docarray.helper import dataclass_from_dict, filter_dict, _s...
from dataclasses import dataclass, asdict, field from typing import ( Union, Dict, Optional, TYPE_CHECKING, Iterable, List, Tuple, ) import numpy as np from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap from docarray.helper import dataclass_from_dict, filter_dict, _s...
import logging import typing import autogpt_libs.auth.depends import autogpt_libs.auth.middleware import fastapi import prisma import backend.data.graph import backend.integrations.creds_manager import backend.integrations.webhooks.graph_lifecycle_hooks import backend.server.v2.library.db import backend.server.v2.lib...
import logging import typing import autogpt_libs.auth.depends import autogpt_libs.auth.middleware import fastapi import prisma import backend.data.graph import backend.integrations.creds_manager import backend.integrations.webhooks.graph_lifecycle_hooks import backend.server.v2.library.db import backend.server.v2.lib...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='YOLOF', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(3, ), frozen_stages=1, norm_cfg=dict(ty...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='YOLOF', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(3, ), frozen_stages=1, norm_cfg=dict(ty...
_base_ = 'ssd300_coco.py' # model settings input_size = 512 model = dict( neck=dict( out_channels=(512, 1024, 512, 256, 256, 256, 256), level_strides=(2, 2, 2, 2, 1), level_paddings=(1, 1, 1, 1, 1), last_kernel_size=4), bbox_head=dict( in_channels=(512, 1024, 512, 256, 2...
_base_ = 'ssd300_coco.py' # model settings input_size = 512 model = dict( neck=dict( out_channels=(512, 1024, 512, 256, 256, 256, 256), level_strides=(2, 2, 2, 2, 1), level_paddings=(1, 1, 1, 1, 1), last_kernel_size=4), bbox_head=dict( in_channels=(512, 1024, 512, 256, 2...
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py' model = dict( backbone=dict( depth=101, dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict(type='Pretrained', checkpoint='torchvisio...
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py' model = dict( backbone=dict( depth=101, dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict(type='Pretrained', checkpoint='torchvis...
from typing import Iterable, Union from docarray import Document, DocumentArray from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin class SequenceLikeMixin(BaseSequenceLikeMixin): """Implement sequence-like methods for DocumentArray with Redis as storage""" def __eq__(self, other): ...
from typing import Iterable, Union from docarray import Document, DocumentArray from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin class SequenceLikeMixin(BaseSequenceLikeMixin): """Implement sequence-like methods for DocumentArray with Redis as storage""" def __eq__(self, other): ...
"""langchain-core version information and utilities.""" VERSION = "0.3.64"
"""langchain-core version information and utilities.""" VERSION = "0.3.63"
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='ATSS', data_preprocessor=dict( type='DetDataPreprocessor', mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='ATSS', data_preprocessor=dict( type='DetDataPreprocessor', mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False...
import re import sys meetup_svg = '.github/images/meetup.svg' readme_md = 'README.md' conf_py = 'docs/conf.py' def rm_announce(): # remove all announcement with open(readme_md, encoding='utf-8') as fp: _old = fp.read() _new = re.sub( r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->...
import re import sys meetup_svg = '.github/images/meetup.svg' readme_md = 'README.md' conf_py = 'docs/conf.py' def rm_announce(): # remove all announcement with open(readme_md) as fp: _old = fp.read() _new = re.sub( r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)', rf...
"""langchain-core version information and utilities.""" VERSION = "0.3.60"
"""langchain-core version information and utilities.""" VERSION = "0.3.59"
""" This script contains an example how to perform semantic search with Elasticsearch. You need Elasticsearch up and running locally: https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.rea...
""" This script contains an example how to perform semantic search with Elasticsearch. You need Elasticsearch up and running locally: https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.rea...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.jaxarray import JaxArray, metaJax from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar('T', bound='Vid...
import pytest from langchain_core.utils.iter import batch_iterate @pytest.mark.parametrize( "input_size, input_iterable, expected_output", [ (2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]), (3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]), (1, [100, 200, 300], [[100], [200], [300]]), ...
import pytest from langchain_core.utils.iter import batch_iterate @pytest.mark.parametrize( "input_size, input_iterable, expected_output", [ (2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]), (3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]), (1, [100, 200, 300], [[100], [200], [300]]), ...
from __future__ import annotations import sys from .classification import CrossEncoderClassificationEvaluator from .correlation import CrossEncoderCorrelationEvaluator from .deprecated import ( CEBinaryAccuracyEvaluator, CEBinaryClassificationEvaluator, CECorrelationEvaluator, CEF1Evaluator, CERer...
from __future__ import annotations # TODO: Consider renaming all evaluators to CrossEncoder..., e.g. CrossEncoderNanoBEIREvaluator, CrossEncoderClassificationEvaluator, etc. from .CEBinaryAccuracyEvaluator import CEBinaryAccuracyEvaluator from .CEBinaryClassificationEvaluator import CEBinaryClassificationEvaluator fro...
_base_ = './cascade-rcnn_r50_fpn_20e_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch...
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch...
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class PAA(SingleStageDetector): """Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_.""" def __init__(self, backbone, ...
from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class PAA(SingleStageDetector): """Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_.""" def __init__(self, backbone, neck, bbox_head, ...
_base_ = [ '../_base_/models/faster-rcnn_r50-caffe-c4.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ]
_base_ = [ '../_base_/models/faster-rcnn_r50-caffe-c4.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ ...
"""Dump objects to json.""" import json from typing import Any from pydantic import BaseModel from langchain_core.load.serializable import Serializable, to_json_not_implemented def default(obj: Any) -> Any: """Return a default value for an object. Args: obj: The object to serialize to json if it i...
"""Dump objects to json.""" import json from typing import Any from pydantic import BaseModel from langchain_core.load.serializable import Serializable, to_json_not_implemented def default(obj: Any) -> Any: """Return a default value for an object. Args: obj: The object to serialize to json if it i...
import json from jina.logging.logger import JinaLogger from jina.parsers import set_gateway_parser from jina.serve.runtimes.gateway.http_fastapi_app import get_fastapi_app from jina.serve.runtimes.gateway.streamer import GatewayStreamer JINA_LOGO_URL = 'https://api.jina.ai/logo/logo-product/jina-core/horizontal-layou...
import json from jina.logging.logger import JinaLogger from jina.parsers import set_gateway_parser from jina.serve.runtimes.gateway.http_fastapi_app import get_fastapi_app from jina.serve.runtimes.gateway.streamer import GatewayStreamer JINA_LOGO_URL = 'https://api.jina.ai/logo/logo-product/jina-core/horizontal-layou...
_base_ = './htc_hrnetv2p_w40_20e_coco.py' # learning policy max_epochs = 28 train_cfg = dict(max_epochs=max_epochs) param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=max_epochs, by_epo...
_base_ = './htc_hrnetv2p_w40_20e_coco.py' # learning policy lr_config = dict(step=[24, 27]) runner = dict(type='EpochBasedRunner', max_epochs=28)
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
__all__ = ['reduce', 'reduce_all'] from typing import Dict, List, Optional from docarray import DocList def reduce( left: DocList, right: DocList, left_id_map: Optional[Dict] = None ) -> 'DocList': """ Reduces left and right DocList into one DocList in-place. Changes are applied to the left DocList....
"""**OutputParser** classes parse the output of an LLM call. **Class hierarchy:** .. code-block:: BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser **Main helpers:** .. code-block:: Serializable, Generation, PromptValue """ # noqa: E501 from import...
"""**OutputParser** classes parse the output of an LLM call. **Class hierarchy:** .. code-block:: BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser **Main helpers:** .. code-block:: Serializable, Generation, PromptValue """ # noqa: E501 from langch...
import json from typing import Any, Callable, Iterator, List, Mapping, Optional from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document RecordHandler = Callable[[Any, Optional[str]], Document] class AirbyteCDKReader(BaseReader): """ AirbyteCDKReader reader. Ret...
import json from typing import Any, Callable, Iterator, List, Mapping, Optional from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document RecordHandler = Callable[[Any, Optional[str]], Document] class AirbyteCDKReader(BaseReader): """AirbyteCDKReader reader. Retrieve...
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class Translation: """`Feature` for translations with fixed languages per example. Here for compatiblity with tfd...
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class Translation: """`Feature` for translations with fixed languages per example. Here for compatibl...
import datasets from ..folder_based_builder import folder_based_builder logger = datasets.utils.logging.get_logger(__name__) class VideoFolderConfig(folder_based_builder.FolderBasedBuilderConfig): """BuilderConfig for ImageFolder.""" drop_labels: bool = None drop_metadata: bool = None def __post_...
from typing import List import datasets from ..folder_based_builder import folder_based_builder logger = datasets.utils.logging.get_logger(__name__) class VideoFolderConfig(folder_based_builder.FolderBasedBuilderConfig): """BuilderConfig for ImageFolder.""" drop_labels: bool = None drop_metadata: boo...
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
import weakref from keras.src.backend.common import global_state def set_tensor_attr(tensor, attr, value): try: setattr(tensor, attr, value) except AttributeError: attr_dict = global_state.get_global_attribute(f"{attr}_dict") if attr_dict is None: if value is None: ...
import weakref from keras.src.backend.common import global_state def set_tensor_attr(tensor, attr, value): try: setattr(tensor, attr, value) except AttributeError: if value is None: return attr_dict = global_state.get_global_attribute(f"{attr}_dict") if attr_dict i...
from typing import TYPE_CHECKING, Optional, Type, TypeVar from pydantic import AnyUrl as BaseAnyUrl from pydantic import errors, parse_obj_as from docarray.typing.abstract_type import AbstractType from docarray.typing.proto_register import _register_proto if TYPE_CHECKING: from pydantic.networks import Parts ...
from typing import TYPE_CHECKING, Optional, Type, TypeVar from pydantic import AnyUrl as BaseAnyUrl from pydantic import errors, parse_obj_as from docarray.typing.abstract_type import AbstractType if TYPE_CHECKING: from pydantic.networks import Parts from docarray.proto import NodeProto T = TypeVar('T', bo...
import csv import os from pathlib import Path from typing import Dict, List, Tuple, Union import torchaudio from torch import Tensor from torch.utils.data import Dataset def load_commonvoice_item( line: List[str], header: List[str], path: str, folder_audio: str, ext_audio: str ) -> Tuple[Tensor, int, Dict[str, s...
import csv import os from pathlib import Path from typing import Dict, List, Tuple, Union import torchaudio from torch import Tensor from torch.utils.data import Dataset def load_commonvoice_item( line: List[str], header: List[str], path: str, folder_audio: str, ext_audio: str ) -> Tuple[Tensor, int, Dict[str, s...
""" This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file. SimCSE will be training using these sentences. Checkpoints are stored every 500 steps to the output folder. Usage: python train_simcse_from_file.py path/to/sentences.txt """ import gzi...
""" This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file. SimCSE will be training using these sentences. Checkpoints are stored every 500 steps to the output folder. Usage: python train_simcse_from_file.py path/to/sentences.txt """ import gzi...
import copy from typing import Dict, Tuple from jina.serve.runtimes.request_handlers.data_request_handler import DataRequestHandler _SPECIFIC_EXECUTOR_SEPARATOR = '__' def _spit_key_and_executor_name(key_name: str) -> Tuple[str]: """Split a specific key into a key, name pair ex: 'key__my_executor' will be ...
import copy from typing import Dict, Tuple from jina.serve.runtimes.request_handlers.data_request_handler import DataRequestHandler _SPECIFIC_EXECUTOR_SEPARATOR = '__' def _spit_key_and_executor_name(key_name: str) -> Tuple[str]: """Split a specific key into a key, name pair ex: 'key__my_executor' will be ...
from abc import abstractmethod from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, TypeVar, Union from docarray import Document, DocumentArray from docarray.math import ndarray from docarray.score import NamedScore from qdrant_client.http import models as rest from qdrant_client.http.models.models import...
from abc import abstractmethod from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, TypeVar, Union from docarray import Document, DocumentArray from docarray.math import ndarray from docarray.score import NamedScore from qdrant_client.http import models as rest from qdrant_client.http.models.models import...
# Copyright (c) OpenMMLab. All rights reserved. from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean, sync_random_seed) from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor, generate_coordinate, mask2ndarray, multi_apply,...
# Copyright (c) OpenMMLab. All rights reserved. from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean) from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor, generate_coordinate, mask2ndarray, multi_apply, ...
from typing import TYPE_CHECKING if TYPE_CHECKING: from ..providers import ProviderName from ._base import BaseWebhooksManager _WEBHOOK_MANAGERS: dict["ProviderName", type["BaseWebhooksManager"]] = {} # --8<-- [start:load_webhook_managers] def load_webhook_managers() -> dict["ProviderName", type["BaseWebhoo...
from typing import TYPE_CHECKING if TYPE_CHECKING: from ..providers import ProviderName from ._base import BaseWebhooksManager _WEBHOOK_MANAGERS: dict["ProviderName", type["BaseWebhooksManager"]] = {} # --8<-- [start:load_webhook_managers] def load_webhook_managers() -> dict["ProviderName", type["BaseWebhoo...
# Copyright (c) OpenMMLab. All rights reserved. import glob import os import os.path as osp import warnings from typing import Union from mmengine.config import Config, ConfigDict from mmengine.logging import print_log def find_latest_checkpoint(path, suffix='pth'): """Find the latest checkpoint from the working...
# Copyright (c) OpenMMLab. All rights reserved. import glob import os import os.path as osp import warnings from mmengine.config import Config, ConfigDict from mmengine.logging import print_log def find_latest_checkpoint(path, suffix='pth'): """Find the latest checkpoint from the working directory. Args: ...
# dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='disk') tra...
# dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='disk') tra...
# Copyright (c) OpenMMLab. All rights reserved. from .augment_wrappers import AutoAugment, RandAugment from .colorspace import (AutoContrast, Brightness, Color, ColorTransform, Contrast, Equalize, Invert, Posterize, Sharpness, Solarize, SolarizeAdd) from .formatting imp...
# Copyright (c) OpenMMLab. All rights reserved. from .augment_wrappers import AutoAugment, RandAugment from .colorspace import (AutoContrast, Brightness, Color, ColorTransform, Contrast, Equalize, Invert, Posterize, Sharpness, Solarize, SolarizeAdd) from .formatting imp...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( type='VFNet', data_preprocessor=dict( type='DetDataPreprocessor', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], ...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( type='VFNet', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, ...
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause def check_matplotlib_support(caller_name): """Raise ImportError with detailed error message if mpl is not installed. Plot utilities like any of the Display's plotting functions should lazily import matplotlib and call this hel...
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause def check_matplotlib_support(caller_name): """Raise ImportError with detailed error message if mpl is not installed. Plot utilities like any of the Display's plotting functions should lazily import matplotlib and call this hel...
_base_ = [ '../_base_/models/faster-rcnn_r50_fpn.py', '../common/lsj-200e_coco-detection.py' ] image_size = (1024, 1024) batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] model = dict(data_preprocessor=dict(batch_augments=batch_augments)) train_dataloader = dict(batch_size=8, num_workers=4) #...
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../common/lsj_200e_coco_detection.py' ] image_size = (1024, 1024) batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] model = dict(data_preprocessor=dict(batch_augments=batch_augments)) train_dataloader = dict(batch_size=8, num_workers=4) #...
from __future__ import annotations import json import logging from typing import Any, Dict, List, Literal, Optional import requests from langchain_core.callbacks import CallbackManagerForLLMRun from langchain_core.language_models.llms import LLM from langchain_core.utils import convert_to_secret_str, get_from_dict_or...
from __future__ import annotations import json import logging from typing import Any, Dict, List, Literal, Optional import requests from langchain_core.callbacks import CallbackManagerForLLMRun from langchain_core.language_models.llms import LLM from langchain_core.utils import convert_to_secret_str, get_from_dict_or...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Dict, Optional, Tuple import numpy as np import paddlehub as hub from jina import DocumentArray, Executor, requests from jina_commons.batching import get_docs_batch_generator class TextPaddl...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Dict, Optional, Tuple import numpy as np import paddlehub as hub from jina import DocumentArray, Executor, requests from jina_commons.batching import get_docs_batch_generator class TextPaddl...
from llama_index.core.base.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, ImageBlock, LLMMetadata, MessageRole, TextBlock, AudioBlock, ) from llama_index.core.llm...
from llama_index.core.base.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, ImageBlock, LLMMetadata, MessageRole, TextBlock, ) from llama_index.core.llms.custom import ...
import os import numpy as np import pytest import torch from pydantic import parse_obj_as from docarray import BaseDoc from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTens...
import os import numpy as np import pytest import torch from pydantic import parse_obj_as from docarray import BaseDoc from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor from docarray.utils._internal.misc import is_tf_availa...
from docarray import BaseDoc, DocList def test_instance_and_equivalence(): class MyDoc(BaseDoc): text: str docs = DocList[MyDoc]([MyDoc(text='hello')]) assert issubclass(DocList[MyDoc], DocList[MyDoc]) assert issubclass(docs.__class__, DocList[MyDoc]) assert isinstance(docs, DocList[MyD...
from docarray import BaseDoc, DocArray def test_instance_and_equivalence(): class MyDoc(BaseDoc): text: str docs = DocArray[MyDoc]([MyDoc(text='hello')]) assert issubclass(DocArray[MyDoc], DocArray[MyDoc]) assert issubclass(docs.__class__, DocArray[MyDoc]) assert isinstance(docs, DocArr...
import numpy as np import pytest from keras.src import backend from keras.src import layers from keras.src import testing def squared_l2_norm(x): x = backend.convert_to_numpy(x) return np.sum(x**2) class UnitNormalizationTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_un_ba...
import numpy as np import pytest from keras.src import backend from keras.src import layers from keras.src import testing def squared_l2_norm(x): x = backend.convert_to_numpy(x) return np.sum(x**2) class UnitNormalizationTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_un_ba...
import warnings from abc import ABC from typing import Any, BinaryIO, Dict, TypeVar, Union from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.utils.misc import is_notebook T = TypeVar('T', bound='AbstractAudioTensor') MAX_INT_16 = 2**15 class AbstractAudioTensor(AbstractTensor, ABC): ...
import warnings import wave from abc import ABC from typing import BinaryIO, TypeVar, Union from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.utils.misc import is_notebook T = TypeVar('T', bound='AbstractAudioTensor') MAX_INT_16 = 2**15 class AbstractAudioTensor(AbstractTensor, ABC): ...
import pytest from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface from pytest_httpx import HTTPXMock from requests_mock import Mocker from contextlib import contextmanager import os from typing import Generator, Any @pytest.fixture() def mock_local_models(httpx_mock: HTTPXMock, base_url: str): ...
import pytest from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface from pytest_httpx import HTTPXMock from requests_mock import Mocker from contextlib import contextmanager import os from typing import Generator, Any @pytest.fixture() def mock_local_models(httpx_mock: HTTPXMock, base_url: str): ...
"""Build configuration""" import dataclasses from typing import Any, Dict, List, Optional @dataclasses.dataclass class BuildConfiguration: # pylint: disable=R0902 """Configurations use when building libxgboost""" # Whether to hide C++ symbols in libxgboost.so hide_cxx_symbols: bool = True # Whether ...
"""Build configuration""" import dataclasses from typing import Any, Dict, List, Optional @dataclasses.dataclass class BuildConfiguration: # pylint: disable=R0902 """Configurations use when building libxgboost""" # Whether to hide C++ symbols in libxgboost.so hide_cxx_symbols: bool = True # Whether ...
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class HfFileSystem(AbstractFileSystem): """Interfa...
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class HfFileSystem(AbstractFileSystem): """Interfa...
import subprocess import sys import pytest from pytest_benchmark.fixture import BenchmarkFixture # type: ignore @pytest.mark.parametrize( "import_path", [ pytest.param( "from langchain_core.messages import HumanMessage", id="HumanMessage" ), pytest.param("from langchain_c...
import subprocess import sys import pytest from pytest_benchmark.fixture import BenchmarkFixture # type: ignore @pytest.mark.parametrize( "import_path", [ pytest.param( "from langchain_core.messages import HumanMessage", id="HumanMessage" ), pytest.param("from langchain_c...
"""Decision tree based models for classification and regression.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from ._classes import ( BaseDecisionTree, DecisionTreeClassifier, DecisionTreeRegressor, ExtraTreeClassifier, ExtraTreeRegressor, ) from ._export impor...
"""Decision tree based models for classification and regression.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from ._classes import ( BaseDecisionTree, DecisionTreeClassifier, DecisionTreeRegressor, ExtraTreeClassifier, ExtraTreeRegressor, ) from ._export impor...
import sys from os import path from setuptools import find_packages from setuptools import setup if sys.version_info < (3, 7, 0): raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}') try: pkg_name = 'docarray' libinfo_py = path.join(pkg_name, '__init__.py') libinfo_content = o...
import sys from os import path from setuptools import find_packages from setuptools import setup if sys.version_info < (3, 7, 0): raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}') try: pkg_name = 'docarray' libinfo_py = path.join(pkg_name, '__init__.py') libinfo_content = o...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from pathlib import Path from typing import List import pytest from jina import Document, DocumentArray, Executor from laser_encoder import LaserEncoder _EMBEDDING_DIM = 1024 @pytest.fixture(scope='session') ...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from pathlib import Path from typing import List import pytest from jina import Document, DocumentArray, Executor from ...laser_encoder import LaserEncoder _EMBEDDING_DIM = 1024 @pytest.fixture(scope='sessio...
# Copyright (c) OpenMMLab. All rights reserved. from .atss import ATSS from .autoassign import AutoAssign from .base import BaseDetector from .cascade_rcnn import CascadeRCNN from .centernet import CenterNet from .cornernet import CornerNet from .deformable_detr import DeformableDETR from .detr import DETR from .fast_r...
# Copyright (c) OpenMMLab. All rights reserved. from .atss import ATSS from .autoassign import AutoAssign from .base import BaseDetector from .cascade_rcnn import CascadeRCNN from .centernet import CenterNet from .cornernet import CornerNet from .deformable_detr import DeformableDETR from .detr import DETR from .fast_r...
from typing import Annotated, Any, Literal, Optional, TypedDict from uuid import uuid4 from pydantic import BaseModel, Field, SecretStr, field_serializer class _BaseCredentials(BaseModel): id: str = Field(default_factory=lambda: str(uuid4())) provider: str title: Optional[str] @field_serializer("*")...
from typing import Annotated, Any, Literal, Optional, TypedDict from uuid import uuid4 from pydantic import BaseModel, Field, SecretStr, field_serializer class _BaseCredentials(BaseModel): id: str = Field(default_factory=lambda: str(uuid4())) provider: str title: Optional[str] @field_serializer("*")...
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py' # dataset settings train_pipeline = [ dict( type='LoadImageFromFile', file_client_args={{_base_.file_client_args}}), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomChoiceResize', scale=[(1333, 640), (1333, 8...
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' # dataset settings train_pipeline = [ dict( type='LoadImageFromFile', file_client_args={{_base_.file_client_args}}), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomChoiceResize', scale=[(1333, 640), (1333, 8...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import pytest import torch from mmengine.data import LabelData class TestLabelData(TestCase): def test_label_to_onehot(self): item = torch.tensor([1], dtype=torch.int64) num_classes = 10 onehot = LabelData.lab...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine.data import LabelData class TestLabelData(TestCase): def test_label_to_onehot(self): item = torch.tensor([1], dtype=torch.int64) num_classes = 10 onehot = LabelData.label_to_onehot(l...
_base_ = [ '../_base_/models/faster_rcnn_r50_caffe_c4.py', '../_base_/schedules/schedule_1x.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) # dataset settings train_pipeline = [ dict( type='LoadImageFromFile', ...
_base_ = [ '../_base_/models/faster_rcnn_r50_caffe_c4.py', '../_base_/default_runtime.py' ] model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) # dataset settings dataset_type = 'VOCDataset' data_root = 'data/VOCdevkit/' img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to...
from typing import Union, Iterable, Dict from ..base.seqlike import BaseSequenceLikeMixin from .... import Document class SequenceLikeMixin(BaseSequenceLikeMixin): """Implement sequence-like methods for DocumentArray with Elastic as storage""" def __eq__(self, other): """Compare this object to the o...
from typing import Union, Iterable, Dict from ..base.seqlike import BaseSequenceLikeMixin from .... import Document class SequenceLikeMixin(BaseSequenceLikeMixin): """Implement sequence-like methods for DocumentArray with Elastic as storage""" def __eq__(self, other): """Compare this object to the o...
class DataAdapter: """Base class for input data adapters. The purpose of a DataAdapter is to provide a unfied interface to iterate over input data provided in a variety of formats -- such as NumPy arrays, tf.Tensors, tf.data.Datasets, Keras PyDatasets, etc. """ def get_numpy_iterator(self): ...
class DataAdapter(object): """Base class for input data adapters. The purpose of a DataAdapter is to provide a unfied interface to iterate over input data provided in a variety of formats -- such as NumPy arrays, tf.Tensors, tf.data.Datasets, Keras PyDatasets, etc. """ def get_numpy_iterator(s...