Columns: input (string, lengths 33 to 5k), output (string, lengths 32 to 5k)
input:
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor import *  # noqa: F401, F403
from .bbox import *  # noqa: F401, F403
from .data_structures import *  # noqa: F401, F403
from .evaluation import *  # noqa: F401, F403
from .hook import *  # noqa: F401, F403
from .mask import *  # noqa: F401, F403
from .optimiz...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor import *  # noqa: F401, F403
from .bbox import *  # noqa: F401, F403
from .data_structures import *  # noqa: F401, F403
from .evaluation import *  # noqa: F401, F403
from .hook import *  # noqa: F401, F403
from .mask import *  # noqa: F401, F403
from .optimiz...

input:
from __future__ import annotations

from typing import Iterable

import torch
from torch import Tensor, nn

from sentence_transformers import SentenceTransformer


class MSELoss(nn.Module):
    def __init__(self, model: SentenceTransformer) -> None:
        """
        Computes the MSE loss between the computed sentenc...

output:
from __future__ import annotations

from typing import Iterable

import torch
from torch import Tensor, nn

from sentence_transformers import SentenceTransformer


class MSELoss(nn.Module):
    def __init__(self, model: SentenceTransformer) -> None:
        """
        Computes the MSE loss between the computed sentenc...

input:
__version__ = '0.1.0'

from docarray.array.array import DocumentArray
from docarray.base_document.document import BaseDocument

__all__ = [
    'BaseDocument',
    'DocumentArray',
]

output:
__version__ = '0.1.0'

from docarray.array.array import DocumentArray
from docarray.document.document import BaseDocument
from docarray.predefined_document import Audio, Image, Mesh3D, PointCloud3D, Text

__all__ = [
    'BaseDocument',
    'DocumentArray',
    'Image',
    'Audio',
    'Text',
    'Mesh3D',
    'Point...

input:
import os

import numpy as np
import pytest
import torch
from pydantic import parse_obj_as

from docarray.typing import ImageBytes, ImageNdArray, ImageTorchTensor
from docarray.utils._internal.misc import is_tf_available

tf_available = is_tf_available()
if tf_available:
    import tensorflow as tf

    from docarray.t...

output:
import os

import numpy as np
import pytest
import torch
from pydantic import parse_obj_as

from docarray.typing import ImageNdArray, ImageTorchTensor
from docarray.utils._internal.misc import is_tf_available

tf_available = is_tf_available()
if tf_available:
    import tensorflow as tf

    from docarray.typing.tensor...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOG_PROCESSORS, LOOPS,
                   METRICS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS,
                   OPTIMIZERS, PARA...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOG_PROCESSOR, LOOPS,
                   METRICS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS,
                   OPTIMIZERS, PARAM...

input:
from typing import Optional

import torch

__all__ = [
    "version",
    "is_available",
    "get_max_alg_id",
]

try:
    from torch._C import _cusparselt
except ImportError:
    _cusparselt = None  # type: ignore[assignment]

__cusparselt_version: Optional[int] = None
__MAX_ALG_ID: Optional[int] = None

if _cuspars...

output:
# mypy: allow-untyped-defs
from typing import Optional

import torch

__all__ = [
    "version",
    "is_available",
    "get_max_alg_id",
]

try:
    from torch._C import _cusparselt
except ImportError:
    _cusparselt = None  # type: ignore[assignment]

__cusparselt_version: Optional[int] = None
__MAX_ALG_ID: Option...

input:
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional...

output:
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional...

input:
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from ..providers import ProviderName
    from ._base import BaseWebhooksManager

_WEBHOOK_MANAGERS: dict["ProviderName", type["BaseWebhooksManager"]] = {}


# --8<-- [start:load_webhook_managers]
def load_webhook_managers() -> dict["ProviderName", type["BaseWebhoo...

output:
from typing import TYPE_CHECKING

from .compass import CompassWebhookManager
from .github import GithubWebhooksManager
from .slant3d import Slant3DWebhooksManager

if TYPE_CHECKING:
    from ..providers import ProviderName

from ._base import BaseWebhooksManager

# --8<-- [start:WEBHOOK_MANAGERS_BY_NAME]
WEBHOOK_MAN...

input:
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version

from .version import __version__, version_info

mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)

mmengine_minimum_version = '0.6.0'
mmengi...

output:
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version

from .version import __version__, version_info

mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)

mmengine_minimum_version = '0.4.0'
mmengi...

input:
from typing import List, Optional

from torchaudio._internal.module_utils import deprecated

from . import utils
from .common import AudioMetaData

__all__ = [
    "AudioMetaData",
    "load",
    "info",
    "save",
    "list_audio_backends",
    "get_audio_backend",
    "set_audio_backend",
]

info = utils.get_info_...

output:
from typing import List, Optional

import torchaudio
from torchaudio._internal.module_utils import deprecated


# TODO: Once legacy global backend is removed, move this to torchaudio.__init__
def _init_backend():
    from . import utils

    torchaudio.info = utils.get_info_func()
    torchaudio.load = utils.get_load_f...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector


@DETECTORS.register_module()
class YOLOF(SingleStageDetector):
    r"""Implementation of `You Only Look One-level Feature
    <https://arxiv.org/abs/2103.09460>`_"""

    def __init__(self, ...

output:
from ..builder import DETECTORS
from .single_stage import SingleStageDetector


@DETECTORS.register_module()
class YOLOF(SingleStageDetector):
    r"""Implementation of `You Only Look One-level Feature
    <https://arxiv.org/abs/2103.09460>`_"""

    def __init__(self,
                 backbone,
                 neck,
                 ...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from .base_sampler import BaseSampler
from .combined_sampler import CombinedSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .mask_pseudo_sampler import MaskPseudoSampler
from .m...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from .base_sampler import BaseSampler
from .combined_sampler import CombinedSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .ohem_sampler import OHEMSampler
from .pseudo_sampler...

input:
import os
from abc import abstractmethod
from unittest import mock

import pytest
from langchain_core.embeddings import Embeddings
from pydantic import SecretStr

from langchain_tests.base import BaseStandardTests


class EmbeddingsTests(BaseStandardTests):
    """:private:"""

    @property
    @abstractmethod
    def...

output:
import os
from abc import abstractmethod
from unittest import mock

import pytest
from langchain_core.embeddings import Embeddings
from pydantic import SecretStr

from langchain_tests.base import BaseStandardTests


class EmbeddingsTests(BaseStandardTests):
    """
    :private:
    """

    @property
    @abstractmeth...

input:
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import random
from typing import List, Optional, Tuple

import numpy as np
import torch
from torch.utils.data import DataLoader

from mmengine.device import is_cuda_available, is_musa_available
from mmengine.dist import get_rank, sync_random_seed
from mmeng...

output:
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import random
from typing import List, Optional, Tuple

import numpy as np
import torch
from torch.utils.data import DataLoader

from mmengine.dist import get_rank, sync_random_seed
from mmengine.logging import print_log
from mmengine.utils import digit_ver...

input:
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag...

output:
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag...

input:
from unittest.mock import mock_open, patch

from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization

from llama_index.llms.cortex.utils import (
    generate_sf_jwt,
    is_spcs_environment,
    get_spcs_base_url,
    get_default_spcs_token,
    SPCS_TOKEN_PATH...

output:
from unittest.mock import mock_open, patch

from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization

from llama_index.llms.cortex.utils import (
    generate_sf_jwt,
    is_spcs_environment,
    get_spcs_base_url,
    get_default_spcs_token,
    SPCS_TOKEN_PATH...

input:
import pytest

from docarray import DocumentArray, Document
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storag...

output:
import pytest

from docarray import DocumentArray, Document
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storag...

input:
from typing import Any, List, Optional, Union
from pathlib import Path

from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import ...

output:
from typing import Any, List, Optional

from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import (
    ReRankEndEvent,
    ReRank...

input:
from typing import Dict

from jina import Client, Document, DocumentArray, Executor, Flow, requests

ORIGINAL_PARAMS = {'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}}
OVERRIDEN_EXECUTOR1_PARAMS = {
    'param1': 'changed',
    'param2': 60,
    'exec_name': {'param1': 'changed'},
}


class DummyOverrid...

output:
from typing import Dict

from jina import Client, Document, DocumentArray, Executor, Flow, requests

ORIGINAL_PARAMS = {'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}}
OVERRIDEN_EXECUTOR1_PARAMS = {
    'param1': 'changed',
    'param2': 60,
    'exec_name': {'param1': 'changed'},
}


class DummyOverrid...

"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.applications.efficientnet_v2 import ( EfficientNetV2B0 as EfficientNetV2B0, ) from keras.src.applications.efficientnet_v2 import ( EfficientNetV2B1 as EfficientNetV2B1, ) from...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.applications.efficientnet_v2 import EfficientNetV2B0 from keras.src.applications.efficientnet_v2 import EfficientNetV2B1 from keras.src.applications.efficientnet_v2 import EfficientNe...
input:
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(
        type='InstaBoost',
        action_candidate=('normal', 'horizontal', 'skip'),
        action_prob=(1, 0, 0),
        scale=(0.8, 1.2),
        d...

output:
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'

train_pipeline = [
    dict(
        type='LoadImageFromFile',
        file_client_args={{_base_.file_client_args}}),
    dict(
        type='InstaBoost',
        action_candidate=('normal', 'horizontal', 'skip'),
        action_prob=(1, 0, 0),
        sc...

""" Class for searching and importing data from OpenAlex. """ import logging from typing import List import requests from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document logger = logging.getLogger(__name__) logger.setLevel(logging.ERROR) class OpenAlexReader(BaseReader)...
""" Class for searching and importing data from OpenAlex. """ import logging from typing import List import requests from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document logger = logging.getLogger(__name__) logger.setLevel(logging.ERROR) class OpenAlexReader(BaseReader)...
input:
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod


class BaseAssigner(metaclass=ABCMeta):
    """Base assigner that assigns boxes to ground truth boxes."""

    @abstractmethod
    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign boxes ...

output:
from abc import ABCMeta, abstractmethod


class BaseAssigner(metaclass=ABCMeta):
    """Base assigner that assigns boxes to ground truth boxes."""

    @abstractmethod
    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign boxes to either a ground truth boxes or a negative box...

input:
from unittest.mock import MagicMock, AsyncMock

import pytest
import sys

from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader

READER_TEST_PARAM = pytest.param(
    [
        "https://sandbox.oxylabs.io/products/1",
        "https://sandbox.oxylabs.io/products/2",
    ],
    {
        "parse": True,
        ...

output:
from unittest.mock import MagicMock, AsyncMock

import pytest
import sys

from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader

READER_TEST_PARAM = pytest.param(
    [
        "https://sandbox.oxylabs.io/products/1",
        "https://sandbox.oxylabs.io/products/2",
    ],
    {
        "parse": True,
        ...

input:
from pathlib import Path
from typing import Any, BinaryIO, Optional, Union

from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets...

output:
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union

from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision...

"""Spotify reader.""" from typing import List, Optional from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class SpotifyReader(BaseReader): """ Spotify Reader. Read a user's saved albums, tracks, or playlists from Spotify. """ def load_data(self,...
"""Spotify reader.""" from typing import List, Optional from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class SpotifyReader(BaseReader): """Spotify Reader. Read a user's saved albums, tracks, or playlists from Spotify. """ def load_data(self, coll...
input:
import os
import time

import pytest
import subprocess

cur_dir = os.path.dirname(os.path.abspath(__file__))


@pytest.fixture()
def docker_image():
    import docker

    client = docker.from_env()
    client.images.build(path=os.path.join(cur_dir), tag='clitest')
    client.close()
    yield
    time.sleep(2)
    cli...

output:
import os
import time

import pytest
import subprocess

cur_dir = os.path.dirname(os.path.abspath(__file__))


@pytest.fixture()
def docker_image():
    import docker

    client = docker.from_env()
    client.images.build(path=os.path.join(cur_dir), tag='clitest')
    client.close()
    yield
    time.sleep(2)
    cli...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .sampler_seed_hook import DistSamplerSeedHook

__all__ = [
    ...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .sampler_seed_hook import DistSamplerSeedHook

__all__ = [
    'Hook', 'IterTimerHook', 'DistSamplerSeedHo...

input:
import pytest

from backend.util.request import pin_url, validate_url


@pytest.mark.parametrize(
    "raw_url, trusted_origins, expected_value, should_raise",
    [
        # Rejected IP ranges
        ("localhost", [], None, True),
        ("192.168.1.1", [], None, True),
        ("127.0.0.1", [], None, True),
        ...

output:
import pytest

from backend.util.request import validate_url


@pytest.mark.parametrize(
    "url, trusted_origins, expected_value, should_raise",
    [
        # Rejected IP ranges
        ("localhost", [], None, True),
        ("192.168.1.1", [], None, True),
        ("127.0.0.1", [], None, True),
        ("0.0.0.0",...

input:
_base_ = './mask-rcnn_r50_fpn_instaboost-4x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='...

output:
_base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='...

input:
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0rc2'
short_version = __version__


def parse_version_info(version_str):
    """Parse a version string into a tuple.

    Args:
        version_str (str): The version string.

    Returns:
        tuple[int | str]: The version info, e.g., "1.3.0" is par...

output:
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0rc1'
short_version = __version__


def parse_version_info(version_str):
    """Parse a version string into a tuple.

    Args:
        version_str (str): The version string.

    Returns:
        tuple[int | str]: The version info, e.g., "1.3.0" is par...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from .evaluator import Evaluator
from .metric import BaseMetric
from .utils import get_metric_value

__all__ = ['BaseMetric', 'Evaluator', 'get_metric_value']

output:
# Copyright (c) OpenMMLab. All rights reserved.
from .base import BaseEvaluator
from .builder import build_evaluator
from .composed_evaluator import ComposedEvaluator
from .utils import get_metric_value

__all__ = [
    'BaseEvaluator', 'ComposedEvaluator', 'build_evaluator', 'get_metric_value'
]

input:
import pytest
from datasets import Dataset
from torch.utils.data import BatchSampler, ConcatDataset, SequentialSampler

from sentence_transformers.sampler import RoundRobinBatchSampler

DATASET_LENGTH = 25


@pytest.fixture
def dummy_concat_dataset() -> ConcatDataset:
    """
    Dummy dataset for testing purposes. The...

output:
import pytest
from datasets import Dataset

from sentence_transformers.sampler import RoundRobinBatchSampler
from torch.utils.data import BatchSampler, SequentialSampler, ConcatDataset

DATASET_LENGTH = 25


@pytest.fixture
def dummy_concat_dataset() -> ConcatDataset:
    """
    Dummy dataset for testing purposes. The...

input:
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl...

output:
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl...

input:
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar

import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator

from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docar...

output:
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar

import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator

from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docar...

input:
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.vectorstores.utils import (
        DistanceStrategy,
        filter_complex_metadata,
        maximal_marginal_relevance,
    )

# Create a way to dynamically look up deprecated imports.
# ...

output:
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.vectorstores.utils import (
        DistanceStrategy,
        filter_complex_metadata,
        maximal_marginal_relevance,
    )

# Create a way to dynamically look up deprecated imports.
# ...

input:
from enum import Enum
from typing import Any, Dict, Iterable

import torch.nn.functional as F
from torch import Tensor, nn

from sentence_transformers.SentenceTransformer import SentenceTransformer


class SiameseDistanceMetric(Enum):
    """The metric for the contrastive loss"""

    EUCLIDEAN = lambda x, y: F.pairwis...

output:
from enum import Enum
from typing import Dict, Iterable

import torch.nn.functional as F
from torch import Tensor, nn

from sentence_transformers.SentenceTransformer import SentenceTransformer


class SiameseDistanceMetric(Enum):
    """The metric for the contrastive loss"""

    EUCLIDEAN = lambda x, y: F.pairwise_dis...

input:
from typing import ClassVar, Optional, Union

import torch
import torch.utils.checkpoint
from torch import nn
from transformers.models.paligemma.modeling_paligemma import PaliGemmaForConditionalGeneration

from ...cache_utils import Cache


class NewTaskModelForNewTask(PaliGemmaForConditionalGeneration):
    main_inpu...

output:
from typing import ClassVar, Optional, Union

import torch
import torch.utils.checkpoint
from torch import nn
from transformers.models.paligemma.modeling_paligemma import PaliGemmaForConditionalGeneration

from ...cache_utils import Cache


class NewTaskModelForNewTask(PaliGemmaForConditionalGeneration):
    main_inpu...

input:
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from unittest.mock import Mock, patch

from mmengine.hooks import CheckpointHook


class MockPetrel:

    _allow_symlink = False

    def __init__(self):
        pass

    @property
    def name(self):
        return self.__class__.__name__...

output:
# Copyright (c) OpenMMLab. All rights reserved.
import os
import sys
from tempfile import TemporaryDirectory
from unittest.mock import Mock, patch

from mmengine.hooks import CheckpointHook

sys.modules['file_client'] = sys.modules['mmengine.fileio.file_client']


class MockPetrel:

    _allow_symlink = False

    def ...

__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess from typing import List import numpy as np import pytest from jina import Document, DocumentArray, Flow from ...paddle_image import ImagePaddlehubEncoder @pytest.mark.parametrize( 'arr_in', ...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import List import numpy as np import pytest from jina import Flow, Document, DocumentArray from ...paddle_image import ImagePaddlehubEncoder @pytest.mark.parametrize('arr_in', [ (np.ones((3, 224, 2...
input:
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth'  # noqa
model = dict(
    type='LAD',
    # student
    bac...

output:
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth'  # noqa
model = dict(
    type='LAD',
    # student
    bac...

input:
import json
import logging
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, AsyncGenerator, Generator, Generic, TypeVar

from pydantic import BaseModel
from redis.asyncio.client import PubSub as AsyncPubSub
from redis.client import PubSub

from backend.data import redis
from bac...

output:
import json
import logging
from abc import ABC, abstractmethod
from datetime import datetime

from backend.data import redis
from backend.data.execution import ExecutionResult

logger = logging.getLogger(__name__)


class DateTimeEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, datetime):
            ...

input:
_base_ = [
    '../_base_/models/faster-rcnn_r50-caffe-c4.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/datasets/voc0712.py',
    '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))

# dataset settings
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        ...

output:
_base_ = [
    '../_base_/models/faster_rcnn_r50_caffe_c4.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/datasets/voc0712.py',
    '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))

# dataset settings
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        ...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
                         Contrast, Equalize, Invert, Posterize, Sharpness,
                         Solarize, SolarizeAdd)
from .formatting imp...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
                         Contrast, Equalize, Invert, Posterize, Sharpness,
                         Solarize, SolarizeAdd)
from .formatting imp...

"""Math utils.""" import logging from typing import List, Optional, Tuple, Union import numpy as np logger = logging.getLogger(__name__) Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray] def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray: """Row-wise cosine similarity between two equal-wi...
"""Math utils.""" import logging from typing import List, Optional, Tuple, Union import numpy as np logger = logging.getLogger(__name__) Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray] def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray: """Row-wise cosine similarity between two equal-wi...
input:
import multiprocessing

import pytest

from jina import DocumentArray, Executor, requests
from jina.parsers import set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from jina.serve.streamer import GatewayStreamer


class StreamerTestExecutor(...

output:
import multiprocessing

import pytest

from jina import DocumentArray, Executor, requests
from jina.parsers import set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from jina.serve.streamer import GatewayStreamer


class StreamerTestExecutor(...

input:
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar

import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator

from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.utils._internal.mis...

output:
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar

import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator

from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto

if TYPE_CHECKING:
    from pydan...

"""Base interfaces for tracing runs.""" from langchain_core.exceptions import TracerException from langchain_core.tracers.base import BaseTracer __all__ = ["BaseTracer", "TracerException"]
"""Base interfaces for tracing runs.""" from langchain_core.tracers.base import BaseTracer, TracerException __all__ = ["BaseTracer", "TracerException"]
input:
import asyncio
import copy
from typing import Any, List, Optional

from jina.serve.gateway import BaseGateway


class CompositeGateway(BaseGateway):
    """GRPC Gateway implementation"""

    def __init__(
        self,
        **kwargs,
    ):
        """Initialize the gateway
        :param kwargs: keyword args
        ...

output:
import asyncio
import copy
from typing import Any, List, Optional

from jina.serve.gateway import BaseGateway


class CompositeGateway(BaseGateway):
    """GRPC Gateway implementation"""

    def __init__(
        self,
        **kwargs,
    ):
        """Initialize the gateway
        :param kwargs: keyword args
        ...

""" =================================================== Recursive feature elimination with cross-validation =================================================== A Recursive Feature Elimination (RFE) example with automatic tuning of the number of features selected with cross-validation. """ # Authors: The scikit-learn...
""" =================================================== Recursive feature elimination with cross-validation =================================================== A Recursive Feature Elimination (RFE) example with automatic tuning of the number of features selected with cross-validation. """ # Authors: The scikit-learn...
input:
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = re...

output:
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = r...

input:
import pytest
from absl.testing import parameterized

from keras.src import backend
from keras.src import layers
from keras.src import testing


class IdentityTest(testing.TestCase):
    @parameterized.named_parameters(
        [
            {"testcase_name": "dense", "sparse": False},
            {"testcase_name": "sp...

output:
import pytest
from absl.testing import parameterized

from keras.src import backend
from keras.src import layers
from keras.src import testing


class IdentityTest(testing.TestCase, parameterized.TestCase):
    @parameterized.named_parameters(
        [
            {"testcase_name": "dense", "sparse": False},
            {"testcase_name": "sp...

input:
def check_health_pod(addr: str):
    """check if a pods is healthy

    :param addr: the address on which the pod is serving ex : localhost:1234
    """
    from jina.serve.runtimes.servers import BaseServer

    is_ready = BaseServer.is_ready(addr)
    if not is_ready:
        raise Exception('Pod is unhealthy')
    ...

output:
def check_health_pod(addr: str):
    """check if a pods is healthy

    :param addr: the address on which the pod is serving ex : localhost:1234
    """
    from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime

    is_ready = AsyncNewLoopRuntime.is_ready(addr)
    if not is_ready:
        raise Exception('Pod i...

input:
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# U...

output:
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# U...

__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess import numpy as np import pytest from executor.audioclip_image import AudioCLIPImageEncoder from jina import Document, DocumentArray, Flow @pytest.mark.parametrize("request_size", [1, 10, 50,...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess import numpy as np import pytest from jina import Document, DocumentArray, Flow from ...audioclip_image import AudioCLIPImageEncoder @pytest.mark.parametrize("request_size", [1, 10, 50, 100]...
"""Lilac reader that loads enriched and labeled Lilac datasets into GPTIndex and LangChain.""" from typing import TYPE_CHECKING, List, Optional from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document if TYPE_CHECKING: from lilac import ColumnId, FilterLike, Path class ...
"""Lilac reader that loads enriched and labeled Lilac datasets into GPTIndex and LangChain.""" from typing import TYPE_CHECKING, List, Optional from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document if TYPE_CHECKING: from lilac import ColumnId, FilterLike, Path class L...
input:
_base_ = './retinanet_r50_fpn_1x_coco.py'
# use caffe img_norm
preprocess_cfg = dict(
    mean=[103.530, 116.280, 123.675],
    std=[1.0, 1.0, 1.0],
    to_rgb=False,
    pad_size_divisor=32)
model = dict(
    preprocess_cfg=preprocess_cfg,
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_ev...

output:
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg...

input:
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch

from ..builder import BBOX_SAMPLERS
from .random_sampler import RandomSampler


@BBOX_SAMPLERS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
    """Instance balanced sampler that samples equal number of positive sample...

output:
import numpy as np
import torch

from ..builder import BBOX_SAMPLERS
from .random_sampler import RandomSampler


@BBOX_SAMPLERS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
    """Instance balanced sampler that samples equal number of positive
    samples for each instance."""

    def _sample_pos...

input:
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.document_transformers import Html2TextTransformer

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optio...

output:
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.document_transformers import Html2TextTransformer

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optio...

input:
from typing import TYPE_CHECKING, Optional, Type

from langchain_core.callbacks import (
    CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field

if TYPE_CHECKING:
    # This is for linting and IDE typehints
    import multion
else:
    try:
        # We do this ...

output:
from typing import TYPE_CHECKING, Optional, Type

from langchain_core.callbacks import (
    CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field

if TYPE_CHECKING:
    # This is for linting and IDE typehints
    import multion
else:
    try:
        # We do this ...

input:
from contextlib import nullcontext
from sentence_transformers.evaluation import SentenceEvaluator
from sentence_transformers import SentenceTransformer
from typing import List, Optional, Tuple, Dict
import numpy as np
import logging
import os
import csv

logger = logging.getLogger(__name__)


class MSEEvaluatorFromDat...

output:
from contextlib import nullcontext
from sentence_transformers.evaluation import SentenceEvaluator
from sentence_transformers import SentenceTransformer
from typing import List, Optional, Tuple, Dict
import numpy as np
import logging
import os
import csv

logger = logging.getLogger(__name__)


class MSEEvaluatorFromDat...

input:
import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data

from keras.src import backend
from keras.src import layers
from keras.src import testing


class RandomRotationTest(testing.TestCase):
    @parameterized.named_parameters(
        ("random_rotate_neg4", -0.4),
        ("ra...

output:
import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data

from keras.src import backend
from keras.src import layers
from keras.src import testing


class RandomRotationTest(testing.TestCase, parameterized.TestCase):
    @parameterized.named_parameters(
        ("random_rotate_ne...

input:
import pytest

from jina import Executor, Flow, requests
from jina.constants import __default_executor__
from tests import random_docs


@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow(protocol):
    docs = random_docs(10)
    f = Flow(protocol=protocol).add(name='p1')

    with f:
        ...

output:
import pytest

from jina import Executor, Flow, __default_executor__, requests
from tests import random_docs


@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow(protocol):
    docs = random_docs(10)
    f = Flow(protocol=protocol).add(name='p1')

    with f:
        f.index(docs)
    ...

input:
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If ex...

output:
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If ex...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict

import torch.nn as nn
from torch import Tensor

from mmdet.registry import MODELS
from ..layers import (ConditionalDetrTransformerDecoder, DetrTransformerEncoder,
                      SinePositionalEncoding)
from .detr import DETR


@MODELS.regis...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict

import torch.nn as nn
from torch import Tensor

from mmdet.registry import MODELS
from ..layers import (ConditionalDetrTransformerDecoder, DetrTransformerEncoder,
                      SinePositionalEncoding)
from .detr import DETR


@MODELS.regis...

""" ==================================== How to write your own TVTensor class ==================================== .. note:: Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_tv_tensors.ipynb>`_ or :ref:`go to the end <sphx_glr_dow...
""" ===================================== How to write your own TVTensor class ===================================== .. note:: Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_tv_tensors.ipynb>`_ or :ref:`go to the end <sphx_glr_d...
"""Module to change the configuration of libsox, which is used by I/O functions like :py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`. """ from typing import Dict, List import torchaudio sox_ext = torchaudio._extension.lazy_import_sox_ext() from torchaudio._internal.module_utils im...
"""Module to change the configuration of libsox, which is used by I/O functions like :py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`. """ from typing import Dict, List import torchaudio sox_ext = torchaudio._extension.lazy_import_sox_ext() def set_seed(seed: int): """Set libs...
input:
from typing import Dict

from jina.helper import TYPE_CHECKING, T, deprecate_by, typename

if TYPE_CHECKING:  # pragma: no cover
    from jina.proto import jina_pb2


class ProtoTypeMixin:
    """The base mixin class of all Jina types.

    .. note::
        - All Jina types should inherit from this class.
        - Al...

output:
from typing import Dict

from jina.helper import TYPE_CHECKING, T, deprecate_by, typename

if TYPE_CHECKING:  # pragma: no cover
    from jina.proto import jina_pb2


class ProtoTypeMixin:
    """The base mixin class of all Jina types.

    .. note::
        - All Jina types should inherit from this class.
        - All...

"""Standard LangChain interface tests""" import pytest from langchain_core.language_models import BaseChatModel from langchain_tests.unit_tests import ChatModelUnitTests from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped] from langchain_anthropic import ChatAnthropic class TestAnth...
"""Standard LangChain interface tests""" from langchain_core.language_models import BaseChatModel from langchain_tests.unit_tests import ChatModelUnitTests from langchain_anthropic import ChatAnthropic class TestAnthropicStandard(ChatModelUnitTests): @property def chat_model_class(self) -> type[BaseChatMode...
input:
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import (AdaptiveAvgPool2d, FrozenBatchNorm2d,
                            adaptive_avg_pool2d)
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from ....

output:
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMom...

input:
import torchaudio
from torchaudio_unittest import common_utils


class BackendSwitchMixin:
    """Test set/get_audio_backend works"""

    backend = None
    backend_module = None

    def test_switch(self):
        torchaudio.backend.utils.set_audio_backend(self.backend)
        if self.backend is None:
            as...

output:
from unittest.mock import patch

import torchaudio
from torchaudio_unittest import common_utils


class BackendSwitchMixin:
    """Test set/get_audio_backend works"""

    backend = None
    backend_module = None

    @patch("torchaudio.backend.utils._is_backend_dispatcher_enabled", lambda: False)
    def test_switch(s...

input:
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from...

output:
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from...

"""Types for content blocks.""" from typing import Any, Literal, Union from pydantic import TypeAdapter, ValidationError from typing_extensions import NotRequired, TypedDict class BaseDataContentBlock(TypedDict, total=False): """Base class for data content blocks.""" mime_type: NotRequired[str] """MIME...
"""Types for content blocks.""" from typing import Any, Literal, Union from pydantic import TypeAdapter, ValidationError from typing_extensions import NotRequired, TypedDict class BaseDataContentBlock(TypedDict): """Base class for data content blocks.""" mime_type: NotRequired[str] """MIME type of the ...
input:
from typing import TYPE_CHECKING, Optional, Dict

if TYPE_CHECKING:
    from ... import DocumentArray


class PostMixin:
    """Helper functions for posting DocumentArray to Jina Flow."""

    def post(
        self,
        host: str,
        show_progress: bool = False,
        batch_size: Optional[int] = None,
        ...

output:
from typing import TYPE_CHECKING, Optional, Dict

if TYPE_CHECKING:
    from ... import DocumentArray


class PostMixin:
    """Helper functions for posting DocumentArray to Jina Flow."""

    def post(
        self,
        host: str,
        show_progress: bool = False,
        batch_size: Optional[int] = None,
        ...

input:
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .fluentcommands import FluentSpeechCommands
from .gtzan import GTZAN
from .iemocap import IEMOCAP
from .librilight_limited import LibriLightLimited
from .librimix import LibriMix
from .li...

output:
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .fluentcommands import FluentSpeechCommands
from .gtzan import GTZAN
from .librilight_limited import LibriLightLimited
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
...

input:
import json
from json import JSONDecodeError
from typing import Union

from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    ToolCall,
)
from langchain_core.o...

output:
import json
from json import JSONDecodeError
from typing import Union

from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    ToolCall,
)
from langchain_core.o...

input:
# Copyright (c) OpenMMLab. All rights reserved.
import copy

import torch.nn as nn
from mmcv.cnn import ConvModule, Scale

from mmdet.core import OptMultiConfig
from mmdet.models.dense_heads.fcos_head import FCOSHead
from mmdet.registry import MODELS


@MODELS.register_module()
class NASFCOSHead(FCOSHead):
    """Ancho...

output:
# Copyright (c) OpenMMLab. All rights reserved.
import copy

import torch.nn as nn
from mmcv.cnn import ConvModule, Scale

from mmdet.models.dense_heads.fcos_head import FCOSHead
from mmdet.registry import MODELS


@MODELS.register_module()
class NASFCOSHead(FCOSHead):
    """Anchor-free head used in `NASFCOS <https://...

input:
import itertools
import warnings
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Fe...

output:
import itertools
import warnings
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Fe...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union

from mmengine.registry import HOOKS
from .hook import Hook

DATA_BATCH = Optional[Union[dict, tuple, list]]


@HOOKS.register_module()
class ParamSchedulerHook(Hook):
    """A hook to update some hyper-parameters in optimizer, e.g., lea...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence

from mmengine.registry import HOOKS
from .hook import Hook

DATA_BATCH = Optional[Sequence[dict]]


@HOOKS.register_module()
class ParamSchedulerHook(Hook):
    """A hook to update some hyper-parameters in optimizer, e.g., learning r...

input:
# coding=utf-8
# Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://ww...

output:
# coding=utf-8
# Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://ww...

input:
import inspect
import re
from typing import Dict, List, Tuple

from huggingface_hub.utils import insecure_hashlib

from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parq...

output:
import inspect
import re
from typing import Dict, List, Tuple

from huggingface_hub.utils import insecure_hashlib

from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parq...

input:
import os
from typing import Dict

from hubble.executor.helper import is_valid_docker_uri, parse_hub_uri
from hubble.executor.hubio import HubIO

from jina.constants import (
    __default_composite_gateway__,
    __default_executor__,
    __default_grpc_gateway__,
    __default_http_gateway__,
    __default_websocket_...

output:
import os
from typing import Dict

from hubble.executor.helper import is_valid_docker_uri, parse_hub_uri
from hubble.executor.hubio import HubIO

from jina import (
    __default_composite_gateway__,
    __default_executor__,
    __default_grpc_gateway__,
    __default_http_gateway__,
    __default_websocket_gateway__,...

input:
import asyncio
from typing import Any, AsyncGenerator, List, Optional

from llama_index.core.workflow.context import Context
from llama_index.core.workflow.errors import WorkflowDone
from llama_index.core.workflow.events import Event, StopEvent

from .types import RunResultT
from .utils import BUSY_WAIT_DELAY


class W...

output:
import asyncio
from typing import Any, AsyncGenerator, List, Optional

from llama_index.core.workflow.context import Context
from llama_index.core.workflow.errors import WorkflowDone
from llama_index.core.workflow.events import Event, StopEvent

from .types import RunResultT
from .utils import BUSY_WAIT_DELAY


class W...

"""Utilities for the XGBoost Dask interface.""" import logging from typing import TYPE_CHECKING, Any, Dict LOGGER = logging.getLogger("[xgboost.dask]") if TYPE_CHECKING: import distributed def get_n_threads(local_param: Dict[str, Any], worker: "distributed.Worker") -> int: """Get the number of threads fro...
"""Utilities for the XGBoost Dask interface.""" import logging from typing import TYPE_CHECKING, Any, Dict LOGGER = logging.getLogger("[xgboost.dask]") if TYPE_CHECKING: import distributed def get_n_threads(local_param: Dict[str, Any], worker: "distributed.Worker") -> int: """Get the number of threads from...
input:
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Union

from ..registry import EVALUATORS
from .base import BaseEvaluator
from .composed_evaluator import ComposedEvaluator


def build_evaluator(
        cfg: Union[dict, list]) -> Union[BaseEvaluator, ComposedEvaluator]:
    """Build function of evalua...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union

from ..registry import EVALUATORS
from .base import BaseEvaluator
from .composed_evaluator import ComposedEvaluator


def build_evaluator(
    cfg: Union[dict, list], default_scope: Optional[str] = None
) -> Union[BaseEvaluator, Com...

""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
input:
import numpy as np
import pytest
from hnswlib_searcher import HnswlibSearcher
from jina import Document, DocumentArray, Flow

_DIM = 10


@pytest.mark.parametrize('uses', ['HnswlibSearcher', 'docker://hnswlibsearcher'])
def test_index_search_flow(uses: str, build_docker_image: str):
    f = Flow().add(uses=uses, uses_w...

output:
import numpy as np
import pytest
from hnswlib_searcher import HnswlibSearcher
from jina import Document, DocumentArray, Flow

_DIM = 10


@pytest.mark.parametrize('uses', ['HnswlibSearcher', 'docker://hnswlibsearcher'])
def test_index_search_flow(uses: str, build_docker_image: str):
    f = Flow().add(uses=uses, uses_w...

"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.quantizers import deserialize as deserialize from keras.src.quantizers import get as get from keras.src.quantizers import serialize as serialize from keras.src.quantizers.quantizers i...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.quantizers import deserialize as deserialize from keras.src.quantizers import get as get from keras.src.quantizers import serialize as serialize from keras.src.quantizers.quantizers i...
input:
# Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .quasi_dense_tracker import QuasiDenseTracker
from .sort_tracker import SORTTracker

__all__ = ['BaseTracker', 'ByteTracker', 'QuasiDenseTracker', 'SORTTracker']

output:
# Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .quasi_dense_tracker import QuasiDenseTracker

__all__ = ['BaseTracker', 'ByteTracker', 'QuasiDenseTracker']

input:
_base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py'
model = dict(
    data_preprocessor=dict(
        mean=[103.53, 116.28, 123.675],
        std=[57.375, 57.12, 58.395],
        bgr_to_rgb=False),
    backbone=dict(
        _delete_=True,
        type='HRNet',
        extra=dict(
            stage1=dict(
                ...

output:
_base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py'
model = dict(
    backbone=dict(
        _delete_=True,
        type='HRNet',
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                ...

input:
# Copyright (c) OpenMMLab. All rights reserved.
"""MMDetection provides 17 registry nodes to support using modules across
projects. Each node is a child of the root registry in MMEngine.

More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""

from mmengine.registry import D...

output:
# Copyright (c) OpenMMLab. All rights reserved.
"""MMDetection provides 17 registry nodes to support using modules across
projects. Each node is a child of the root registry in MMEngine.

More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""

from mmengine.registry import D...

""" Gcs file and directory reader. A loader that fetches a file or iterates through a directory on Gcs. """ from typing import Dict, List, Optional, Union from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document from llama_index.readers.opendal.base import OpendalReader cl...
"""Gcs file and directory reader. A loader that fetches a file or iterates through a directory on Gcs. """ from typing import Dict, List, Optional, Union from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document from llama_index.readers.opendal.base import OpendalReader cla...
input:
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag...

output:
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag...

""" This file is part of the private API. Please do not use directly these classes as they will be modified on future versions without warning. The classes should be accessed only via the transforms argument of Weights. """ from typing import List, Optional, Tuple, Union import PIL.Image import torch from torch impor...
""" This file is part of the private API. Please do not use directly these classes as they will be modified on future versions without warning. The classes should be accessed only via the transforms argument of Weights. """ from typing import List, Optional, Tuple, Union import PIL.Image import torch from torch impor...
input:
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast

import numpy as np

from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal....

output:
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast

import numpy as np

from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal....

"""Module for parsing text files..""" from typing import Iterator from langchain_core.documents import Document from langchain_community.document_loaders.base import BaseBlobParser from langchain_community.document_loaders.blob_loaders import Blob class TextParser(BaseBlobParser): """Parser for text blobs.""" ...
"""Module for parsing text files..""" from typing import Iterator from langchain_core.documents import Document from langchain_community.document_loaders.base import BaseBlobParser from langchain_community.document_loaders.blob_loaders import Blob class TextParser(BaseBlobParser): """Parser for text blobs.""" ...
input:
from docarray.index.backends.elastic import ElasticV7DocIndex
from docarray.index.backends.hnswlib import HnswDocumentIndex

__all__ = ['HnswDocumentIndex', 'ElasticV7DocIndex']

output:
from docarray.index.backends.hnswlib import HnswDocumentIndex

__all__ = ['HnswDocumentIndex']

input:
import logging

from datasets import load_dataset

from sentence_transformers.sparse_encoder import (
    MLMTransformer,
    SparseEncoder,
    SparseTripletEvaluator,
    SpladePooling,
)

logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)

# Initialize the SPLADE...

output:
from datasets import load_dataset

from sentence_transformers.sparse_encoder import (
    MLMTransformer,
    SparseEncoder,
    SparseTripletEvaluator,
    SpladePooling,
)

# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
    modules=[
        MLMTransformer(...

input:
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast

import numpy as np

from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal....

output:
from typing import Union

from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.utils._internal.misc import is_tf_available, is_torch_available

torch_available = is_torch_available()
if torch_available:
    from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor

tf_av...

input:
import importlib.util
import os
import warnings
from functools import wraps
from typing import Optional


def eval_env(var, default):
    """Check if environment varable has True-y value"""
    if var not in os.environ:
        return default
    val = os.environ.get(var, "0")
    trues = ["1", "true", "TRUE", "on", "...

output:
import importlib.util
import warnings
from functools import wraps
from typing import Optional


def is_module_available(*modules: str) -> bool:
    r"""Returns if a top-level module with :attr:`name` exists *without**
    importing it. This is generally safer than try-catch block around a
    `import X`. It avoids thir...