Columns: input (string, lengths 33–5k) and output (string, lengths 32–5k). The rows below alternate one input cell with its paired output cell; cells longer than the preview limit are cut off with a trailing "...".
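A minimal sketch of how a single record in this preview is shaped, assuming only the two-column string schema above (the dict literal, placeholder strings, and comments are illustrative, not taken from the dataset):

# One preview record: a pair of flattened Python snippets (hypothetical values).
record = {
    "input": "<source snippet, 33 to 5k characters>",    # left column
    "output": "<rewritten snippet, 32 to 5k characters>",  # right column
}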
import json import re from typing import TypeVar import yaml from langchain_core.exceptions import OutputParserException from langchain_core.output_parsers import BaseOutputParser from pydantic import BaseModel, ValidationError from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS T = Typ...
import json import re from typing import TypeVar import yaml from langchain_core.exceptions import OutputParserException from langchain_core.output_parsers import BaseOutputParser from pydantic import BaseModel, ValidationError from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS T = Typ...
input:
# Copyright (c) Meta Platforms, Inc. and affiliates
from llama_index.llms.meta.base import LlamaLLM

__all__ = ["LlamaLLM"]

output:
from llama_index.llms.meta.base import LlamaLLM

__all__ = ["LlamaLLM"]
from jina import Client, Document, Executor, Flow, requests def validate_results(results): req = results[0] assert len(req.docs) == 1 assert len(req.docs[0].matches) == 5 assert len(req.docs[0].matches[0].matches) == 5 assert len(req.docs[0].matches[-1].matches) == 5 assert len(req.docs[0].mat...
from jina import Client, Document, Executor, Flow, requests exposed_port = 12345 def validate_results(results): req = results[0] assert len(req.docs) == 1 assert len(req.docs[0].matches) == 5 assert len(req.docs[0].matches[0].matches) == 5 assert len(req.docs[0].matches[-1].matches) == 5 asse...
input: _base_ = './faster-rcnn_r50-caffe-dc5_ms-1x_coco.py' # MMEngine support the following two ways, users can choose # according to convenience # param_scheduler = [ # dict( # type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), # noqa # dict( # type='MultiStepLR', # begi...

output:
_base_ = './faster-rcnn_r50-caffe-dc5_ms-1x_coco.py'

# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
import logging from typing import Any from autogpt_libs.utils.cache import thread_cached from backend.data.block import ( Block, BlockCategory, BlockInput, BlockOutput, BlockSchema, BlockType, get_block, ) from backend.data.execution import ExecutionStatus from backend.data.model import Sc...
import logging from typing import Any from autogpt_libs.utils.cache import thread_cached from backend.data.block import ( Block, BlockCategory, BlockInput, BlockOutput, BlockSchema, BlockType, get_block, ) from backend.data.execution import ExecutionStatus from backend.data.model import Sc...
from typing import Iterable, Dict from docarray.array.storage.annlite.helper import OffsetMapping from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin from docarray.array.storage.base.helper import Offset2ID from docarray.array.memory import DocumentArrayInMemory from docarray import Document class G...
from typing import Iterable, Dict from .helper import OffsetMapping from ..base.getsetdel import BaseGetSetDelMixin from ..base.helper import Offset2ID from ...memory import DocumentArrayInMemory from .... import Document class GetSetDelMixin(BaseGetSetDelMixin): """Implement required and derived functions that ...
_base_ = './mask_rcnn_r101_fpn_1x_coco.py' preprocess_cfg = dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], to_rgb=False, pad_size_divisor=32) model = dict( preprocess_cfg=preprocess_cfg, backbone=dict( type='ResNeXt', depth=101, groups=32, b...
_base_ = './mask_rcnn_r101_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=8, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), style='pytorch',...
from enum import Enum # --8<-- [start:ProviderName] class ProviderName(str, Enum): ANTHROPIC = "anthropic" COMPASS = "compass" DISCORD = "discord" D_ID = "d_id" E2B = "e2b" EXA = "exa" FAL = "fal" GITHUB = "github" GOOGLE = "google" GOOGLE_MAPS = "google_maps" GROQ = "groq"...
from enum import Enum # --8<-- [start:ProviderName] class ProviderName(str, Enum): ANTHROPIC = "anthropic" COMPASS = "compass" DISCORD = "discord" D_ID = "d_id" E2B = "e2b" EXA = "exa" FAL = "fal" GITHUB = "github" GOOGLE = "google" GOOGLE_MAPS = "google_maps" GROQ = "groq"...
# mypy: allow-untyped-defs import functools from collections.abc import Hashable from dataclasses import dataclass, fields from typing import TypeVar from typing_extensions import dataclass_transform T = TypeVar("T", bound="_Union") class _UnionTag(str): __slots__ = ("_cls",) _cls: Hashable @staticmeth...
# mypy: allow-untyped-defs import functools from collections.abc import Hashable from dataclasses import dataclass, fields from typing import TypeVar from typing_extensions import dataclass_transform T = TypeVar("T", bound="_Union") class _UnionTag(str): __slots__ = ("_cls",) _cls: Hashable @staticmeth...
"""Common structures for structured indices.""" from dataclasses import dataclass from typing import Dict, Optional from dataclasses_json import DataClassJsonMixin # TODO: migrate this to be a data_struct @dataclass class SQLContextContainer(DataClassJsonMixin): """ SQLContextContainer. A container int...
"""Common structures for structured indices.""" from dataclasses import dataclass from typing import Dict, Optional from dataclasses_json import DataClassJsonMixin # TODO: migrate this to be a data_struct @dataclass class SQLContextContainer(DataClassJsonMixin): """ SQLContextContainer. A container inte...
# Owner(s): ["module: inductor"] from unittest.mock import patch import torch from torch._inductor import config from torch._inductor.async_compile import AsyncCompile, shutdown_compile_workers from torch._inductor.runtime.triton_compat import Config from torch._inductor.runtime.triton_heuristics import ( generate...
# Owner(s): ["module: inductor"] import torch from torch._inductor import config from torch._inductor.async_compile import AsyncCompile, shutdown_compile_workers from torch._inductor.test_case import run_tests, TestCase from torch._inductor.utils import fresh_cache from torch.testing._internal.common_utils import ( ...
import pytest from docarray import DocumentArray, Document from docarray.array.weaviate import DocumentArrayWeaviate import numpy as np @pytest.fixture() def docs(): return DocumentArray([Document(id=f'{i}') for i in range(1, 10)]) @pytest.mark.parametrize( 'to_delete', [ 0, 1, ...
import pytest from docarray import DocumentArray, Document from docarray.array.weaviate import DocumentArrayWeaviate import numpy as np @pytest.fixture() def docs(): return DocumentArray([Document(id=f'{i}') for i in range(1, 10)]) @pytest.mark.parametrize( 'to_delete', [ 0, 1, ...
"""Helper functions for managing the LangChain API. This module is only relevant for LangChain developers, not for users. .. warning:: This module and its submodules are for internal use only. Do not use them in your own code. We may change the API at any time with no warning. """ from .deprecation impor...
"""Helper functions for managing the LangChain API. This module is only relevant for LangChain developers, not for users. .. warning:: This module and its submodules are for internal use only. Do not use them in your own code. We may change the API at any time with no warning. """ from .deprecation impor...
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar...
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar...
# Copyright (c) OpenMMLab. All rights reserved. from .manager import ManagerMeta, ManagerMixin from .misc import (check_prerequisites, concat_list, deprecated_api_warning, deprecated_function, has_method, import_modules_from_strings, is_list_of, is_method_overrid...
# Copyright (c) OpenMMLab. All rights reserved. from .manager import ManagerMeta, ManagerMixin from .misc import (check_prerequisites, concat_list, deprecated_api_warning, has_method, import_modules_from_strings, is_list_of, is_method_overridden, is_seq_of, is_str, is_tuple_of, ...
_base_ = [ '../_base_/models/mask-rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( frozen_stages=0, norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False, ...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( frozen_stages=0, norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False, ...
from torchaudio._internal.module_utils import dropping_support from ._alignment import forced_align as _forced_align, merge_tokens, TokenSpan from .filtering import ( allpass_biquad, band_biquad, bandpass_biquad, bandreject_biquad, bass_biquad, biquad, contrast, dcshift, deemph_biqu...
from ._alignment import forced_align, merge_tokens, TokenSpan from .filtering import ( allpass_biquad, band_biquad, bandpass_biquad, bandreject_biquad, bass_biquad, biquad, contrast, dcshift, deemph_biquad, dither, equalizer_biquad, filtfilt, flanger, gain, hi...
import warnings from typing import Any from langchain_core.memory import BaseMemory from pydantic import field_validator from langchain.memory.chat_memory import BaseChatMemory class CombinedMemory(BaseMemory): """Combining multiple memories' data together.""" memories: list[BaseMemory] """For tracking...
import warnings from typing import Any from langchain_core.memory import BaseMemory from pydantic import field_validator from langchain.memory.chat_memory import BaseChatMemory class CombinedMemory(BaseMemory): """Combining multiple memories' data together.""" memories: list[BaseMemory] """For tracking...
from typing import Dict, Optional, Union import pytest from docarray.typing import NdArray, TorchTensor from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.utils._typing import is_tensor_union, is_type_tensor from docarray.utils.misc import is_tf_available tf_available = is_tf_available()...
from typing import Dict, Optional, Union import pytest from docarray.typing import NdArray, TorchTensor from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.utils._typing import is_tensor_union, is_type_tensor try: from docarray.typing import TensorFlowTensor except (ImportError, TypeE...
# Copyright (c) OpenMMLab. All rights reserved. import math import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, auto_fp16 from mmdet.registry import MODELS @MODELS.register_module() class CTResNetNeck(BaseModule): """The neck used in `CenterNet <https://arxiv.org/abs/1904.0...
# Copyright (c) OpenMMLab. All rights reserved. import math import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, auto_fp16 from mmdet.models.builder import NECKS @NECKS.register_module() class CTResNetNeck(BaseModule): """The neck used in `CenterNet <https://arxiv.org/abs/19...
from __future__ import annotations from .CSRLoss import CSRLoss, CSRReconstructionLoss from .FlopsLoss import FlopsLoss from .SparseAnglELoss import SparseAnglELoss from .SparseCoSENTLoss import SparseCoSENTLoss from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss from .SparseDistillKLDivLoss import Spar...
from __future__ import annotations from .CSRLoss import CSRLoss, CSRReconstructionLoss from .FlopsLoss import FlopsLoss from .SparseAnglELoss import SparseAnglELoss from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegativesRankin...
"""Callback Handler that writes to a file.""" from __future__ import annotations from pathlib import Path from typing import TYPE_CHECKING, Any, Optional, TextIO, cast from langchain_core.callbacks import BaseCallbackHandler from langchain_core.utils.input import print_text if TYPE_CHECKING: from langchain_core...
"""Callback Handler that writes to a file.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Optional, TextIO, cast from langchain_core.callbacks import BaseCallbackHandler from langchain_core.utils.input import print_text if TYPE_CHECKING: from langchain_core.agents import AgentActio...
__version__ = '0.30.1' import logging from docarray.array import DocList, DocVec from docarray.base_doc.doc import BaseDoc from docarray.utils._internal.misc import _get_path_from_docarray_root_level __all__ = ['BaseDoc', 'DocList', 'DocVec'] logger = logging.getLogger('docarray') handler = logging.StreamHandler()...
__version__ = '0.30.1' import logging from docarray.array import DocList, DocVec from docarray.base_doc.doc import BaseDoc __all__ = ['BaseDoc', 'DocList', 'DocVec'] logger = logging.getLogger('docarray') handler = logging.StreamHandler() formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s") hand...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools import NavigateTool from langchain_community.tools.playwright.navigate import NavigateToolInput # Create a way to dynamically look up deprecated imports. # Used to consolidate log...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools import NavigateTool from langchain_community.tools.playwright.navigate import NavigateToolInput # Create a way to dynamically look up deprecated imports. # Used to consolidate log...
"""Dataset Module.""" from llama_index.core.llama_dataset.base import ( BaseLlamaDataExample, BaseLlamaDataset, BaseLlamaExamplePrediction, BaseLlamaPredictionDataset, CreatedBy, CreatedByType, ) from llama_index.core.llama_dataset.download import download_llama_dataset from llama_index.core.ll...
""" Dataset Module.""" from llama_index.core.llama_dataset.base import ( BaseLlamaDataExample, BaseLlamaDataset, BaseLlamaExamplePrediction, BaseLlamaPredictionDataset, CreatedBy, CreatedByType, ) from llama_index.core.llama_dataset.download import download_llama_dataset from llama_index.core.l...
"""[DEPRECATED] Pipeline prompt template.""" from typing import Any from pydantic import model_validator from langchain_core._api.deprecation import deprecated from langchain_core.prompt_values import PromptValue from langchain_core.prompts.base import BasePromptTemplate from langchain_core.prompts.chat import BaseC...
"""[DEPRECATED] Pipeline prompt template.""" from typing import Any from pydantic import model_validator from langchain_core._api.deprecation import deprecated from langchain_core.prompt_values import PromptValue from langchain_core.prompts.base import BasePromptTemplate from langchain_core.prompts.chat import BaseC...
import itertools import os.path import pytest from docarray import Document, DocumentArray from jina import Client, Executor, Flow, requests from jina.helper import random_port PROTOCOLS = ['grpc', 'http', 'websocket'] cur_dir = os.path.dirname(__file__) class MyExecutor(Executor): @requests def foo(self, ...
import itertools import os.path import pytest from docarray import Document, DocumentArray from jina import Client, Executor, Flow, requests from jina.helper import random_port PROTOCOLS = ['grpc', 'http', 'websocket'] cur_dir = os.path.dirname(__file__) class MyExecutor(Executor): @requests def foo(self, ...
# mypy: allow-untyped-defs from contextlib import contextmanager from typing import NoReturn try: from torch._C import _itt except ImportError: class _ITTStub: @staticmethod def _fail(*args, **kwargs) -> NoReturn: raise RuntimeError( "ITT functions not installed. A...
# mypy: allow-untyped-defs from contextlib import contextmanager try: from torch._C import _itt except ImportError: class _ITTStub: @staticmethod def _fail(*args, **kwargs): raise RuntimeError( "ITT functions not installed. Are you sure you have a ITT build?" ...
from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.ndarray import NdArray @_register_proto(proto_type_name='audio_ndarray') class AudioNdArray(AbstractAudioTensor, NdArray): """ Subclass of N...
from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.ndarray import NdArray @_register_proto(proto_type_name='audio_ndarray') class AudioNdArray(AbstractAudioTensor, NdArray): """ Subclass of N...
""" Epub parser. Contains parsers for epub files. """ from pathlib import Path from typing import Dict, List, Optional import logging from fsspec import AbstractFileSystem from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document logger = logging.getLogger(__name__) class E...
"""Epub parser. Contains parsers for epub files. """ from pathlib import Path from typing import Dict, List, Optional import logging from fsspec import AbstractFileSystem from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document logger = logging.getLogger(__name__) class Ep...
import os from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar import orjson from pydantic import BaseModel, Field from rich.console import Console from docarray.base_document.base_node import BaseNode from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode from docarray.base_docume...
import os from typing import Optional, Type import orjson from pydantic import BaseModel, Field from rich.console import Console from docarray.base_document.base_node import BaseNode from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode from docarray.base_document.mixins import IOMixin, Upd...
import os import sys from test_utils import DirectoryExcursion if len(sys.argv) != 4: print("Usage: {} [wheel to rename] [commit id] [platform tag]".format(sys.argv[0])) sys.exit(1) whl_path = sys.argv[1] commit_id = sys.argv[2] platform_tag = sys.argv[3] dirname, basename = os.path.dirname(whl_path), os.p...
import os import sys from contextlib import contextmanager @contextmanager def cd(path): path = os.path.normpath(path) cwd = os.getcwd() os.chdir(path) print("cd " + path) try: yield path finally: os.chdir(cwd) if len(sys.argv) != 4: print('Usage: {} [wheel to rename] [co...
import importlib import os import re from pathlib import Path from typing import Type, TypeVar from backend.data.block import Block # Dynamically load all modules under backend.blocks AVAILABLE_MODULES = [] current_dir = Path(__file__).parent modules = [ str(f.relative_to(current_dir))[:-3].replace(os.path.sep, "...
import importlib import os import re from pathlib import Path from typing import Type, TypeVar from backend.data.block import Block # Dynamically load all modules under backend.blocks AVAILABLE_MODULES = [] current_dir = Path(__file__).parent modules = [ str(f.relative_to(current_dir))[:-3].replace(os.path.sep, "...
from datetime import datetime from typing import List from backend.blocks.exa._auth import ( ExaCredentials, ExaCredentialsField, ExaCredentialsInput, ) from backend.blocks.exa.helpers import ContentSettings from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.mod...
from datetime import datetime from typing import List from backend.blocks.exa._auth import ( ExaCredentials, ExaCredentialsField, ExaCredentialsInput, ) from backend.blocks.exa.helpers import ContentSettings from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.mod...
try: import sklearn except ImportError: sklearn = None def _validate_data(estimator, *args, **kwargs): """Validate the input data. wrapper for sklearn.utils.validation.validate_data or BaseEstimator._validate_data depending on the scikit-learn version. TODO: remove when minimum scikit-learn ...
import sklearn from packaging.version import parse as parse_version from sklearn import get_config sklearn_version = parse_version(parse_version(sklearn.__version__).base_version) if sklearn_version < parse_version("1.6"): def patched_more_tags(estimator, expected_failed_checks): import copy fro...
from typing import Any, Dict, Union import torch from torchvision import datapoints, transforms as _transforms from torchvision.transforms.v2 import functional as F, Transform from .utils import is_simple_tensor class ConvertBoundingBoxFormat(Transform): _transformed_types = (datapoints.BoundingBox,) def ...
from typing import Any, Dict, Union import torch from torchvision import datapoints, transforms as _transforms from torchvision.transforms.v2 import functional as F, Transform from .utils import is_simple_tensor class ConvertBoundingBoxFormat(Transform): _transformed_types = (datapoints.BoundingBox,) def ...
import time from jina import Flow from tests.integration.instrumentation import ExecutorTestWithTracing, get_traces def test_span_order(jaeger_port, otlp_collector, otlp_receiver_port): f = Flow( tracing=True, traces_exporter_host='http://localhost', traces_exporter_port=otlp_receiver_por...
import time from jina import Flow from tests.integration.instrumentation import ExecutorTestWithTracing, get_traces def test_span_order(jaeger_port, otlp_collector, otlp_receiver_port): f = Flow( tracing=True, traces_exporter_host='http://localhost', traces_exporter_port=otlp_receiver_por...
from torch import nn, Tensor __all__ = [ "Wav2Letter", ] class Wav2Letter(nn.Module): r"""Wav2Letter model architecture from *Wav2Letter: an End-to-End ConvNet-based Speech Recognition System* [:footcite:`collobert2016wav2letter`]. :math:`\text{padding} = \frac{\text{ceil}(\text{kernel} - \text{str...
from torch import Tensor from torch import nn __all__ = [ "Wav2Letter", ] class Wav2Letter(nn.Module): r"""Wav2Letter model architecture from *Wav2Letter: an End-to-End ConvNet-based Speech Recognition System* [:footcite:`collobert2016wav2letter`]. :math:`\text{padding} = \frac{\text{ceil}(\text{ke...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
import types from keras.src.activations.activations import celu from keras.src.activations.activations import elu from keras.src.activations.activations import exponential from keras.src.activations.activations import gelu from keras.src.activations.activations import glu from keras.src.activations.activations import ...
import types from keras.src.activations.activations import celu from keras.src.activations.activations import elu from keras.src.activations.activations import exponential from keras.src.activations.activations import gelu from keras.src.activations.activations import glu from keras.src.activations.activations import ...
from backend.data.credit import UsageTransactionMetadata, get_user_credit_model from backend.data.execution import ( GraphExecution, NodeExecutionResult, RedisExecutionEventBus, create_graph_execution, get_graph_execution, get_incomplete_node_executions, get_latest_node_execution, get_no...
from backend.data.credit import UsageTransactionMetadata, get_user_credit_model from backend.data.execution import ( GraphExecutionMeta, NodeExecutionResult, RedisExecutionEventBus, create_graph_execution, get_incomplete_node_executions, get_latest_node_execution, get_node_execution_results,...
"""Various utilities to help with development.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import platform import warnings from collections.abc import Sequence import numpy as np from ..exceptions import DataConversionWarning from . import _joblib, metadata_routing from ._bunch...
"""Various utilities to help with development.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import platform import warnings from collections.abc import Sequence import numpy as np from ..exceptions import DataConversionWarning from . import _joblib, metadata_routing from ._bunch...
input:
from langchain_core.example_selectors.semantic_similarity import (
    MaxMarginalRelevanceExampleSelector,
    SemanticSimilarityExampleSelector,
    sorted_values,
)

__all__ = [
    "MaxMarginalRelevanceExampleSelector",
    "SemanticSimilarityExampleSelector",
    "sorted_values",
]

output:
from langchain_core.example_selectors.semantic_similarity import (
    MaxMarginalRelevanceExampleSelector,
    SemanticSimilarityExampleSelector,
    sorted_values,
)

__all__ = [
    "sorted_values",
    "SemanticSimilarityExampleSelector",
    "MaxMarginalRelevanceExampleSelector",
]
import sys import traceback from importlib.machinery import SourceFileLoader if __name__ == "__main__": files = sys.argv[1:] has_failure = False for file in files: try: SourceFileLoader("x", file).load_module() except Exception: has_failure = True traceba...
import sys import traceback from importlib.machinery import SourceFileLoader if __name__ == "__main__": files = sys.argv[1:] has_failure = False for file in files: try: SourceFileLoader("x", file).load_module() except Exception: has_faillure = True traceb...
# Copyright (c) OpenMMLab. All rights reserved. """This file holding some environment constant for sharing by other files.""" import os.path as osp import subprocess import sys from collections import OrderedDict, defaultdict from distutils import errors import cv2 import numpy as np import torch import mmengine from...
# Copyright (c) OpenMMLab. All rights reserved. """This file holding some environment constant for sharing by other files.""" import os.path as osp import subprocess import sys from collections import OrderedDict, defaultdict import cv2 import numpy as np import torch import mmengine from .parrots_wrapper import TORC...
# Copyright (c) OpenMMLab. All rights reserved. from .vis_backend import (BaseVisBackend, ClearMLVisBackend, DVCLiveVisBackend, LocalVisBackend, MLflowVisBackend, NeptuneVisBackend, TensorboardVisBackend, WandbVisBackend) from .visualizer import Visualizer __all__ = ...
# Copyright (c) OpenMMLab. All rights reserved. from .vis_backend import (BaseVisBackend, ClearMLVisBackend, LocalVisBackend, MLflowVisBackend, NeptuneVisBackend, TensorboardVisBackend, WandbVisBackend) from .visualizer import Visualizer __all__ = [ 'Visualizer',...
_base_ = [ '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' ] # model settings model = dict( type='CornerNet', backbone=dict( type='HourglassNet', downsample_times=5, num_stacks=2, stage_channels=[256, 256, 384, 384, 384, 512], stage_blocks=[2, ...
_base_ = [ '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' ] # model settings model = dict( type='CornerNet', backbone=dict( type='HourglassNet', downsample_times=5, num_stacks=2, stage_channels=[256, 256, 384, 384, 384, 512], stage_blocks=[2, ...
from __future__ import annotations from collections.abc import Iterable from torch import Tensor from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseTripletLoss(TripletLoss): def __init_...
from __future__ import annotations from collections.abc import Iterable from torch import Tensor from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseTripletLoss(TripletLoss): def __init_...
input:
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)

output:
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
"""Weaviate reader.""" from typing import Any, List, Optional from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class WeaviateReader(BaseReader): """ Weaviate reader. Retrieves documents from Weaviate through vector lookup. Allows option to concatenat...
"""Weaviate reader.""" from typing import Any, List, Optional from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class WeaviateReader(BaseReader): """Weaviate reader. Retrieves documents from Weaviate through vector lookup. Allows option to concatenate ret...
from typing import Sequence, cast import prisma.enums import prisma.types AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = { "Input": True, "Output": True, "Webhook": True, "AgentBlock": True, } AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = { "Nodes": {"include": AGENT_NODE_INCLUDE} } ...
from typing import cast import prisma.enums import prisma.types from backend.blocks.io import IO_BLOCK_IDs AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = { "Input": True, "Output": True, "Webhook": True, "AgentBlock": True, } AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = { "Nodes": ...
from typing import Any, List, Optional, Tuple import numpy as np import pytest from docarray import DocList, DocVec from docarray.base_doc.doc import BaseDoc from docarray.typing import NdArray from docarray.utils._internal.pydantic import is_pydantic_v2 def test_base_document_init(): doc = BaseDoc() asser...
from typing import Any, List, Optional, Tuple import numpy as np import pytest from docarray import DocList, DocVec from docarray.base_doc.doc import BaseDoc from docarray.typing import NdArray from docarray.utils._internal.pydantic import is_pydantic_v2 def test_base_document_init(): doc = BaseDoc() asser...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( depth=101, init_c...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( depth=101, init_c...
input:
_base_ = './solov2_r50_fpn_ms-3x_coco.py'

# model settings
model = dict(
    backbone=dict(
        depth=101, init_cfg=dict(checkpoint='torchvision://resnet101')))

output:
_base_ = 'solov2_r50_fpn_mstrain_3x_coco.py'

# model settings
model = dict(
    backbone=dict(
        depth=101, init_cfg=dict(checkpoint='torchvision://resnet101')))
# ReAct agent formatter import logging from abc import abstractmethod from typing import List, Optional, Sequence from llama_index.core.agent.react.prompts import ( CONTEXT_REACT_CHAT_SYSTEM_HEADER, REACT_CHAT_SYSTEM_HEADER, ) from llama_index.core.agent.react.types import ( BaseReasoningStep, Observa...
# ReAct agent formatter import logging from abc import abstractmethod from typing import List, Optional, Sequence from llama_index.core.agent.react.prompts import ( CONTEXT_REACT_CHAT_SYSTEM_HEADER, REACT_CHAT_SYSTEM_HEADER, ) from llama_index.core.agent.react.types import ( BaseReasoningStep, Observa...
import os import numpy as np import pytest import torch from pydantic import parse_obj_as from docarray import BaseDoc from docarray.typing import ImageBytes, ImageNdArray, ImageTensor, ImageTorchTensor from docarray.utils._internal.misc import is_tf_available tf_available = is_tf_available() if tf_available: im...
import os import numpy as np import pytest import torch from pydantic import parse_obj_as from docarray.typing import ImageBytes, ImageNdArray, ImageTorchTensor from docarray.utils._internal.misc import is_tf_available tf_available = is_tf_available() if tf_available: import tensorflow as tf from docarray.t...
_base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( random_size_range=(10, 20), backbone=dict(deepen_factor=0.33, widen_factor=0.375), neck=dict(in_channels=[96, 192, 384], out_channels=96), bbox_head=dict(in_channels=96, feat_channels=96)) img_scale = (640, 640) # height, width tra...
_base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( random_size_range=(10, 20), backbone=dict(deepen_factor=0.33, widen_factor=0.375), neck=dict(in_channels=[96, 192, 384], out_channels=96), bbox_head=dict(in_channels=96, feat_channels=96)) img_scale = (640, 640) train_pipeline = [ ...
input:
from .conv_emformer import ConvEmformer
from .conv_tasnet import conv_tasnet_base
from .rnnt import conformer_rnnt_base, conformer_rnnt_model

__all__ = [
    "conformer_rnnt_base",
    "conformer_rnnt_model",
    "conv_tasnet_base",
    "ConvEmformer",
]

output: from .conv_emformer import ConvEmformer from .conv_tasnet import conv_tasnet_base from .hdemucs import HDemucs, hdemucs_high, hdemucs_low, hdemucs_medium from .rnnt import conformer_rnnt_base, conformer_rnnt_model __all__ = [ "conformer_rnnt_base", "conformer_rnnt_model", "conv_tasnet_base", "ConvEmfor...
from __future__ import annotations from typing import TYPE_CHECKING, Any from langchain_core.callbacks import Callbacks from langchain_core.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForChainGroup, AsyncCallbackManagerForChainRun, AsyncCallbackManagerForLLMRun, AsyncCallb...
from __future__ import annotations from typing import TYPE_CHECKING, Any from langchain_core.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForChainGroup, AsyncCallbackManagerForChainRun, AsyncCallbackManagerForLLMRun, AsyncCallbackManagerForRetrieverRun, AsyncCallbackMan...
from sentence_transformers import SentenceTransformer from contextlib import nullcontext from sentence_transformers.evaluation import SentenceEvaluator import logging import os import csv from typing import Dict, List, Optional logger = logging.getLogger(__name__) class MSEEvaluator(SentenceEvaluator): """ ...
from sentence_transformers import SentenceTransformer from contextlib import nullcontext from sentence_transformers.evaluation import SentenceEvaluator import logging import os import csv from typing import List, Optional logger = logging.getLogger(__name__) class MSEEvaluator(SentenceEvaluator): """ Comput...
from keras.src import tree from keras.src.trainers.data_adapters import data_adapter_utils from keras.src.trainers.data_adapters.data_adapter import DataAdapter class TFDatasetAdapter(DataAdapter): """Adapter that handles `tf.data.Dataset`.""" def __init__(self, dataset, class_weight=None, distribution=None)...
from keras.src import tree from keras.src.trainers.data_adapters import data_adapter_utils from keras.src.trainers.data_adapters.data_adapter import DataAdapter class TFDatasetAdapter(DataAdapter): """Adapter that handles `tf.data.Dataset`.""" def __init__(self, dataset, class_weight=None, distribution=None)...
""" This examples trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2. It does NOT produce a sentence embedding and does NOT work for individual sentences. Usage: python trai...
""" This examples trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2. It does NOT produce a sentence embedding and does NOT work for individual sentences. Usage: python trai...
_base_ = [ '../_base_/models/mask-rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( frozen_stages=0, norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False, ...
_base_ = [ '../_base_/models/mask-rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( frozen_stages=0, norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False, ...
import os import shutil import subprocess import sys def _get_run_args(print_args: bool = True): from jina.helper import get_rich_console from jina.parsers import get_main_parser console = get_rich_console() silent_print = {'help', 'hub', 'export', 'auth'} parser = get_main_parser() if len(...
import os import shutil import subprocess import sys def _get_run_args(print_args: bool = True): from jina.helper import get_rich_console from jina.parsers import get_main_parser console = get_rich_console() silent_print = {'help', 'hub', 'export'} parser = get_main_parser() if len(sys.argv...
# Copyright (c) OpenMMLab. All rights reserved. """copy from https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" import torch from torch import Tensor from mmdet.core.bbox.assigners import AssignResult from .sampling_result import SamplingResult class MaskSamplingResult(SamplingResult): ...
# Copyright (c) OpenMMLab. All rights reserved. """copy from https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" import torch from .sampling_result import SamplingResult class MaskSamplingResult(SamplingResult): """Mask sampling result.""" def __init__(self, pos_inds, neg_inds, ...
from __future__ import annotations import json import os import torch from safetensors.torch import load_model as load_safetensors_model from safetensors.torch import save_model as save_safetensors_model from torch import nn class CNN(nn.Module): """CNN-layer with multiple kernel-sizes over the word embeddings"...
from __future__ import annotations import json import os import torch from safetensors.torch import load_model as load_safetensors_model from safetensors.torch import save_model as save_safetensors_model from torch import nn class CNN(nn.Module): """CNN-layer with multiple kernel-sizes over the word embeddings"...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.datasets.fashion_mnist import load_data as load_data
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.datasets.fashion_mnist import load_data
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class RetinaNet(SingleStageDetector): """Implementation of `RetinaNet <https://arxiv.org/...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Union from mmengine.config import ConfigDict from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class RetinaNet(SingleStageDetector): """Implementation of `RetinaNet <https://arxiv...
"""Pydantic v1 compatibility shim.""" from langchain_core._api import warn_deprecated try: from pydantic.v1.dataclasses import * # noqa: F403 except ImportError: from pydantic.dataclasses import * # type: ignore[no-redef] # noqa: F403 warn_deprecated( "0.3.0", removal="1.0.0", alternative="pyda...
"""Pydantic v1 compatibility shim.""" from langchain_core._api import warn_deprecated try: from pydantic.v1.dataclasses import * # noqa: F403 except ImportError: from pydantic.dataclasses import * # noqa: F403 warn_deprecated( "0.3.0", removal="1.0.0", alternative="pydantic.v1 or pydantic", ...
# Copyright (c) OpenMMLab. All rights reserved. """Collecting some commonly used type hint in mmdetection.""" from typing import List, Optional, Union from mmengine.config import ConfigDict from mmengine.data import InstanceData from ..bbox.samplers import SamplingResult from ..data_structures import DetDataSample #...
# Copyright (c) OpenMMLab. All rights reserved. """Collecting some commonly used type hint in mmdetection.""" from typing import List, Optional, Union from mmengine.config import ConfigDict from mmengine.data import InstanceData from ..data_structures import DetDataSample # Type hint of config data ConfigType = Uni...
import numpy as np from absl.testing import parameterized from keras.src import backend from keras.src import testing from keras.src.utils import backend_utils class BackendUtilsTest(testing.TestCase): @parameterized.named_parameters( ("numpy", "numpy"), ("jax", "jax"), ("tensorflow", "te...
import numpy as np from absl.testing import parameterized from keras.src import backend from keras.src import testing from keras.src.utils import backend_utils class BackendUtilsTest(testing.TestCase): @parameterized.named_parameters( ("numpy", "numpy"), ("jax", "jax"), ("tensorflow", "te...
# Copyright (c) OpenMMLab. All rights reserved. import pytest import torch import torch.nn.functional as F from mmcv.cnn import constant_init from mmdet.models.utils import DyReLU, SELayer def test_se_layer(): with pytest.raises(AssertionError): # act_cfg sequence length must equal to 2 SELayer(c...
# Copyright (c) OpenMMLab. All rights reserved. import pytest import torch from mmdet.models.utils import SELayer def test_se_layer(): with pytest.raises(AssertionError): # act_cfg sequence length must equal to 2 SELayer(channels=32, act_cfg=(dict(type='ReLU'), )) with pytest.raises(Assertio...
input:
_base_ = './solo_r50_fpn_8xb8-lsj-200e_coco.py'

model = dict(
    backbone=dict(
        depth=18,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
    neck=dict(in_channels=[64, 128, 256, 512]))

output:
_base_ = './solo_r50_fpn_lsj_200e_8x8_coco.py'

model = dict(
    backbone=dict(
        depth=18,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
    neck=dict(in_channels=[64, 128, 256, 512]))
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Sequence, Union from mmengine.data import BaseDataElement from mmengine.hooks import Hook from mmengine.runner import Runner from mmdet.registry import HOOKS @HOOKS.register_module() class MemoryProfilerHook(Hook): """Memory profiler h...
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.runner.hooks import Hook from mmdet.registry import HOOKS @HOOKS.register_module() class MemoryProfilerHook(Hook): """Memory profiler hook recording memory information including virtual memory, swap memory, and the memory of the current process. ...
from .filtering import ( allpass_biquad, band_biquad, bandpass_biquad, bandreject_biquad, bass_biquad, biquad, contrast, dcshift, deemph_biquad, dither, equalizer_biquad, filtfilt, flanger, gain, highpass_biquad, lfilter, lowpass_biquad, overdrive,...
from .filtering import ( allpass_biquad, band_biquad, bandpass_biquad, bandreject_biquad, bass_biquad, biquad, contrast, dcshift, deemph_biquad, dither, equalizer_biquad, filtfilt, flanger, gain, highpass_biquad, lfilter, lowpass_biquad, overdrive,...
input:
_base_ = [
    '../_base_/models/faster-rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_2x.py',
    '../_base_/default_runtime.py'
]

output:
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_2x.py',
    '../_base_/default_runtime.py'
]
import logging from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser-ensembledistil") evaluator = SparseNanoBEIR...
import logging from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser-ensembledistil") evaluator = SparseNanoBEIR...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Sequence from torch.utils.data import BatchSampler, Sampler from mmdet.datasets.samplers.track_img_sampler import TrackImgSampler from mmdet.registry import DATA_SAMPLERS # TODO: maybe replace with a data_loader wrapper @DATA_SAMPLERS.register_modul...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Sequence from torch.utils.data import BatchSampler, Sampler from mmdet.registry import DATA_SAMPLERS # TODO: maybe replace with a data_loader wrapper @DATA_SAMPLERS.register_module() class AspectRatioBatchSampler(BatchSampler): """A sampler wrap...
from __future__ import annotations import re from typing import Optional from langchain_core.output_parsers import BaseOutputParser class RegexDictParser(BaseOutputParser[dict[str, str]]): """Parse the output of an LLM call into a Dictionary using a regex.""" regex_pattern: str = r"{}:\s?([^.'\n']*)\.?" #...
from __future__ import annotations import re from typing import Dict, Optional from langchain_core.output_parsers import BaseOutputParser class RegexDictParser(BaseOutputParser[Dict[str, str]]): """Parse the output of an LLM call into a Dictionary using a regex.""" regex_pattern: str = r"{}:\s?([^.'\n']*)\...
import os import subprocess import sys import pytest from xgboost import testing as tm DEMO_DIR = tm.demo_dir(__file__) PYTHON_DEMO_DIR = os.path.join(DEMO_DIR, "guide-python") @pytest.mark.skipif(**tm.no_cupy()) def test_data_iterator(): script = os.path.join(PYTHON_DEMO_DIR, "quantile_data_iterator.py") ...
import os import subprocess import sys import pytest from xgboost import testing as tm DEMO_DIR = tm.demo_dir(__file__) PYTHON_DEMO_DIR = os.path.join(DEMO_DIR, "guide-python") @pytest.mark.skipif(**tm.no_cupy()) def test_data_iterator(): script = os.path.join(PYTHON_DEMO_DIR, "quantile_data_iterator.py") ...
from __future__ import annotations import math import random class NoDuplicatesDataLoader: def __init__(self, train_examples, batch_size): """ A special data loader to be used with MultipleNegativesRankingLoss. The data loader ensures that there are no duplicate sentences within the same ...
import math import random class NoDuplicatesDataLoader: def __init__(self, train_examples, batch_size): """ A special data loader to be used with MultipleNegativesRankingLoss. The data loader ensures that there are no duplicate sentences within the same batch """ self.batch...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Any, Optional, Sequence, Tuple, Union import torch from mmengine.data import BaseDataSample from mmengine.registry import HOOKS from .hook import Hook DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]] @HOOKS.register_module() class EmptyC...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Any, Optional, Sequence, Tuple, Union import torch from mmengine.data import BaseDataSample from mmengine.registry import HOOKS from .hook import Hook DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]] @HOOKS.register_module() class EmptyC...
import pytest import torch from docarray.computation.torch_backend import TorchCompBackend def test_to_device(): t = torch.rand(10, 3) assert t.device == torch.device('cpu') t = TorchCompBackend.to_device(t, 'meta') assert t.device == torch.device('meta') @pytest.mark.parametrize( 'array,result...
import pytest import torch from docarray.computation.torch_backend import TorchCompBackend def test_to_device(): t = torch.rand(10, 3) assert t.device == torch.device('cpu') t = TorchCompBackend.to_device(t, 'meta') assert t.device == torch.device('meta') @pytest.mark.parametrize( 'array,result...
"""Tool for the OpenAI DALLE V1 Image Generation SDK.""" from typing import Optional from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper class OpenAIDALLEImageGenerationTool(BaseTool...
"""Tool for the OpenAI DALLE V1 Image Generation SDK.""" from typing import Optional from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper class OpenAIDALLEImageGenerationTool(BaseTool...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase from unittest.mock import Mock, patch from mmdet.engine.hooks import YOLOXModeSwitchHook class TestYOLOXModeSwitchHook(TestCase): @patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper') def test_is_model_wrapper_and_p...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase from unittest.mock import Mock, patch from mmdet.engine.hooks import YOLOXModeSwitchHook class TestYOLOXModeSwitchHook(TestCase): @patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper') def test_is_model_wrapper_and_p...
input:
_base_ = './retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py'

model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))

output:
_base_ = './retinanet_r50_fpn_lsj_200e_8x8_fp16_coco.py'

model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
import pytest from docarray.utils.misc import is_tf_available tf_available = is_tf_available() if tf_available: import tensorflow as tf import tensorflow._api.v2.experimental.numpy as tnp from docarray.computation.tensorflow_backend import TensorFlowCompBackend from docarray.typing import TensorFlowT...
import pytest try: import tensorflow as tf import tensorflow._api.v2.experimental.numpy as tnp from docarray.computation.tensorflow_backend import TensorFlowCompBackend from docarray.typing import TensorFlowTensor except (ImportError, TypeError): pass @pytest.mark.tensorflow def test_top_k_desce...
# Copyright (c) OpenMMLab. All rights reserved. from .atss import ATSS from .autoassign import AutoAssign from .base import BaseDetector from .cascade_rcnn import CascadeRCNN from .centernet import CenterNet from .cornernet import CornerNet from .deformable_detr import DeformableDETR from .detr import DETR from .fast_r...
# Copyright (c) OpenMMLab. All rights reserved. from .atss import ATSS from .autoassign import AutoAssign from .base import BaseDetector from .cascade_rcnn import CascadeRCNN from .centernet import CenterNet from .cornernet import CornerNet from .deformable_detr import DeformableDETR from .detr import DETR from .fast_r...
input:
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock

from mmengine.hooks import SyncBuffersHook


class TestSyncBuffersHook:

    def test_sync_buffers_hook(self):
        Runner = Mock()
        Runner.model = Mock()
        Hook = SyncBuffersHook()
        Hook._after_epoch(Runner)

output:
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock

from mmengine.hooks import SyncBuffersHook


class TestSyncBuffersHook:

    def test_sync_buffers_hook(self):
        Runner = Mock()
        Runner.model = Mock()
        Hook = SyncBuffersHook()
        Hook.after_epoch(Runner)
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np from mmdet.core.mask import BitmapMasks, PolygonMasks def _check_fields(results, pipeline_results, keys): """Check data in fields from two results are same.""" for key in keys: if isinstance(results[key], (BitmapMasks, PolygonMasks)):...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np from mmdet.core.mask import BitmapMasks, PolygonMasks def _check_fields(results, pipeline_results, keys): """Check data in fields from two results are same.""" for key in keys: if isinstance(results[key], (BitmapMasks, PolygonMasks)):...
_base_ = './retinanet_r50_fpn_crop640-50e_coco.py' # model settings model = dict( # `pad_size_divisor=128` ensures the feature maps sizes # in `NAS_FPN` won't mismatch. data_preprocessor=dict(pad_size_divisor=128), neck=dict( _delete_=True, type='NASFPN', in_channels=[256, 512, ...
_base_ = './retinanet_r50_fpn_crop640_50e_coco.py' # model settings model = dict( # `pad_size_divisor=128` ensures the feature maps sizes # in `NAS_FPN` won't mismatch. data_preprocessor=dict(pad_size_divisor=128), neck=dict( _delete_=True, type='NASFPN', in_channels=[256, 512, ...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '0.7.1' def parse_version_info(version_str): """Parse the version information. Args: version_str (str): version string like '0.1.0'. Returns: tuple: version information contains major, minor, micro version. """ versio...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '0.7.0' def parse_version_info(version_str): """Parse the version information. Args: version_str (str): version string like '0.1.0'. Returns: tuple: version information contains major, minor, micro version. """ versio...
__version__ = '0.13.1' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install() if 'NO_VERSION_CHECK' not in os.environ: from .helper import is_latest_versi...
__version__ = '0.13.0' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install() if 'NO_VERSION_CHECK' not in os.environ: from .helper import is_latest_versi...
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and i...
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and i...
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
from __future__ import annotations from pathlib import Path from unittest.mock import Mock, PropertyMock import pytest import torch from sentence_transformers import SentenceTransformer from sentence_transformers.evaluation import InformationRetrievalEvaluator from sentence_transformers.util import cos_sim @pytest...
from __future__ import annotations from unittest.mock import Mock, PropertyMock import pytest import torch from sentence_transformers import SentenceTransformer from sentence_transformers.evaluation import InformationRetrievalEvaluator from sentence_transformers.util import cos_sim @pytest.fixture def mock_model()...
input: # Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...

output:
from docarray import BaseDoc
from docarray.typing import ImageUrl


def test_set_image_url():
    class MyDocument(BaseDoc):
        image_url: ImageUrl

    d = MyDocument(image_url="https://jina.ai/img.png")

    assert isinstance(d.image_url, ImageUrl)
    assert d.image_url == "https://jina.ai/img.png"
from typing import TYPE_CHECKING if TYPE_CHECKING: from docarray import Document def image_getter(doc: 'Document'): if doc._metadata['image_type'] == 'uri': return doc.uri elif doc._metadata['image_type'] == 'PIL': from PIL import Image return Image.fromarray(doc.tensor) elif...
from typing import TYPE_CHECKING if TYPE_CHECKING: from docarray import Document def image_getter(doc: 'Document'): if doc._metadata['image_type'] == 'uri': return doc.uri elif doc._metadata['image_type'] == 'PIL': from PIL import Image return Image.fromarray(doc.tensor) elif...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( depth=101, init_c...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( depth=101, init_c...
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...