input: string, lengths 33 to 5k
output: string, lengths 32 to 5k
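The rows below appear to pair a code snippet (input) with a revised version of the same file (output), each shown truncated. As a minimal, hedged sketch of how a two-string-column dataset like this could be loaded and inspected with the Hugging Face datasets library (the dataset identifier below is a hypothetical placeholder, not taken from this page):

# Hedged sketch: assumes a Hugging Face dataset with string columns "input" and "output".
# The identifier "org/code-edit-pairs" is a hypothetical placeholder, not from this document.
from datasets import load_dataset

ds = load_dataset("org/code-edit-pairs", split="train")
row = ds[0]
# Each row holds two strings; their lengths fall roughly in the 32 to 5k character
# range reported in the column summary above.
print(len(row["input"]), len(row["output"]))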
# Copyright (c) OpenMMLab. All rights reserved. import argparse import cv2 import mmcv from mmcv.transforms import Compose from mmdet.apis import inference_detector, init_detector from mmdet.registry import VISUALIZERS from mmdet.utils import register_all_modules def parse_args(): parser = argparse.ArgumentPars...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import cv2 import mmcv from mmdet.apis import inference_detector, init_detector def parse_args(): parser = argparse.ArgumentParser(description='MMDetection video demo') parser.add_argument('video', help='Video file') parser.add_argument('co...
from __future__ import annotations __version__ = "3.5.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" import importlib import os from sentence_transformers.backend import ( export_dynamic_quantized_onnx_model, export_optimized_onnx_model, export_static_quantized_openvino_model, ) from senten...
from __future__ import annotations __version__ = "3.5.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" import importlib import os from sentence_transformers.backend import ( export_dynamic_quantized_onnx_model, export_optimized_onnx_model, export_static_quantized_openvino_model, ) from senten...
import inspect import re from typing import Dict, List, Tuple from huggingface_hub.utils import insecure_hashlib from .arrow import arrow from .audiofolder import audiofolder from .cache import cache from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parq...
import inspect import re from typing import Dict, List, Tuple from huggingface_hub.utils import insecure_hashlib from .arrow import arrow from .audiofolder import audiofolder from .cache import cache from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parq...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.docstore.base import AddableMixin, Docstore # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optional im...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.docstore.base import AddableMixin, Docstore # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optional im...
from typing import Dict, List, Optional from docarray import DocArray def reduce( left: DocArray, right: DocArray, left_id_map: Optional[Dict] = None ) -> 'DocArray': """ Reduces left and right DocArray into one DocArray in-place. Changes are applied to the left DocArray. Reducing 2 DocArrays con...
from docarray import DocumentArray from typing import List, Optional, Dict def reduce( left: DocumentArray, right: DocumentArray, left_id_map: Optional[Dict] = None ) -> 'DocumentArray': """ Reduces left and right DocumentArray into one DocumentArray in-place. Changes are applied to the left DocumentA...
""" Experimental Object Oriented Distributed API - torch.distributed._dist2 ======================================================================= This is an experimental new API for PyTorch Distributed. This is actively in development and subject to change or deletion entirely. This is intended as a proving ground ...
""" Experimental Object Oriented Distributed API - torch.distributed._dist2 ======================================================================= This is an experimental new API for PyTorch Distributed. This is actively in development and subject to change or deletion entirely. This is intended as a proving ground ...
# Copyright (c) OpenMMLab. All rights reserved. from .hub import load_url from .manager import ManagerMeta, ManagerMixin from .misc import (check_prerequisites, concat_list, deprecated_api_warning, find_latest_checkpoint, has_batch_norm, has_method, import_modules_from_strings, is_...
# Copyright (c) OpenMMLab. All rights reserved. from .hub import load_url from .manager import ManagerMeta, ManagerMixin from .misc import (check_prerequisites, concat_list, deprecated_api_warning, find_latest_checkpoint, has_method, import_modules_from_strings, is_list_of, ...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import torch.nn.functional as F from mmcv.runner import BaseModule, force_fp32 from mmengine.model import stack_batch from ..builder import build_loss from ..utils import interpolate_as class BaseSemanticHead(BaseModule, metacla...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import torch.nn.functional as F from mmcv.runner import BaseModule, force_fp32 from ...core.utils import stack_batch from ..builder import build_loss from ..utils import interpolate_as class BaseSemanticHead(BaseModule, metaclas...
from typing import Any, Optional, Union, cast from langchain_core._api import deprecated from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import HumanMessage, SystemMessage from langchain_core.output_parsers import BaseLLMOutputParser from langchain_core.output_parsers.openai_f...
from typing import Any, Optional, Union, cast from langchain_core._api import deprecated from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import HumanMessage, SystemMessage from langchain_core.output_parsers import BaseLLMOutputParser from langchain_core.output_parsers.openai_f...
"""Data embedding techniques.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from ._isomap import Isomap from ._locally_linear import LocallyLinearEmbedding, locally_linear_embedding from ._mds import MDS, smacof from ._spectral_embedding import SpectralEmbedding, spectral_embedding...
"""Data embedding techniques.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from ._isomap import Isomap from ._locally_linear import LocallyLinearEmbedding, locally_linear_embedding from ._mds import MDS, smacof from ._spectral_embedding import SpectralEmbedding, spectral_embedding...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.dist import all_reduce_params, is_distributed from mmengine.registry import HOOKS from .hook import Hook @HOOKS.register_module() class SyncBuffersHook(Hook): """Synchronize model buffers such as running_mean and running_var in BN at the end of eac...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine import dist from mmengine.registry import HOOKS from .hook import Hook @HOOKS.register_module() class SyncBuffersHook(Hook): """Synchronize model buffers such as running_mean and running_var in BN at the end of each epoch.""" priority = 'NORMA...
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_SAMPLERS from ..transforms import bbox2roi from .base_sampler import BaseSampler @BBOX_SAMPLERS.register_module() class OHEMSampler(BaseSampler): r"""Online Hard Example Mining Sampler described in `Training Region-based ...
import torch from ..builder import BBOX_SAMPLERS from ..transforms import bbox2roi from .base_sampler import BaseSampler @BBOX_SAMPLERS.register_module() class OHEMSampler(BaseSampler): r"""Online Hard Example Mining Sampler described in `Training Region-based Object Detectors with Online Hard Example Mining...
# Copyright (c) OpenMMLab. All rights reserved. import datetime import os.path as osp from tempfile import TemporaryDirectory from unittest import TestCase, skipIf from mmengine.logging import MMLogger from mmengine.registry import (DefaultScope, Registry, count_registered_modules, init_...
# Copyright (c) OpenMMLab. All rights reserved. import datetime import os.path as osp from tempfile import TemporaryDirectory from unittest import TestCase, skipIf from mmengine.registry import (DefaultScope, Registry, count_registered_modules, init_default_scope, ...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig from .single_stage import SingleStageDetector @MODELS.register_module() class YOLOF(SingleStageDetector): r"""Implementation of `You Only Look One-level Feature <...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core.utils.typing import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class YOLOF(SingleStageDetector): r"""Implementation of `You Only Look One-level F...
_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py'] model = dict( data_preprocessor=dict( type='DetDataPreprocessor', mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False, pad_size_divisor=32), backbone=dict( norm_cfg=dict(requires_grad=False), ...
_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py'] preprocess_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False, pad_size_divisor=32) model = dict( preprocess_cfg=preprocess_cfg, backbone=dict( norm_cfg=dict(requires_grad=False), norm_eval=True, ...
import numpy as np import torch from docarray.document import BaseDocument from docarray.typing import AnyUrl, NdArray, TorchTensor def test_to_json(): class Mmdoc(BaseDocument): img: NdArray url: AnyUrl txt: str torch_tensor: TorchTensor doc = Mmdoc( img=np.zeros((3,...
import numpy as np import torch from docarray.document import BaseDocument from docarray.typing import AnyUrl, Tensor, TorchTensor def test_to_json(): class Mmdoc(BaseDocument): img: Tensor url: AnyUrl txt: str torch_tensor: TorchTensor doc = Mmdoc( img=np.zeros((3, 2...
""" This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well even when removing some layers. It generates sentence embeddings that can be compared using cosine-simi...
""" This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64]. It generates sentence embeddings that can be compared us...
import contextlib import logging import typing import fastapi import fastapi.responses import starlette.middleware.cors import uvicorn from autogpt_libs.feature_flag.client import ( initialize_launchdarkly, shutdown_launchdarkly, ) import backend.data.block import backend.data.db import backend.data.graph imp...
import contextlib import logging import typing import fastapi import fastapi.responses import starlette.middleware.cors import uvicorn from autogpt_libs.feature_flag.client import ( initialize_launchdarkly, shutdown_launchdarkly, ) import backend.data.block import backend.data.db import backend.data.graph imp...
_base_ = 'retinanet_pvt-t_fpn_1x_coco.py' model = dict( backbone=dict( num_layers=[3, 8, 27, 3], init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' 'releases/download/v2/pvt_large.pth'))) # Enable automatic-mixed-precision training with AmpOptimWrapper. optim_wrapper = ...
_base_ = 'retinanet_pvt-t_fpn_1x_coco.py' model = dict( backbone=dict( num_layers=[3, 8, 27, 3], init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' 'releases/download/v2/pvt_large.pth'))) fp16 = dict(loss_scale=dict(init_scale=512))
import os from typing import BinaryIO, Optional, Tuple, Union import torch from .backend import Backend from .common import AudioMetaData class SoXBackend(Backend): @staticmethod def info(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str], buffer_size: int = 4096) -> AudioMetaData: if has...
import os from typing import BinaryIO, Optional, Tuple, Union import torch from torchaudio.backend.common import AudioMetaData from .backend import Backend class SoXBackend(Backend): @staticmethod def info(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str], buffer_size: int = 4096) -> AudioMetaDa...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
import sys from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime from jina.serve.runtimes.head.request_handling import HeaderRequestHandler from jina.parsers import set_pod_parser def run(*args, **kwargs): runtime_args = set_pod_parser().parse_args(args) runtime_args.host = runtime_args.host[0] run...
import sys from jina.serve.runtimes.head import HeadRuntime from jina.parsers import set_pod_parser def run(*args, **kwargs): runtime_args = set_pod_parser().parse_args(args) runtime_args.host = runtime_args.host[0] runtime_args.port = runtime_args.port[0] with HeadRuntime(runtime_args) as runtime: ...
# Copyright 2022 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applica...
# Copyright 2022 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applica...
from typing import Dict, Set, Type from docarray.typing.tensor.embedding import Embedding, NdArrayEmbedding from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.tensor import Tensor __all__ = [ 'NdArray', 'Tensor', 'Embedding', 'NdArrayEmbedding', 'framework_types', '...
from docarray.typing.tensor.embedding import Embedding, NdArrayEmbedding from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.tensor import Tensor __all__ = [ 'NdArray', 'Tensor', 'Embedding', 'NdArrayEmbedding', ] try: import torch # noqa: F401 except ImportError: p...
"""MutliOn Client API tools.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools.multion.close_session import MultionCloseSession from langchain_community.tools.multion.create_session import MultionCreateSession from langcha...
"""MutliOn Client API tools.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools.multion.close_session import MultionCloseSession from langchain_community.tools.multion.create_session import MultionCreateSession from langcha...
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.cnn import ConvModule, Linear from mmengine.model import ModuleList from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import MultiConfig from .fcn_mask_head import FCNMaskHead @MODELS.register_module() class CoarseMaskHead(FCNMaskHea...
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.cnn import ConvModule, Linear from mmengine.model import ModuleList from torch import Tensor from mmdet.core.utils import MultiConfig from mmdet.registry import MODELS from .fcn_mask_head import FCNMaskHead @MODELS.register_module() class CoarseMaskHead(FCNMa...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import subprocess import librosa import pytest from jina import Document, DocumentArray, Flow from ...vggish import vggish_input cur_dir = os.path.dirname(os.path.abspath(__file__)) def test_flow_f...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import librosa from jina import Flow, Document, DocumentArray from ...vggish import vggish_input from ...vggish_audio_encoder import VggishAudioEncoder cur_dir = os.path.dirname(os.path.abspath(__fil...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.vectorstores import SKLearnVectorStore from langchain_community.vectorstores.sklearn import ( BaseSerializer, BsonSerializer, JsonSerializer, ParquetSeria...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.vectorstores import SKLearnVectorStore from langchain_community.vectorstores.sklearn import ( BaseSerializer, BsonSerializer, JsonSerializer, ParquetSeria...
from langchain_core.embeddings import Embeddings from langchain_core.utils import secret_from_env from openai import OpenAI from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator from typing_extensions import Self # type: ignore class FireworksEmbeddings(BaseModel, Embeddings): """Firework...
from langchain_core.embeddings import Embeddings from langchain_core.utils import secret_from_env from openai import OpenAI from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator from typing_extensions import Self # type: ignore class FireworksEmbeddings(BaseModel, Embeddings): """Firework...
from abc import ABC, abstractmethod from typing import Dict, Iterator, List, Optional, Type from typing_extensions import TYPE_CHECKING if TYPE_CHECKING: from docarray import BaseDoc, DocList class AbstractDocStore(ABC): @staticmethod @abstractmethod def list(namespace: str, show_table: bool) -> Lis...
from abc import ABC, abstractmethod from typing import Dict, Iterator, List, Optional, Type from typing_extensions import TYPE_CHECKING if TYPE_CHECKING: from docarray import BaseDoc, DocArray class AbstractDocStore(ABC): @staticmethod @abstractmethod def list(namespace: str, show_table: bool) -> Li...
"""Macrometa GDN Reader.""" import json from typing import List import requests from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class MacrometaGDNReader(BaseReader): """ Macrometa GDN Reader. Reads vectors from Macrometa GDN """ def __init__(...
"""Macrometa GDN Reader.""" import json from typing import List import requests from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class MacrometaGDNReader(BaseReader): """Macrometa GDN Reader. Reads vectors from Macrometa GDN """ def __init__(self,...
"""CIFAR10 small images classification dataset.""" import os import numpy as np from keras.src import backend from keras.src.api_export import keras_export from keras.src.datasets.cifar import load_batch from keras.src.utils.file_utils import get_file @keras_export("keras.datasets.cifar10.load_data") def load_data...
"""CIFAR10 small images classification dataset.""" import os import numpy as np from keras.src import backend from keras.src.api_export import keras_export from keras.src.datasets.cifar import load_batch from keras.src.utils.file_utils import get_file @keras_export("keras.datasets.cifar10.load_data") def load_data...
import csv import logging import os from typing import List from scipy.stats import pearsonr, spearmanr from sentence_transformers import InputExample logger = logging.getLogger(__name__) class CECorrelationEvaluator: """ This evaluator can be used with the CrossEncoder class. Given sentence pairs and cont...
import logging from scipy.stats import pearsonr, spearmanr from typing import List import os import csv from ... import InputExample logger = logging.getLogger(__name__) class CECorrelationEvaluator: """ This evaluator can be used with the CrossEncoder class. Given sentence pairs and continuous scores, ...
from typing import Any, Sequence from llama_index.core.base.llms.generic_utils import ( completion_response_to_chat_response, stream_completion_response_to_chat_response, ) from llama_index.core.base.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, Compl...
from typing import Any, Sequence from llama_index.core.base.llms.generic_utils import ( completion_response_to_chat_response, stream_completion_response_to_chat_response, ) from llama_index.core.base.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, Compl...
from __future__ import annotations import json import os import torch from safetensors.torch import load_model as load_safetensors_model from safetensors.torch import save_model as save_safetensors_model from torch import Tensor, nn from sentence_transformers.util import fullname, import_from_string class Dense(nn...
from __future__ import annotations import json import os import torch from safetensors.torch import load_model as load_safetensors_model from safetensors.torch import save_model as save_safetensors_model from torch import Tensor, nn from sentence_transformers.util import fullname, import_from_string class Dense(nn...
# coding=utf-8 # Copyright 2025 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable...
# coding=utf-8 # Copyright 2025 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.document_loaders import PlaywrightURLLoader from langchain_community.document_loaders.url_playwright import ( PlaywrightEvaluator, UnstructuredHtmlEvaluator, ) # Cre...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.document_loaders import PlaywrightURLLoader from langchain_community.document_loaders.url_playwright import ( PlaywrightEvaluator, UnstructuredHtmlEvaluator, ) # Cre...
from typing import List import numpy as np def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int: """Return the number of possible shards according to the input gen_kwargs""" # Having lists of different sizes makes sharding ambigious, raise an error in this case # until we decide how to define sha...
from typing import List import numpy as np def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int: """Return the number of possible shards according to the input gen_kwargs""" # Having lists of different sizes makes sharding ambigious, raise an error in this case # until we decide how to define sha...
from typing import Iterable, Dict from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin from docarray.array.storage.base.helper import Offset2ID from docarray import Document class GetSetDelMixin(BaseGetSetDelMixin): """Provide concrete implementation for ``__getitem__``, ``__setitem__``, and ...
from typing import Iterable, Dict from ..base.getsetdel import BaseGetSetDelMixin from ..base.helper import Offset2ID from .... import Document class GetSetDelMixin(BaseGetSetDelMixin): """Provide concrete implementation for ``__getitem__``, ``__setitem__``, and ``__delitem__`` for ``DocumentArrayWeaviate``"...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.activations import deserialize from keras.src.activations import get from keras.src.activations import serialize from keras.src.activations.activations import celu from keras.src.acti...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.activations import deserialize from keras.src.activations import get from keras.src.activations import serialize from keras.src.activations.activations import elu from keras.src.activ...
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appl...
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appl...
import socket from dataclasses import asdict import numpy as np import pytest import xgboost as xgb from xgboost import RabitTracker, build_info, federated from xgboost import testing as tm from xgboost.collective import Config def run_rabit_worker(rabit_env: dict, world_size: int) -> int: with xgb.collective.C...
import socket from dataclasses import asdict import numpy as np import pytest from loky import get_reusable_executor import xgboost as xgb from xgboost import RabitTracker, build_info, federated from xgboost import testing as tm from xgboost.collective import Config def run_rabit_worker(rabit_env: dict, world_size:...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmdet.models.dense_heads import TOODHead def test_tood_head_loss(): """Tests paa head loss when truth is empty and non-empty.""" s = 256 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, 'pad_sh...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmdet.models.dense_heads import TOODHead def test_paa_head_loss(): """Tests paa head loss when truth is empty and non-empty.""" s = 256 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, 'pad_sha...
import torch from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda from torchaudio_unittest.prototype.rnnt_test_impl import ConformerRNNTTestImpl @skipIfNoCuda class ConformerRNNTFloat32GPUTest(ConformerRNNTTestImpl, PytorchTestCase): dtype = torch.float32 device = torch.device("cuda") ...
import torch from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase from torchaudio_unittest.prototype.rnnt_test_impl import ConformerRNNTTestImpl @skipIfNoCuda class ConformerRNNTFloat32GPUTest(ConformerRNNTTestImpl, PytorchTestCase): dtype = torch.float32 device = torch.device("cuda") ...
_base_ = [ '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/mot_challenge.py', '../_base_/default_runtime.py' ] default_hooks = dict( logger=dict(type='LoggerHook', interval=1), visualization=dict(type='TrackVisualizationHook', draw=False)) vis_backends = [dict(type='LocalVisBackend')] v...
_base_ = [ '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/mot_challenge.py', '../_base_/default_runtime.py' ] default_hooks = dict( logger=dict(type='LoggerHook', interval=1), visualization=dict(type='TrackVisualizationHook', draw=False)) vis_backends = [dict(type='LocalVisBackend')] v...
import os import time import numpy as np import pytest from pydantic import Field from docarray import BaseDoc from docarray.documents import ImageDoc from docarray.typing import NdArray pytestmark = [pytest.mark.slow, pytest.mark.index] cur_dir = os.path.dirname(os.path.abspath(__file__)) compose_yml_v7 = os.path....
import os import time import numpy as np import pytest from pydantic import Field from docarray import BaseDoc from docarray.typing import NdArray pytestmark = [pytest.mark.slow, pytest.mark.index] cur_dir = os.path.dirname(os.path.abspath(__file__)) compose_yml_v7 = os.path.abspath(os.path.join(cur_dir, 'v7/docker...
import numpy as np import pytest from keras.src import backend from keras.src import layers from keras.src import testing class GaussianDropoutTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_gaussian_dropout_basics(self): self.run_layer_test( layers.GaussianDropou...
import numpy as np import pytest from keras.src import backend from keras.src import layers from keras.src import testing class GaussianDropoutTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_gaussian_dropout_basics(self): self.run_layer_test( layers.GaussianDropou...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
from docarray.documents import TextDoc def test_text_document_init(): text = TextDoc('hello world') assert text.text == 'hello world' assert text == 'hello world' text = TextDoc(text='hello world') assert text.text == 'hello world' assert text == 'hello world' text = TextDoc() assert...
import io import json import logging import os import tempfile from typing import IO import torch from torch._inductor import config from torch._inductor.cpp_builder import BuildOptionsBase, CppBuilder from torch.export.pt2_archive._package import ( AOTI_FILES, AOTICompiledModel, load_pt2, package_pt2,...
import io import json import logging import os import tempfile from typing import IO, Union import torch from torch._inductor import config from torch._inductor.cpp_builder import BuildOptionsBase, CppBuilder from torch.export.pt2_archive._package import AOTICompiledModel, load_pt2, package_pt2 from torch.types import...
# Copyright (c) OpenMMLab. All rights reserved. import copy import os.path as osp import unittest import numpy as np from mmengine.data import InstanceData, PixelData from mmdet.datasets.transforms import PackDetInputs from mmdet.structures import DetDataSample from mmdet.structures.mask import BitmapMasks class Te...
# Copyright (c) OpenMMLab. All rights reserved. import copy import os.path as osp import unittest import numpy as np from mmengine.data import BaseDataElement as PixelData from mmengine.data import InstanceData from mmdet.datasets.transforms import PackDetInputs from mmdet.structures import DetDataSample from mmdet.s...
from docarray import Document, DocumentArray import pytest def test_add_ignore_existing_doc_id(start_storage): elastic_doc = DocumentArray( storage='elasticsearch', config={ 'n_dim': 3, 'columns': [('price', 'int')], 'distance': 'l2_norm', 'index_na...
from docarray import Document, DocumentArray def test_add_ignore_existing_doc_id(start_storage): elastic_doc = DocumentArray( storage='elasticsearch', config={ 'n_dim': 3, 'columns': [('price', 'int')], 'distance': 'l2_norm', 'index_name': 'test_add_...
import pytest from llama_index.multi_modal_llms.nvidia import NVIDIAMultiModal @pytest.mark.integration def test_available_models() -> None: models = NVIDIAMultiModal().available_models assert models assert isinstance(models, list) assert all(isinstance(model.id, str) for model in models)
import pytest from llama_index.multi_modal_llms.nvidia import NVIDIAMultiModal @pytest.mark.integration() def test_available_models() -> None: models = NVIDIAMultiModal().available_models assert models assert isinstance(models, list) assert all(isinstance(model.id, str) for model in models)
import csv import os from . import InputExample class TripletReader(object): """Reads in the a Triplet Dataset: Each line contains (at least) 3 columns, one anchor column (s1), one positive example (s2) and one negative example (s3) """ def __init__( self, dataset_folder, s1_...
from . import InputExample import csv import gzip import os class TripletReader(object): """ Reads in the a Triplet Dataset: Each line contains (at least) 3 columns, one anchor column (s1), one positive example (s2) and one negative example (s3) """ def __init__(self, dataset_folder, s1_col_idx=0, ...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.export.saved_model import ExportArchive
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.export.export_lib import ExportArchive
import numpy as np from docarray.proto import DocumentProto, NdArrayProto, NodeProto from docarray.typing import NdArray def test_nested_item_proto(): NodeProto(text='hello') NodeProto(nested=DocumentProto()) def test_nested_optional_item_proto(): NodeProto() def test_ndarray(): nd_proto = NdArra...
import numpy as np from docarray.proto import DocumentProto, NdArrayProto, NodeProto from docarray.typing import Tensor def test_nested_item_proto(): NodeProto(text='hello') NodeProto(nested=DocumentProto()) def test_nested_optional_item_proto(): NodeProto() def test_ndarray(): nd_proto = NdArray...
from pathlib import Path from llama_index.core.bridge.pydantic import AnyUrl from llama_index.core.schema import MediaResource def test_defaults(): m = MediaResource() assert m.data is None assert m.embeddings is None assert m.mimetype is None assert m.path is None assert m.url is None def ...
from pathlib import Path from llama_index.core.bridge.pydantic import AnyUrl from llama_index.core.schema import MediaResource def test_defaults(): m = MediaResource() assert m.data is None assert m.embeddings is None assert m.mimetype is None assert m.path is None assert m.url is None def ...
_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth' # noqa depths = [2, 2, 18, 2] model = dict( backbone=dict( pretrain_img_size=384, embed_dims=128, de...
_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth' # noqa depths = [2, 2, 18, 2] model = dict( backbone=dict( pretrain_img_size=384, embed_dims=128, de...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os from jina import Flow from PIL import Image from ...pdf_segmenter import PDFSegmenter def test_flow(test_dir, doc_generator_img_text, expected_text): flow = Flow().add(uses=PDFSegmenter) doc...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os from PIL import Image from jina import Flow from ...pdf_segmenter import PDFSegmenter def test_flow(test_dir, doc_generator_img_text, expected_text): flow = Flow().add(uses=PDFSegmenter) doc_...
"""Top-level imports for LlamaIndex.""" __version__ = "0.12.48" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_in...
"""Top-level imports for LlamaIndex.""" __version__ = "0.12.47" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_in...
# type: ignore """ Development Scripts for template packages """ from collections.abc import Sequence from fastapi import FastAPI from langserve import add_routes from langchain_cli.utils.packages import get_langserve_export, get_package_root def create_demo_server( *, config_keys: Sequence[str] = (), ...
# type: ignore """ Development Scripts for template packages """ from typing import Sequence from fastapi import FastAPI from langserve import add_routes from langchain_cli.utils.packages import get_langserve_export, get_package_root def create_demo_server( *, config_keys: Sequence[str] = (), playgroun...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp def parse_args(): parser = argparse.ArgumentParser( description='Convert benchmark model json to script') parser.add_argument( 'txt_path', type=str, help='txt path output by benchmark_filter') p...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp def parse_args(): parser = argparse.ArgumentParser( description='Convert benchmark model json to script') parser.add_argument( 'txt_path', type=str, help='txt path output by benchmark_filter') p...
import torch from torch import Tensor from torch import nn from typing import Dict import os import json class WeightedLayerPooling(nn.Module): """Token embeddings are weighted mean of their different hidden layer representations""" def __init__( self, word_embedding_dimension, num_hidden_layers: int...
import torch from torch import Tensor from torch import nn from typing import Dict import os import json class WeightedLayerPooling(nn.Module): """ Token embeddings are weighted mean of their different hidden layer representations """ def __init__( self, word_embedding_dimension, num_hidden_l...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from .two_stage import TwoStageDetector @MODELS.register_module() class FastRCNN(TwoStageDetector): """Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_""" def __init__(self, backbone, ...
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module() class FastRCNN(TwoStageDetector): """Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_""" def __init__(self, backbone, ...
from __future__ import annotations import asyncio from collections.abc import AsyncIterator from typing import Any, Literal, Union, cast from langchain_core.callbacks import AsyncCallbackHandler from langchain_core.outputs import LLMResult # TODO If used by two LLM runs in parallel this won't work as expected clas...
from __future__ import annotations import asyncio from typing import Any, AsyncIterator, Dict, List, Literal, Union, cast from langchain_core.callbacks import AsyncCallbackHandler from langchain_core.outputs import LLMResult # TODO If used by two LLM runs in parallel this won't work as expected class AsyncIterator...
# Copyright (c) OpenMMLab. All rights reserved. import pytest from mmengine.utils import ManagerMeta, ManagerMixin class SubClassA(ManagerMixin): def __init__(self, name='', *args, **kwargs): super().__init__(name, *args, **kwargs) class SubClassB(ManagerMixin): def __init__(self, name='', *args,...
# Copyright (c) OpenMMLab. All rights reserved. import pytest from mmengine.utils import ManagerMeta, ManagerMixin class SubClassA(ManagerMixin): def __init__(self, name='', *args, **kwargs): super().__init__(name, *args, **kwargs) class SubClassB(ManagerMixin): def __init__(self, name='', *args,...
from __future__ import annotations import sys from .BoW import BoW from .CLIPModel import CLIPModel from .CNN import CNN from .Dense import Dense from .Dropout import Dropout from .InputModule import InputModule from .LayerNorm import LayerNorm from .LSTM import LSTM from .Module import Module from .Normalize import ...
from __future__ import annotations from .Asym import Asym, Router from .BoW import BoW from .CLIPModel import CLIPModel from .CNN import CNN from .Dense import Dense from .Dropout import Dropout from .InputModule import InputModule from .LayerNorm import LayerNorm from .LSTM import LSTM from .Module import Module from...
from __future__ import annotations from typing import Any, List, Optional, Tuple, Union import PIL.Image import torch from torchvision.transforms import InterpolationMode from ._datapoint import Datapoint, FillTypeJIT class Mask(Datapoint): @classmethod def _wrap(cls, tensor: torch.Tensor) -> Mask: ...
from __future__ import annotations from typing import Any, List, Optional, Tuple, Union import PIL.Image import torch from torchvision.transforms import InterpolationMode from ._datapoint import Datapoint, FillTypeJIT class Mask(Datapoint): @classmethod def _wrap(cls, tensor: torch.Tensor) -> Mask: ...
from typing import Any, Optional def json_to_markdown(data: Any, level: int = 0, header: Optional[str] = None) -> str: """ Recursively converts a Python object (from JSON) into a Markdown string. Args: data: The Python object to convert. level: The current nesting level (used for indentat...
from typing import Any, Optional def json_to_markdown(data: Any, level: int = 0, header: Optional[str] = None) -> str: """ Recursively converts a Python object (from JSON) into a Markdown string. Args: data: The Python object to convert. level: The current nesting level (used for indentat...
# coding=utf-8 # Copyright 2020 The Trax Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
# coding=utf-8 # Copyright 2020 The Trax Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
from __future__ import annotations from collections.abc import Iterable from torch import Tensor from sentence_transformers import util from sentence_transformers.losses.CoSENTLoss import CoSENTLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseCoSENTLoss(CoSENTLoss): ...
from __future__ import annotations from collections.abc import Iterable from torch import Tensor from sentence_transformers import util from sentence_transformers.losses.CoSENTLoss import CoSENTLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseCoSENTLoss(CoSENTLoss): ...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_panoptic.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='PanopticFPN', semantic_head=dict( type='PanopticFPNHead', num_classes=54, in_channels=256, ...
_base_ = [ '../_base_/datasets/coco_panoptic.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='PanopticFPN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm...
_base_ = ['./cascade-mask-rcnn_r50_fpn_1x_coco.py'] model = dict( data_preprocessor=dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False), backbone=dict( norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=...
_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py'] model = dict( data_preprocessor=dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False), backbone=dict( norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=...
_base_ = './grid-rcnn_r50_fpn_gn-head_2x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
_base_ = 'mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py' # Use RepeatDataset to speed up training # change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs) train_dataloader = dict(dataset=dict(times=2))
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' # Use RepeatDataset to speed up training # change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs) train_dataloader = dict(dataset=dict(times=2))
"""ReAct output parser.""" import re from typing import Tuple from llama_index.core.agent.react.types import ( ActionReasoningStep, BaseReasoningStep, ResponseReasoningStep, ) from llama_index.core.output_parsers.utils import extract_json_str from llama_index.core.types import BaseOutputParser def extra...
"""ReAct output parser.""" import re from typing import Tuple from llama_index.core.agent.react.types import ( ActionReasoningStep, BaseReasoningStep, ResponseReasoningStep, ) from llama_index.core.output_parsers.utils import extract_json_str from llama_index.core.types import BaseOutputParser def extra...
# Copyright (c) OpenMMLab. All rights reserved. from .local_visualizer import DetLocalVisualizer from .palette import get_palette, palette_val __all__ = ['palette_val', 'get_palette', 'DetLocalVisualizer']
# Copyright (c) OpenMMLab. All rights reserved. from .image import (color_val_matplotlib, imshow_det_bboxes, imshow_gt_det_bboxes) from .palette import get_palette, palette_val __all__ = [ 'imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib', 'palette_val', 'get_palette' ]
from typing import List, Optional, TypeVar from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl from docarray.typing.url.mimetypes import TEXT_EXTRA_EXTENSIONS, TEXT_MIMETYPE T = TypeVar('T', bound='TextUrl') @_register_proto(proto_type_name='text_url') class Tex...
from typing import Optional, TypeVar from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl T = TypeVar('T', bound='TextUrl') @_register_proto(proto_type_name='text_url') class TextUrl(AnyUrl): """ URL to a text file. Can be remote (web) URL, or a local...
"""Module for Jina Requests.""" from typing import ( TYPE_CHECKING, AsyncIterable, Dict, Iterable, Iterator, Optional, Tuple, Union, ) from jina.clients.request.helper import _new_data_request, _new_data_request_from_batch from jina.enums import DataInputType from jina.helper import ba...
"""Module for Jina Requests.""" from typing import ( Iterator, Union, Tuple, AsyncIterable, Iterable, Optional, Dict, TYPE_CHECKING, ) from jina.clients.request.helper import _new_data_request_from_batch, _new_data_request from jina.enums import DataInputType from jina.helper import ba...
"""Base argparser module for Pod and Deployment runtime""" import argparse import os from jina.enums import PollingType from jina.helper import random_identity from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group def mixin_essential_parser(parser): """Mixing in arguments required by every module into th...
"""Base argparser module for Pod and Deployment runtime""" import argparse import os from jina.enums import PollingType from jina.helper import random_identity from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group def mixin_essential_parser(parser): """Mixing in arguments required by every module into th...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.api import _tf_keras from keras.api import activations from keras.api import applications from keras.api import backend from keras.api import callbacks from keras.api import config from k...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.api import _tf_keras from keras.api import activations from keras.api import applications from keras.api import backend from keras.api import callbacks from keras.api import config from k...
import os as _os import sys as _sys from pathlib import Path as _Path import datetime as _datetime __windows__ = _sys.platform == 'win32' __uptime__ = _datetime.datetime.now().isoformat() # update on MacOS 1. clean this tuple, 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py # "\'JINA_.*?\'" ...
import os as _os import sys as _sys from pathlib import Path as _Path import datetime as _datetime __windows__ = _sys.platform == 'win32' __uptime__ = _datetime.datetime.now().isoformat() # update on MacOS 1. clean this tuple, 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py # "\'JINA_.*?\'" ...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv from .version import __version__, short_version def digit_version(version_str): digit_version = [] for x in version_str.split('.'): if x.isdigit(): digit_version.append(int(x)) elif x.find('rc') != -1: patch_v...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv from .version import __version__, short_version def digit_version(version_str): digit_version = [] for x in version_str.split('.'): if x.isdigit(): digit_version.append(int(x)) elif x.find('rc') != -1: patch_v...
import time import http.client import json from typing import List, Optional, Union from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.callbacks.base import CallbackManager from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode class GalaxiaClient: def __init_...
import time import http.client import json from typing import List, Optional, Union from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.callbacks.base import CallbackManager from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode class GalaxiaClient: def __init_...
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.runner import force_fp32 from mmdet.models.builder import ROI_EXTRACTORS from .base_roi_extractor import BaseRoIExtractor @ROI_EXTRACTORS.register_module() class SingleRoIExtractor(BaseRoIExtractor): """Extract RoI features from a single leve...
import torch from mmcv.runner import force_fp32 from mmdet.models.builder import ROI_EXTRACTORS from .base_roi_extractor import BaseRoIExtractor @ROI_EXTRACTORS.register_module() class SingleRoIExtractor(BaseRoIExtractor): """Extract RoI features from a single level feature map. If there are multiple input ...
"""Module for Jina Requests.""" from typing import ( TYPE_CHECKING, AsyncIterable, Dict, Iterable, Iterator, Optional, Tuple, Union, ) from jina._docarray import Document from jina.clients.request.helper import _new_data_request, _new_data_request_from_batch from jina.enums import Data...
"""Module for Jina Requests.""" from typing import ( TYPE_CHECKING, AsyncIterable, Dict, Iterable, Iterator, Optional, Tuple, Union, ) from jina.clients.request.helper import _new_data_request, _new_data_request_from_batch from jina.enums import DataInputType from jina.helper import ba...
from __future__ import annotations import re import pytest from sentence_transformers import SentenceTransformer from sentence_transformers.evaluation import NanoBEIREvaluator from sentence_transformers.util import is_datasets_available from tests.utils import is_ci if not is_datasets_available(): pytest.skip( ...
from __future__ import annotations import re import pytest from sentence_transformers import SentenceTransformer from sentence_transformers.evaluation import NanoBEIREvaluator from sentence_transformers.util import is_datasets_available from tests.utils import is_ci if not is_datasets_available(): pytest.skip( ...
_base_ = './rpn_r50_fpn_1x_coco.py' # use caffe img_norm preprocess_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False, pad_size_divisor=32) model = dict( preprocess_cfg=preprocess_cfg, backbone=dict( norm_cfg=dict(requires_grad=False), norm_eval=Tru...
_base_ = './rpn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dic...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig from .single_stage import SingleStageDetector @MODELS.register_module() class RepPointsDetector(SingleStageDetector): """RepPoints: Point Set Representation for Objec...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class RepPointsDetector(SingleStageDetector): """RepPoints: Point Set Representation for ...
from typing import Union, Iterable from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin from docarray.array.storage.registry import _REGISTRY from docarray import Document class SequenceLikeMixin(BaseSequenceLikeMixin): """Implement sequence-like methods for DocumentArray with weaviate as storag...
from typing import Union, Iterable from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin from docarray.array.storage.registry import _REGISTRY from docarray import Document class SequenceLikeMixin(BaseSequenceLikeMixin): """Implement sequence-like methods for DocumentArray with weaviate as storag...
from typing import Any, Dict, Tuple, Union import numpy as np import PIL.Image import torch from torchvision.io.video import read_video from torchvision.prototype import features from torchvision.prototype.utils._internal import ReadOnlyTensorBuffer from torchvision.transforms import functional as _F @torch.jit.unus...
import unittest.mock from typing import Any, Dict, Tuple, Union import numpy as np import PIL.Image import torch from torchvision.io.video import read_video from torchvision.prototype import features from torchvision.prototype.utils._internal import ReadOnlyTensorBuffer from torchvision.transforms import functional as...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase from unittest.mock import Mock import torch.nn as nn from torch.optim import SGD from mmengine.hooks import RuntimeInfoHook from mmengine.logging import MessageHub from mmengine.optim import OptimWrapper, OptimWrapperDict class TestRuntim...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase from unittest.mock import Mock from mmengine.hooks import RuntimeInfoHook from mmengine.logging import MessageHub class TestRuntimeInfoHook(TestCase): def test_before_run(self): message_hub = MessageHub.get_instance( ...
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
"""Base argparser module for Pod and Deployment runtime""" import argparse import os from jina.enums import PollingType from jina.helper import random_identity from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group def mixin_essential_parser(parser): """Mixing in arguments required by every module into th...
"""Base argparser module for Pod and Deployment runtime""" import argparse import os from jina.enums import PollingType from jina.helper import random_identity from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group def mixin_essential_parser(parser): """Mixing in arguments required by every module into th...
from __future__ import annotations import torch import transformers from PIL import Image from torch import nn class CLIPModel(nn.Module): def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None: super().__init__() if processor_name is None: ...
from __future__ import annotations import torch import transformers from PIL import Image from torch import nn class CLIPModel(nn.Module): def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None: super(CLIPModel, self).__init__() if processor_name is Non...
from typing import Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_document import BaseDocument from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.tensor.image.image_tensor import ImageTensor f...
from typing import Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_document import BaseDocument from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.tensor.image.image_tensor import ImageTensor f...
import csv import logging import os from typing import List from scipy.stats import pearsonr, spearmanr from sentence_transformers import InputExample logger = logging.getLogger(__name__) class CECorrelationEvaluator: """ This evaluator can be used with the CrossEncoder class. Given sentence pairs and cont...
import logging from scipy.stats import pearsonr, spearmanr from typing import List import os import csv from ... import InputExample logger = logging.getLogger(__name__) class CECorrelationEvaluator: """ This evaluator can be used with the CrossEncoder class. Given sentence pairs and continuous scores, i...
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union import numpy as np from pydantic import Field from docarray.base_doc import BaseDoc from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.tensor.image....
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_doc import BaseDoc from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.tensor.image.image_tensor import ImageTen...
_base_ = './faster-rcnn_r50_fpn_1x_coco.py' model = dict( roi_head=dict( bbox_head=dict( reg_decoded_bbox=True, loss_bbox=dict(type='CIoULoss', loss_weight=12.0))))
_base_ = './faster_rcnn_r50_fpn_1x_coco.py' model = dict( roi_head=dict( bbox_head=dict( reg_decoded_bbox=True, loss_bbox=dict(type='CIoULoss', loss_weight=12.0))))
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)))
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)))