input | output |
|---|---|
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegativesRankin... | from __future__ import annotations
from .CSRLoss import CSRLoss
from .CSRReconstructionLoss import CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import S... |
"""Copyright 2024, XGBoost contributors"""
import pytest
from dask_cuda import LocalCUDACluster
from distributed import Client
import xgboost as xgb
from xgboost import dask as dxgb
from xgboost.testing.dask import check_external_memory
@pytest.mark.parametrize("is_qdm", [True, False])
def test_external_memory(is_q... | """Copyright 2024, XGBoost contributors"""
import pytest
from dask_cuda import LocalCUDACluster
from distributed import Client
import xgboost as xgb
from xgboost.testing.dask import check_external_memory
@pytest.mark.parametrize("is_qdm", [True, False])
def test_external_memory(is_qdm: bool) -> None:
n_workers ... |
from functools import partial
from inspect import isclass
from typing import Any, Union, cast
from pydantic import BaseModel
from langchain_core.language_models import FakeListChatModel
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.messages import HumanMessa... | from functools import partial
from inspect import isclass
from typing import Any, Union, cast
from pydantic import BaseModel
from langchain_core.language_models import FakeListChatModel
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.messages import HumanMessa... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='DDOD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rg... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='DDOD',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=... |
from __future__ import annotations
__version__ = "4.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from senten... | from __future__ import annotations
__version__ = "4.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from senten... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import DocumentArray, Executor, requests
from jinahub.indexers.searcher.FaissSearcher import FaissSearcher
from jinahub.indexers.storage.LMDBStorage import LMDBStorage
... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import DocumentArray, Executor, requests
from jinahub.indexers.searcher.FaissSearcher import FaissSearcher
from jinahub.indexers.storage.LMDBStorage import LMDBStorage
... |
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocList
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
This Document is the LegacyDocument. It follows the same schema as in DocList v1.
It can be useful to sta... | from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocList
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
This Document is the LegacyDocument. It follows the same schema as in DocList v1.
It can be useful to sta... |
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Dict, Callable
import pytest
from jina import DocumentArray
from ...transform_encoder import TransformerTorchEncoder
MODELS_TO_TEST = [
'sentence-transformers/distilbert-base-nli-stsb-mean... | __copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Dict, Callable
import pytest
from jina import DocumentArray
from jinahub.encoder.transform_encoder import TransformerTorchEncoder
MODELS_TO_TEST = [
'sentence-transformers/distilbert-base-... |
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_la... | from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_la... |
from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class ComparisonOperator(Enum):
EQUAL = "=="
NOT_EQUAL = "!="
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUAL = ">="
L... | from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class ComparisonOperator(Enum):
EQUAL = "=="
NOT_EQUAL = "!="
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUAL = ">="
L... |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py',
'./retinanet_tta.py'
]
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
| _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _extract_tar
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "w... | import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive":... |
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/19... | from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/1912.08193>`_.
"""
def __init__(self,
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_sampler import BaseSampler
from .combined_sampler import CombinedSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .ohem_sampler import OHEMSampler
from .pseudo_sampler... | from .base_sampler import BaseSampler
from .combined_sampler import CombinedSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .ohem_sampler import OHEMSampler
from .pseudo_sampler import PseudoSampler
from .random_sampler impor... |
"""
Tatoeba (https://tatoeba.org/) is a collection of sentences and translations, mainly aimed at language learning.
It is available for more than 300 languages.
This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like
"""
import gzip
import os
import tarfile
impo... | """
Tatoeba (https://tatoeba.org/) is a collection of sentences and translations, mainly aimed at language learning.
It is available for more than 300 languages.
This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like
"""
import os
import sentence_transformers
imp... |
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode... | import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode... |
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (Collect, DefaultFormatB... | # Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (Collect, DefaultFormatB... |
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primar... | from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
... |
"""
==============================================
Regularization path of L1-Logistic Regression
==============================================
Train l1-penalized logistic regression models on a binary classification
problem derived from the Iris dataset.
The models are ordered from strongest regularized to least r... | """
==============================================
Regularization path of L1-Logistic Regression
==============================================
Train l1-penalized logistic regression models on a binary classification
problem derived from the Iris dataset.
The models are ordered from strongest regularized to least r... |
"""
LocalAI is a free, open source, and self-hosted OpenAI alternative.
Docs: https://localai.io/
Source: https://github.com/go-skynet/LocalAI
"""
import warnings
from types import MappingProxyType
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.core.base.llms.types import ChatMessage, LL... | """
LocalAI is a free, open source, and self-hosted OpenAI alternative.
Docs: https://localai.io/
Source: https://github.com/go-skynet/LocalAI
"""
import warnings
from types import MappingProxyType
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.core.base.llms.types import ChatMessage, LL... |
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
... |
from typing import Union
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor
tf_av... | from typing import Union
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor
tf_available = ... |
from typing import TYPE_CHECKING, Union, BinaryIO
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri, _get_file_context
if TYPE_CHECKING:
from docarray.typing import T
class BlobDataMixin:
"""Provide helper functions for :class:`Document` to handle binary data."""
def load_uri_to_blo... | from typing import TYPE_CHECKING, Union, BinaryIO
from .helper import _uri_to_blob, _to_datauri, _get_file_context
if TYPE_CHECKING:
from ...typing import T
class BlobDataMixin:
"""Provide helper functions for :class:`Document` to handle binary data."""
def load_uri_to_blob(self: 'T') -> 'T':
"... |
from pathlib import Path
from typing import List
import numpy as np
import pytest
import scipy
from jina import Document, DocumentArray, Executor
from jina.excepts import PretrainedModelFileDoesNotExist
from ...tfidf_text_executor import TFIDFTextEncoder
_EMBEDDING_DIM = 130107
@pytest.fixture(scope='session')
def... | from pathlib import Path
import numpy as np
import scipy
from jina import Document, DocumentArray, Executor
from ...tfidf_text_executor import TFIDFTextEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.path_vectorizer.endswith('tfidf_vectorizer.pic... |
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval... | import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval... |
"""Google Finance API Toolkit."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_finance.tool import GoogleFinanceQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising d... | """Google Finance API Toolkit."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_finance.tool import GoogleFinanceQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising d... |
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOO... | # Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOO... |
# Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .cspnext_pafpn import CSPNeXtPAFPN
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .dyhead import DyHead
from .fpg import FPG
from .fpn import FPN
from .fpn_caraf... | # Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .cspnext_pafpn import CSPNeXtPAFPN
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .dyhead import DyHead
from .fpg import FPG
from .fpn import FPN
from .fpn_caraf... |
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputParserException
from langchain_core.outp... | from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputParserException
from langchain_core.outp... |
import torch
from torchaudio.models import emformer_rnnt_model, RNNTBeamSearch
from torchaudio_unittest.common_utils import TestBaseMixin, torch_script
class RNNTBeamSearchTestImpl(TestBaseMixin):
def _get_input_config(self):
model_config = self._get_model_config()
return {
"batch_size... | import torch
from torchaudio.models import emformer_rnnt_model, RNNTBeamSearch
from torchaudio_unittest.common_utils import TestBaseMixin, torch_script
class RNNTBeamSearchTestImpl(TestBaseMixin):
def _get_input_config(self):
model_config = self._get_model_config()
return {
"batch_size... |
from __future__ import annotations
from typing import Callable
try:
from typing import Self
except ImportError:
from typing_extensions import Self
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
from sentence_transformers.util import fullname, import_from_string
class D... | from __future__ import annotations
import json
import os
from typing import Callable
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_... |
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, ImageUrl
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an AnyEmbedding (`Image.embedd... | from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import AnyTensor, Embedding, ImageUrl
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an Embedding (`Image.embedding`).... |
from pathlib import Path
import numpy as np
import paddlehub as hub
import pytest
from jina import Document, DocumentArray, Executor
from text_paddle import TextPaddleEncoder
@pytest.fixture(scope='function')
def model():
return hub.Module(name='ernie_tiny')
@pytest.fixture(scope='function')
def content():
... | from pathlib import Path
import numpy as np
import paddlehub as hub
import pytest
from jina import Document, DocumentArray, Executor
from ...text_paddle import TextPaddleEncoder
@pytest.fixture(scope='function')
def model():
return hub.Module(name='ernie_tiny')
@pytest.fixture(scope='function')
def content():... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.applications import convnext as convnext
from keras.applications import densenet as densenet
from keras.applications import efficientnet as efficientnet
from keras.applications import eff... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.applications import convnext
from keras.api.applications import densenet
from keras.api.applications import efficientnet
from keras.api.applications import efficientnet_v2
from keras.... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
In distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, ... | # Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
In distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, ... |
# coding: utf-8
"""LightGBM, Light Gradient Boosting Machine.
Contributors: https://github.com/microsoft/LightGBM/graphs/contributors.
"""
from pathlib import Path
from .basic import Booster, Dataset, Sequence, register_logger
from .callback import EarlyStopException, early_stopping, log_evaluation, record_evaluatio... | # coding: utf-8
"""LightGBM, Light Gradient Boosting Machine.
Contributors: https://github.com/microsoft/LightGBM/graphs/contributors.
"""
from pathlib import Path
from .basic import Booster, Dataset, Sequence, register_logger
from .callback import EarlyStopException, early_stopping, log_evaluation, record_evaluation... |
"""
Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task.
Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings.
It then computes an a... | """
Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task.
Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings.
It then computes an a... |
from __future__ import annotations
from collections.abc import Iterable
from typing import TYPE_CHECKING
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(Sentenc... | from __future__ import annotations
from collections.abc import Iterable
from typing import TYPE_CHECKING
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(Sentenc... |
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:... | """
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:... |
# Copyright (c) OpenMMLab. All rights reserved.
from .dropblock import DropBlock
__all__ = ['DropBlock']
| from .dropblock import DropBlock
__all__ = ['DropBlock']
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.resnet import ResNet50 as ResNet50
from keras.src.applications.resnet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.resnet import p... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.resnet import ResNet50
from keras.src.applications.resnet import decode_predictions
from keras.src.applications.resnet import preprocess_input
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str... | from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.comet_ml_callback import CometCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.comet_ml_callback import CometCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... |
import os
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.tools import tool
from pydantic import BaseModel
from langchain_community.chat_models import MiniMaxChat
def test_chat_minimax_not_group_id() -> None:
if "MINIMAX_GROUP_ID" in os.environ:
del os.enviro... | import os
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.tools import tool
from pydantic import BaseModel
from langchain_community.chat_models import MiniMaxChat
def test_chat_minimax_not_group_id() -> None:
if "MINIMAX_GROUP_ID" in os.environ:
del os.enviro... |
_base_ = './detic_centernet2_r50_fpn_4x_lvis_boxsup.py'
dataset_type = ['LVISV1Dataset', 'ImageNetLVISV1Dataset']
image_size_det = (640, 640)
image_size_cls = (320, 320)
# backend = 'pillow'
backend_args = None
train_pipeline_det = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAn... | _base_ = './detic_centernet2_r50_fpn_4x_lvis_boxsup.py'
image_size_det = (640, 640)
image_size_cls = (320, 320)
# backend = 'pillow'
backend_args = None
train_pipeline_det = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
... |
_base_ = 'deformable-detr_refine_r50_16xb2-50e_coco.py'
model = dict(bbox_head=dict(as_two_stage=True))
| _base_ = 'deformable_detr_refine_r50_16x2_50e_coco.py'
model = dict(bbox_head=dict(as_two_stage=True))
|
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydan... | import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydan... |
"""
This is an example of how to train SentenceTransformers in a multi-task setup.
The system trains BERT on the AllNLI and on the STSbenchmark dataset.
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses
from sentence_transformers import LoggingHandler, SentenceTrans... | """
This is an example of how to train SentenceTransformers in a multi-task setup.
The system trains BERT on the AllNLI and on the STSbenchmark dataset.
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses
from sentence_transformers import LoggingHandler, SentenceTransf... |
from docarray.typing.id import ID
from docarray.typing.tensor import NdArray, Tensor
from docarray.typing.tensor.embedding import Embedding
from docarray.typing.url import AnyUrl, ImageUrl, TextUrl
__all__ = [
'NdArray',
'Embedding',
'ImageUrl',
'TextUrl',
'AnyUrl',
'ID',
'Tensor',
]
try:
... | from docarray.typing.id import ID
from docarray.typing.tensor import NdArray, Tensor, TorchEmbedding, TorchTensor
from docarray.typing.tensor.embedding import Embedding
from docarray.typing.url import AnyUrl, ImageUrl, TextUrl
__all__ = [
'TorchTensor',
'NdArray',
'Embedding',
'ImageUrl',
'TextUrl'... |
import posixpath
from pathlib import Path
from unittest.mock import patch
import pytest
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path
from fsspec.registry import _registry as _fsspec_registry
class MockFileSystem(AbstractFileSystem):
protocol = "mock"
def __ini... | import posixpath
from pathlib import Path
from unittest.mock import patch
import pytest
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path
from fsspec.registry import _registry as _fsspec_registry
class MockFileSystem(AbstractFileSystem):
protocol = "mock"
def __ini... |
import unittest
import torch
from mmengine.data import PixelData
from mmengine.testing import assert_allclose
from mmdet.data_elements import DetDataSample
from mmdet.models.seg_heads import PanopticFPNHead
class TestPanopticFPNHead(unittest.TestCase):
def test_init_weights(self):
head = PanopticFPNHea... | import unittest
import torch
from mmengine.data import PixelData
from mmengine.testing import assert_allclose
from mmdet.core.data_structures import DetDataSample
from mmdet.models.seg_heads import PanopticFPNHead
class TestPanopticFPNHead(unittest.TestCase):
def test_init_weights(self):
head = Panopti... |
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'b... | # Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'b... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='MMDetec... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='MMDetec... |
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) for the STSbenchmark from scratch.
It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well even when removing some layers.
It generates sentence embeddings that can be compared using cosine-simi... | """
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) for the STSbenchmark from scratch.
It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well even when removing some layers.
It generates sentence embeddings that can be compared using cosine-simi... |
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then k-means clustering is applied.
"""
from sklearn.cluster import KMeans
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with examp... | """
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then k-means clustering is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
embedder = SentenceTransformer('all-MiniLM-L6-v2')
# Corpus with example... |
"""
Python polyfills for sys
"""
from __future__ import annotations
import sys
from ..decorators import substitute_in_graph
__all__ = [
"intern",
"getrecursionlimit",
]
@substitute_in_graph(sys.intern, can_constant_fold_through=True)
def intern(string: str, /) -> str:
return string
@substitute_in_g... | """
Python polyfills for sys
"""
from __future__ import annotations
import sys
from ..decorators import substitute_in_graph
__all__ = [
"intern",
"getrecursionlimit",
]
@substitute_in_graph(sys.intern, can_constant_fold_through=True)
def intern(string: str, /) -> str:
return string
@substitute_in_g... |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
import os
import subprocess
import time
from typing import List
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('do... | import os
import subprocess
import time
from typing import List
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('do... |
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl... | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl... |
import csv
import os
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _extract_tar
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive... | import csv
import os
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_arch... |
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(
num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2,
... | _base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(
num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2,
... |
import os
import platform
import sys
import pkg_resources
from setuptools import find_packages, setup
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux") and pla... | import os
import platform
import sys
import pkg_resources
from setuptools import find_packages, setup
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux") and pla... |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
It is designed for CrossEncoders ... | import csv
import logging
import os
from typing import List
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
It is designed for CrossEncoders with 2 or mo... |
"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
This example presents how to estimate and visualize the variance of the Receiver
Operating Characteristic (ROC) metric using cros... | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
This example presents how to estimate and visualize the variance of the Receiver
Operating Characteristic (ROC) metric using cros... |
from docarray.base_document.mixins.io import IOMixin
from docarray.base_document.mixins.update import UpdateMixin
__all__ = ['IOMixin', 'UpdateMixin']
| from docarray.base_document.mixins.proto import ProtoMixin
from docarray.base_document.mixins.update import UpdateMixin
__all__ = ['ProtoMixin', 'UpdateMixin']
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... |
"""
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimens... | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimens... |
# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless requir... | # coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless requir... |
"""Import Hugging Face transformers's wav2vec2.0 pretrained weights to torchaudios's format.
"""
import logging
from torch.nn import Module
from ..model import wav2vec2_model, Wav2Vec2Model
_LG = logging.getLogger(__name__)
def _get_config(cfg):
config = {
"extractor_mode": f"{cfg.feat_extract_norm}_no... | """Import Hugging Face transformers's wav2vec2.0 pretrained weights to torchaudios's format.
"""
import logging
from torch.nn import Module
from ..model import wav2vec2_model, Wav2Vec2Model
_LG = logging.getLogger(__name__)
def _get_config(cfg):
config = {
"extractor_mode": f"{cfg.feat_extract_norm}_no... |
"""Read PDF files using PyMuPDF library."""
from pathlib import Path
from typing import Dict, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PyMuPDFReader(BaseReader):
"""Read PDF files using PyMuPDF library."""
def load_data(
... | """Read PDF files using PyMuPDF library."""
from pathlib import Path
from typing import Dict, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PyMuPDFReader(BaseReader):
"""Read PDF files using PyMuPDF library."""
def load_data(
... |
import torch
from parameterized import parameterized
from torchaudio.prototype.models import squim_objective_base
from torchaudio_unittest.common_utils import skipIfNoCuda, torch_script, TorchaudioTestCase
class TestSQUIM(TorchaudioTestCase):
def _smoke_test_objective(self, model, device, dtype):
model = ... | import torch
from parameterized import parameterized
from torchaudio.prototype.models import squim_objective_base
from torchaudio_unittest.common_utils import skipIfNoCuda, torch_script, TorchaudioTestCase
class TestSQUIM(TorchaudioTestCase):
def _smoke_test_objective(self, model, device, dtype):
model = ... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import binary_focal_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.loss... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import binary_focal_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.loss... |
import pathlib
from argparse import ArgumentParser
def main(args):
wheel_path = pathlib.Path(args.wheel_path).expanduser().resolve()
if not wheel_path.exists():
raise ValueError(f"Wheel cannot be found at path {wheel_path}")
if not wheel_path.is_file():
raise ValueError(f"Path {wheel_path}... | import pathlib
from argparse import ArgumentParser
def main(args):
wheel_path = pathlib.Path(args.wheel_path).expanduser().resolve()
if not wheel_path.exists():
raise ValueError(f"Wheel cannot be found at path {wheel_path}")
if not wheel_path.is_file():
raise ValueError(f"Path {wheel_path}... |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... |
import subprocess
import pytest
from dpr_text import DPRTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') f... | import subprocess
import pytest
from dpr_text import DPRTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') f... |
from typing import List, Optional
from llama_index.core.instrumentation.events.base import BaseEvent
from llama_index.core.schema import NodeWithScore, QueryType
from llama_index.core.bridge.pydantic import ConfigDict
class ReRankStartEvent(BaseEvent):
"""
ReRankStartEvent.
Args:
query (QueryTyp... | from typing import List, Optional
from llama_index.core.instrumentation.events.base import BaseEvent
from llama_index.core.schema import NodeWithScore, QueryType
from llama_index.core.bridge.pydantic import ConfigDict
class ReRankStartEvent(BaseEvent):
"""ReRankStartEvent.
Args:
query (QueryType): Q... |
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
em... | _base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
emb... |
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.1'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versio... | # Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versio... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.powerbi.base import create_pbi_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling opt... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.powerbi.base import create_pbi_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling opt... |
import logging
import os
import threading
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns... | import logging
import os
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns a prefix string ... |
__version__ = '0.16.3'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
| __version__ = '0.16.2'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
class DataAdapter:
"""Base class for input data adapters.
The purpose of a DataAdapter is to provide a unified interface to
iterate over input data provided in a variety of formats -- such as
NumPy arrays, tf.Tensors, tf.data.Datasets, Keras PyDatasets, etc.
"""
def get_numpy_iterator(self):
... | class DataAdapter:
"""Base class for input data adapters.
The purpose of a DataAdapter is to provide a unified interface to
iterate over input data provided in a variety of formats -- such as
NumPy arrays, tf.Tensors, tf.data.Datasets, Keras PyDatasets, etc.
"""
def get_numpy_iterator(self):
... |
from typing import Union
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable
from langchain_... | from typing import Union
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable
from langchain_... |
import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-... | import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | import numpy as np
from docarray import BaseDoc
from docarray.typing import NdArray
def test_set_tensor():
class MyDocument(BaseDoc):
tensor: NdArray
d = MyDocument(tensor=np.zeros((3, 224, 224)))
assert isinstance(d.tensor, NdArray)
assert isinstance(d.tensor, np.ndarray)
assert (d.ten... |
import argparse
import os.path as osp
from mmengine.fileio import dump, load
def parse_args():
parser = argparse.ArgumentParser(
description='Generate COCO test image information '
'for COCO panoptic segmentation.')
parser.add_argument('data_root', help='Path to COCO annotation directory.')
... | import argparse
import os.path as osp
import mmcv
def parse_args():
parser = argparse.ArgumentParser(
description='Generate COCO test image information '
'for COCO panoptic segmentation.')
parser.add_argument('data_root', help='Path to COCO annotation directory.')
args = parser.parse_args... |
import os
import pytest
from docarray import Document
from jina import Executor, Flow, requests
class MyExec(Executor):
@requests
def foo(self, docs, **kwargs):
pass
@pytest.fixture
def cert_prefix():
cur_dir = os.path.dirname(os.path.abspath(__file__))
return f'{cur_dir}/../../../unit/ser... | import os
import pytest
from docarray import Document
from jina import Executor, Flow, requests
class MyExec(Executor):
@requests
def foo(self, docs, **kwargs):
pass
@pytest.fixture
def cert_prefix():
cur_dir = os.path.dirname(os.path.abspath(__file__))
return f'{cur_dir}/../../../unit/ser... |
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal
@_register_explicit_noop(
PIL.Image.Image, datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask, warn_pas... | import PIL.Image
import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal, is_simple_tensor
@_register_explicit_noop(
PIL.Image.Image, datapoints.Image, datapoints.BoundingBoxes, datapoi... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.mimetypes import OBJ_MIMETYPE
from docarray.utils._internal.misc import import_library
if TYPE_CH... |
from typing import Optional, List, Dict, Any, TYPE_CHECKING, Union
from pydantic import BaseModel, validator
from docarray.math.ndarray import to_list
if TYPE_CHECKING:
from docarray.typing import ArrayType
# this order must be preserved: https://pydantic-docs.helpmanual.io/usage/types/#unions
_ProtoValueType =... | from typing import Optional, List, Dict, Any, TYPE_CHECKING, Union
from pydantic import BaseModel, validator
from ..math.ndarray import to_list
if TYPE_CHECKING:
from ..typing import ArrayType
# this order must be preserved: https://pydantic-docs.helpmanual.io/usage/types/#unions
_ProtoValueType = Optional[Unio... |
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
... | # model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
dept... |
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
from tests.utils import is_ci
if not is_datasets_available():
pytest.skip(
... | from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
if not is_datasets_available():
pytest.skip(
reason="Datasets are n... |
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import Embedding, ImageUrl, Tensor
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), a Tensor (`Image.tensor`),
and an Embedding (`Image.embedding`).
E... | from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import Embedding, ImageUrl, Tensor
class Image(BaseDocument):
"""
base Document for Image handling
"""
uri: Optional[ImageUrl]
tensor: Optional[Tensor]
embedding: Optional[Embedding]
|
from typing import (
TYPE_CHECKING,
Sequence,
)
import numpy as np
from docarray.helper import typename
if TYPE_CHECKING:
from docarray.typing import (
DocumentArrayIndexType,
)
class DelItemMixin:
"""Provide help function to enable advanced indexing in `__delitem__`"""
def __delit... | from typing import (
TYPE_CHECKING,
Sequence,
)
import numpy as np
from ...helper import typename
if TYPE_CHECKING:
from ...typing import (
DocumentArrayIndexType,
)
class DelItemMixin:
"""Provide help function to enable advanced indexing in `__delitem__`"""
def __delitem__(self, i... |
from enum import Enum
from typing import TYPE_CHECKING, Union, overload
import numpy as np
if TYPE_CHECKING:
import torch
class Pooling(str, Enum):
"""Enum of possible pooling choices with pooling behaviors."""
CLS = "cls"
MEAN = "mean"
LAST = "last" # last token pooling
def __call__(self... | from enum import Enum
from typing import TYPE_CHECKING, Union, overload
import numpy as np
if TYPE_CHECKING:
import torch
class Pooling(str, Enum):
"""Enum of possible pooling choices with pooling behaviors."""
CLS = "cls"
MEAN = "mean"
LAST = "last" # last token pooling
def __call__(self... |
_base_ = './yolox_s_8xb8-300e_coco.py'
# model settings
model = dict(
data_preprocessor=dict(batch_augments=[
dict(
type='BatchSyncRandomResize',
random_size_range=(320, 640),
size_divisor=32,
interval=10)
]),
backbone=dict(deepen_factor=0.33, widen_f... | _base_ = './yolox_s_8xb8-300e_coco.py'
# model settings
model = dict(
data_preprocessor=dict(batch_augments=[
dict(
type='BatchSyncRandomResize',
random_size_range=(320, 640),
size_divisor=32,
interval=10)
]),
backbone=dict(deepen_factor=0.33, widen_f... |
_base_ = './mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
... | _base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases a... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class Empt... |
from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from torchvision.datapoints._datapoint import Datapoint
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(Datapoint):
categories: Optional[Sequence[str... | from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from ._datapoint import Datapoint
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(Datapoint):
categories: Optional[Sequence[str]]
@classmethod
... |