Columns: input (string, lengths 33 to 5k), output (string, lengths 32 to 5k)
# Copyright (c) OpenMMLab. All rights reserved. import pytest from mmdet.datasets import get_loading_pipeline, replace_ImageToTensor def test_replace_ImageToTensor(): # with MultiScaleFlipAug pipelines = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', ...
import pytest from mmdet.datasets import get_loading_pipeline, replace_ImageToTensor def test_replace_ImageToTensor(): # with MultiScaleFlipAug pipelines = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, ...
import torch import torchaudio.functional as F from parameterized import parameterized from torchaudio_unittest.common_utils import ( get_sinusoid, load_params, save_wav, skipIfNoExec, TempDirMixin, TestBaseMixin, ) from torchaudio_unittest.common_utils.kaldi_utils import convert_args, run_kaldi...
import torch import torchaudio.functional as F from parameterized import parameterized from torchaudio_unittest.common_utils import ( get_sinusoid, load_params, save_wav, skipIfNoExec, TempDirMixin, TestBaseMixin, ) from torchaudio_unittest.common_utils.kaldi_utils import ( convert_args, ...
from contextlib import suppress from docutils import nodes from docutils.parsers.rst import Directive from sklearn.utils import all_estimators from sklearn.utils._test_common.instance_generator import _construct_instance from sklearn.utils._testing import SkipTest class AllowNanEstimators(Directive): @staticmet...
from contextlib import suppress from docutils import nodes from docutils.parsers.rst import Directive from sklearn.utils import all_estimators from sklearn.utils._testing import SkipTest from sklearn.utils.estimator_checks import _construct_instance class AllowNanEstimators(Directive): @staticmethod def mak...
import os import numpy as np import pytest import torch from pydantic.tools import parse_obj_as from docarray import BaseDoc from docarray.typing import ( AudioNdArray, AudioTorchTensor, VideoNdArray, VideoTorchTensor, ) from docarray.utils._internal.misc import is_tf_available tf_available = is_tf_a...
import os import numpy as np import pytest import torch from pydantic.tools import parse_obj_as from docarray import BaseDoc from docarray.typing import ( AudioNdArray, AudioTorchTensor, VideoNdArray, VideoTorchTensor, ) from docarray.utils.misc import is_tf_available tf_available = is_tf_available()...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.vectorstores import MyScale, MyScaleSettings from langchain_community.vectorstores.myscale import MyScaleWithoutJSON # Create a way to dynamically look up deprecated imports. # Used to ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.vectorstores import MyScale, MyScaleSettings from langchain_community.vectorstores.myscale import MyScaleWithoutJSON # Create a way to dynamically look up deprecated imports. # Used to ...
from langchain_core.documents import Document from langchain.retrievers.document_compressors.listwise_rerank import LLMListwiseRerank def test_list_rerank() -> None: from langchain_openai import ChatOpenAI documents = [ Document("Sally is my friend from school"), Document("Steve is my friend...
from langchain_core.documents import Document from langchain.retrievers.document_compressors.listwise_rerank import LLMListwiseRerank def test_list_rerank() -> None: from langchain_openai import ChatOpenAI documents = [ Document("Sally is my friend from school"), Document("Steve is my friend...
from llama_index.core.schema import Document from llama_index.core.tools.tool_spec.base import BaseToolSpec from box_sdk_gen import BoxClient from llama_index.readers.box.BoxAPI.box_api import ( box_check_connection, get_box_files_details, get_files_ai_extract_data, add_extra_header_to_box_client, ) ...
from llama_index.core.schema import Document from llama_index.core.tools.tool_spec.base import BaseToolSpec from box_sdk_gen import BoxClient from llama_index.readers.box.BoxAPI.box_api import ( box_check_connection, get_box_files_details, get_files_ai_extract_data, add_extra_header_to_box_client, ) ...
import os.path from typing import Any, Callable, List, Optional, Tuple from PIL import Image from .vision import VisionDataset class CocoDetection(VisionDataset): """`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset. It requires the `COCO API to be installed <https://github.com/pdollar...
import os.path from typing import Any, Callable, List, Optional, Tuple from PIL import Image from .vision import VisionDataset class CocoDetection(VisionDataset): """`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset. It requires the `COCO API to be installed <https://github.com/pdollar...
import os import pytest from llama_index.core.tools.tool_spec.base import BaseToolSpec from llama_index.tools.mcp import BasicMCPClient, McpToolSpec # Path to the test server script - adjust as needed SERVER_SCRIPT = os.path.join(os.path.dirname(__file__), "server.py") @pytest.fixture(scope="session") def client() ...
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.mcp import McpToolSpec


def test_class():
    names_of_base_classes = [b.__name__ for b in McpToolSpec.__mro__]
    assert BaseToolSpec.__name__ in names_of_base_classes
import pytest from xgboost import testing as tm pytestmark = [ pytest.mark.skipif(**tm.no_spark()), tm.timeout(120), ] from ..test_with_spark.test_data import run_dmatrix_ctor @pytest.mark.skipif(**tm.no_cudf()) @pytest.mark.parametrize( "is_feature_cols,is_qdm", [(True, True), (True, False), (Fals...
import pytest from xgboost import testing as tm pytestmark = pytest.mark.skipif(**tm.no_spark()) from ..test_with_spark.test_data import run_dmatrix_ctor @pytest.mark.skipif(**tm.no_cudf()) @pytest.mark.parametrize( "is_feature_cols,is_qdm", [(True, True), (True, False), (False, True), (False, False)], ) d...
from contextlib import asynccontextmanager from datetime import timedelta from typing import Optional, List, Dict from urllib.parse import urlparse from mcp.client.session import ClientSession from mcp.client.sse import sse_client from mcp.client.stdio import stdio_client, StdioServerParameters class BasicMCPClient(...
from contextlib import asynccontextmanager from datetime import timedelta from typing import Optional, List, Dict from urllib.parse import urlparse from mcp.client.session import ClientSession from mcp.client.sse import sse_client from mcp.client.stdio import stdio_client, StdioServerParameters class BasicMCPClient(...
import json import os from typing import List import torch from torch import nn class LSTM(nn.Module): """Bidirectional LSTM running over word embeddings.""" def __init__( self, word_embedding_dimension: int, hidden_dim: int, num_layers: int = 1, dropout: float = 0, ...
import torch from torch import nn from typing import List import os import json class LSTM(nn.Module): """Bidirectional LSTM running over word embeddings.""" def __init__( self, word_embedding_dimension: int, hidden_dim: int, num_layers: int = 1, dropout: float = 0, ...
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True))) # MMEngine support the following two ways, users can choose # according to convenience # optim_wrapper = dict...
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
from .misc import find_latest_checkpoint
from .setup_env import setup_multi_processes

__all__ = [
    'get_root_logger', 'collect_env', 'find_latest_checkpoint',
    'setup_multi_processes'
]
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
from .misc import find_latest_checkpoint

__all__ = [
    'get_root_logger', 'collect_env', 'find_latest_checkpoint',
]
from __future__ import annotations import csv import os from . import InputExample class TripletReader(object): """Reads in the a Triplet Dataset: Each line contains (at least) 3 columns, one anchor column (s1), one positive example (s2) and one negative example (s3) """ def __init__( self,...
import csv import os from . import InputExample class TripletReader(object): """Reads in the a Triplet Dataset: Each line contains (at least) 3 columns, one anchor column (s1), one positive example (s2) and one negative example (s3) """ def __init__( self, dataset_folder, s1_...
import PIL.Image import torch from torchvision import datapoints from torchvision.transforms.functional import pil_to_tensor, to_pil_image from torchvision.utils import _log_api_usage_once from ._utils import _get_kernel, _register_kernel_internal def erase( inpt: torch.Tensor, i: int, j: int, h: in...
import PIL.Image import torch from torchvision import datapoints from torchvision.transforms.functional import pil_to_tensor, to_pil_image from torchvision.utils import _log_api_usage_once from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal @_register_explicit_noop(datapoints.Mask, d...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import subprocess import torch from mmengine.logging import print_log def parse_args(): parser = argparse.ArgumentParser( description='Process a checkpoint to be published') parser.add_argument('in_file', help='input checkpoint filename'...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import subprocess import torch def parse_args(): parser = argparse.ArgumentParser( description='Process a checkpoint to be published') parser.add_argument('in_file', help='input checkpoint filename') parser.add_argument('out_file', h...
from ._dsp import adsr_envelope, extend_pitch, oscillator_bank, sinc_impulse_response from .functional import add_noise, barkscale_fbanks, convolve, deemphasis, fftconvolve, preemphasis, speed __all__ = [ "add_noise", "adsr_envelope", "barkscale_fbanks", "convolve", "deemphasis", "extend_pitch"...
from ._dsp import adsr_envelope, extend_pitch, oscillator_bank, sinc_impulse_response from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve, speed __all__ = [ "add_noise", "adsr_envelope", "barkscale_fbanks", "convolve", "extend_pitch", "fftconvolve", "oscillator_bank",...
# Copyright (c) OpenMMLab. All rights reserved. from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS, AmpOptimWrapper, ApexOptimWrapper, BaseOptimWrapper, DefaultOptimWrapperConstructor, OptimWrapper, OptimWrapperDict, ZeroRedundancyOptim...
# Copyright (c) OpenMMLab. All rights reserved. from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS, AmpOptimWrapper, ApexOptimWrapper, BaseOptimWrapper, DeepSpeedOptimWrapper, DefaultOptimWrapperConstructor, OptimWrapper, OptimWrapperDi...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.linalg import cholesky from keras.src.ops.linalg import det from keras.src.ops.linalg import eig from keras.src.ops.linalg import eigh from keras.src.ops.linalg import inv from ke...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.linalg import cholesky from keras.src.ops.linalg import det from keras.src.ops.linalg import eig from keras.src.ops.linalg import eigh from keras.src.ops.linalg import inv from ke...
from typing import Any, Optional, Sequence from langchain_core._api.deprecation import deprecated from langchain_core.documents import BaseDocumentTransformer, Document from langchain_community.utilities.vertexai import get_client_info @deprecated( since="0.0.32", removal="1.0", alternative_import="lang...
from typing import Any, Optional, Sequence from langchain_core._api.deprecation import deprecated from langchain_core.documents import BaseDocumentTransformer, Document from langchain_community.utilities.vertexai import get_client_info @deprecated( since="0.0.32", removal="1.0", alternative_import="lang...
from __future__ import annotations from typing import Any from langchain_core.output_parsers import BaseOutputParser from langchain_core.output_parsers.json import parse_and_check_json_markdown from pydantic import BaseModel from langchain.output_parsers.format_instructions import ( STRUCTURED_FORMAT_INSTRUCTION...
from __future__ import annotations from typing import Any from langchain_core.output_parsers import BaseOutputParser from langchain_core.output_parsers.json import parse_and_check_json_markdown from pydantic import BaseModel from langchain.output_parsers.format_instructions import ( STRUCTURED_FORMAT_INSTRUCTION...
import os import grpc import pytest from jina import Flow, __default_host__ from jina.clients import Client from jina.excepts import PortAlreadyUsed from jina.helper import is_port_free from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime as _GRPCGatewayRuntime from jina.serve.runtimes.helper import _get_g...
import os import grpc import pytest from jina import Flow, __default_host__ from jina.clients import Client from jina.excepts import PortAlreadyUsed from jina.helper import is_port_free from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime as _GRPCGatewayRuntime from tests import random_docs @pytest.fixtu...
from backend.data.credit import get_user_credit_model from backend.data.execution import ( ExecutionResult, NodeExecutionEntry, RedisExecutionEventBus, create_graph_execution, get_execution_results, get_incomplete_executions, get_latest_execution, update_execution_status, update_grap...
from backend.data.credit import get_user_credit_model from backend.data.execution import ( ExecutionResult, NodeExecutionEntry, RedisExecutionEventBus, create_graph_execution, get_execution_results, get_incomplete_executions, get_latest_execution, update_execution_status, update_grap...
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
_base_ = './panoptic_fpn_r50_fpn_1x_coco.py' # In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], # multiscale_mode='range' train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadPanopticAnnotations', with_bbox=True, with_mask=True, with_seg=True), dict(...
_base_ = './panoptic_fpn_r50_fpn_1x_coco.py' # In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], # multiscale_mode='range' train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadPanopticAnnotations', with_bbox=True, with_mask=True, with_seg=True), dict(...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.dtype_policies import deserialize as deserialize from keras.src.dtype_policies import get as get from keras.src.dtype_policies import serialize as serialize from keras.src.dtype_polic...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.dtype_policies import deserialize from keras.src.dtype_policies import get from keras.src.dtype_policies import serialize from keras.src.dtype_policies.dtype_policy import DTypePolicy...
import json import multiprocessing import os import time import pytest from jina.helper import random_port from jina.parsers import set_gateway_parser, set_pod_parser from jina.serve.runtimes.gateway import GatewayRuntime from jina.serve.runtimes.worker import WorkerRuntime from tests.helper import ( ProcessExecu...
import json import multiprocessing import os import time import pytest from docarray import DocumentArray from jina import Executor, requests from jina.helper import random_port from jina.parsers import set_gateway_parser, set_pod_parser from jina.serve.runtimes.gateway import GatewayRuntime from jina.serve.runtimes....
from typing import List import numpy as np from torch.utils.data import Dataset from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available from sentence_transformers.readers.InputExample import InputExample class DenoisingAutoEncoderDataset(Dataset): """ The DenoisingAutoEncoderDataset...
from torch.utils.data import Dataset from typing import List from ..readers.InputExample import InputExample import numpy as np from transformers.utils.import_utils import is_nltk_available, NLTK_IMPORT_ERROR class DenoisingAutoEncoderDataset(Dataset): """ The DenoisingAutoEncoderDataset returns InputExamples...
import asyncio from typing import AsyncIterator, Iterator, Optional, Union from jina.helper import get_or_reuse_loop class RequestsCounter: """Class used to wrap a count integer so that it can be updated inside methods. .. code-block:: python def count_increment(i: int, rc: RequestCounter): ...
import asyncio from typing import AsyncIterator, Iterator, Optional, Union from jina.helper import get_or_reuse_loop class RequestsCounter: """Class used to wrap a count integer so that it can be updated inside methods. .. code-block:: python def count_increment(i: int, rc: RequestCounter): ...
# Copyright (c) OpenMMLab. All rights reserved. from .class_aware_sampler import ClassAwareSampler from .distributed_sampler import DistributedSampler from .group_sampler import DistributedGroupSampler, GroupSampler from .infinite_sampler import InfiniteBatchSampler, InfiniteGroupBatchSampler __all__ = [ 'Distribu...
# Copyright (c) OpenMMLab. All rights reserved. from .distributed_sampler import DistributedSampler from .group_sampler import DistributedGroupSampler, GroupSampler from .infinite_sampler import InfiniteBatchSampler, InfiniteGroupBatchSampler __all__ = [ 'DistributedSampler', 'DistributedGroupSampler', 'GroupSampl...
# Copyright (c) OpenMMLab. All rights reserved. from .collect_env import collect_env from .compat_config import compat_cfg from .logger import get_caller_name, get_root_logger, log_img_scale from .misc import find_latest_checkpoint, update_data_root from .setup_env import setup_multi_processes from .split_batch import ...
# Copyright (c) OpenMMLab. All rights reserved. from .collect_env import collect_env from .compat_config import compat_cfg from .logger import get_caller_name, get_root_logger, log_img_scale from .misc import find_latest_checkpoint, update_data_root from .setup_env import setup_multi_processes from .split_batch import ...
""" This is a simple application for sentence embeddings: clustering Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied. """ from sentence_transformers import SentenceTransformer from sklearn.cluster import AgglomerativeClustering embedder = SentenceTransformer(...
""" This is a simple application for sentence embeddings: clustering Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied. """ from sentence_transformers import SentenceTransformer from sklearn.cluster import AgglomerativeClustering embedder = SentenceTransformer(...
import re import unicodedata import regex # non-ASCII letters that are not separated by "NFKD" normalization ADDITIONAL_DIACRITICS = { "œ": "oe", "Œ": "OE", "ø": "o", "Ø": "O", "æ": "ae", "Æ": "AE", "ß": "ss", "ẞ": "SS", "đ": "d", "Đ": "D", "ð": "d", "Ð": "D", "þ": ...
import re import unicodedata import regex # non-ASCII letters that are not separated by "NFKD" normalization ADDITIONAL_DIACRITICS = { "œ": "oe", "Œ": "OE", "ø": "o", "Ø": "O", "æ": "ae", "Æ": "AE", "ß": "ss", "ẞ": "SS", "đ": "d", "Đ": "D", "ð": "d", "Ð": "D", "þ": ...
from typing import TypeVar from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow T = TypeVar('T', bound='ImageTensorFlowTensor') @_register_pr...
from typing import TypeVar from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow T = TypeVar('T', bound='ImageTensorFlowTensor') @_register_pr...
from pathlib import Path from typing import List, Tuple, Union import torch import torchaudio from torch.utils.data import Dataset SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]] _TASKS_TO_MIXTURE = { "sep_clean": "mix_clean", "enh_single": "mix_single", "enh_both": "mix_both", "sep_noisy":...
from pathlib import Path from typing import List, Tuple, Union import torch import torchaudio from torch.utils.data import Dataset SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]] class LibriMix(Dataset): r"""*LibriMix* :cite:`cosentino2020librimix` dataset. Args: root (str or Path): The p...
from typing import Any, Dict, List, Optional, Sequence, Type, Union import PIL.Image import torch from torchvision import tv_tensors from torchvision.prototype.tv_tensors import Label, OneHotLabel from torchvision.transforms.v2 import functional as F, Transform from torchvision.transforms.v2._utils import ( _Fill...
from typing import Any, Dict, List, Optional, Sequence, Type, Union import PIL.Image import torch from torchvision import datapoints from torchvision.prototype.datapoints import Label, OneHotLabel from torchvision.transforms.v2 import functional as F, Transform from torchvision.transforms.v2._utils import ( _Fill...
_base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # dataset settings input_size = 300 train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( ...
_base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # dataset settings input_size = 300 train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( ...
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If ex...
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If ex...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( roi_head=dict( bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15))) # runtime settings max_epochs = 15 train_c...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( roi_head=dict( bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15))) # runtime settings runner = dict(type='Epo...
from langchain_core.documents import Document from langchain_core.retrievers import BaseRetriever class SequentialRetriever(BaseRetriever): """Test util that returns a sequence of documents""" sequential_responses: list[list[Document]] response_index: int = 0 def _get_relevant_documents( # type: ig...
from langchain_core.retrievers import BaseRetriever, Document class SequentialRetriever(BaseRetriever): """Test util that returns a sequence of documents""" sequential_responses: list[list[Document]] response_index: int = 0 def _get_relevant_documents( # type: ignore[override] self, ...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import torch import torch.nn as nn from mmcv import ops from mmengine.model import BaseModule class BaseRoIExtractor(BaseModule, metaclass=ABCMeta): """Base class for RoI extractor. Args: roi_layer (dict): Specif...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import torch import torch.nn as nn from mmcv import ops from mmcv.runner import BaseModule class BaseRoIExtractor(BaseModule, metaclass=ABCMeta): """Base class for RoI extractor. Args: roi_layer (dict): Specify R...
_base_ = ['./mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa depths = [2, 2, 18, 2] model = dict( backbone=dict( depths=depths, init_cfg=dict(type='Pretrained', ...
_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa depths = [2, 2, 18, 2] model = dict( backbone=dict( depths=depths, init_cfg=dict(type='Pretrained', ...
from ._bounding_box import BoundingBox, BoundingBoxFormat from ._encoded import EncodedData, EncodedImage from ._feature import _Feature, FillType, FillTypeJIT, InputType, InputTypeJIT, is_simple_tensor from ._image import ColorSpace, Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT from ._label impo...
from ._bounding_box import BoundingBox, BoundingBoxFormat from ._encoded import EncodedData, EncodedImage from ._feature import _Feature, FillType, FillTypeJIT, InputType, InputTypeJIT, is_simple_tensor from ._image import ( ColorSpace, Image, ImageType, ImageTypeJIT, LegacyImageType, LegacyImag...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.applications.inception_resnet_v2 import ( InceptionResNetV2 as InceptionResNetV2, ) from keras.src.applications.inception_resnet_v2 import ( decode_predictions as decode_predi...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.applications.inception_resnet_v2 import InceptionResNetV2 from keras.src.applications.inception_resnet_v2 import decode_predictions from keras.src.applications.inception_resnet_v2 imp...
"""Test the loading function for evaluators.""" from typing import List import pytest from langchain.evaluation.loading import EvaluatorType, load_evaluators from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator from langchain_core.embeddings import FakeEmbeddings from tests.unit_tests.llm...
"""Test the loading function for evaluators.""" from typing import List import pytest from langchain.evaluation.loading import EvaluatorType, load_evaluators from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator from langchain_core.embeddings import FakeEmbeddings from tests.unit_tests.llm...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
import os import time import uuid import pytest @pytest.fixture(scope='session', autouse=True) def start_redis(): os.system( 'docker run --name redis-stack-server -p 6379:6379 -d redis/redis-stack-server:7.2.0-RC2' ) time.sleep(1) yield os.system('docker rm -f redis-stack-server') @pyt...
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config, load_dataset_builder from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset ...
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from data...
import json import logging from enum import Enum from typing import Any from requests.exceptions import HTTPError, RequestException from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request import requests logger = logging.getLo...
import json import logging from enum import Enum from typing import Any from requests.exceptions import HTTPError, RequestException from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request import requests logger = logging.getLo...
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
tta_model = dict( type='DetTTAModel', tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) img_scales = [(640, 640), (320, 320), (960, 960)] tta_pipeline = [ dict(type='LoadImageFromFile', backend_args=None), dict( type='TestTimeAug', transforms=[ [ ...
tta_model = dict( type='DetTTAModel', tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) img_scales = [(640, 640), (320, 320), (960, 960)] tta_pipeline = [ dict(type='LoadImageFromFile', backend_args=None), dict( type='TestTimeAug', transforms=[ [ ...
import warnings from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl from docarray.typing.url.filetypes import AUDIO_FILE_...
import warnings from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl from docarray.typing.url.filetypes import AUDIO_FILE_...
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module() class GridRCNN(TwoStageDetector): """Grid R-CNN. This detector is the implementation of: - Grid R-CNN (https://arxiv.org/abs/1811.12030) - Grid R-CNN Pl...
from ..builder import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module() class GridRCNN(TwoStageDetector): """Grid R-CNN. This detector is the implementation of: - Grid R-CNN (https://arxiv.org/abs/1811.12030) - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/190...
import warnings from typing import TYPE_CHECKING, Any, Type, TypeVar, Union from docarray.typing.bytes.video_bytes import VideoLoadResult from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl from docarray.utils._internal.misc import is_notebook if TYPE_CHECKING: ...
import warnings from typing import TYPE_CHECKING, Any, Type, TypeVar, Union from docarray.typing.bytes.video_bytes import VideoLoadResult from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl from docarray.utils.misc import is_notebook if TYPE_CHECKING: from pyd...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from mmengine.config import ConfigDict from mmengine.data import InstanceData from parameterized import parameterized from mmdet.models.roi_heads.mask_heads import GridHead from mmdet.models.utils import unpack_...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from mmengine.config import ConfigDict from mmengine.data import InstanceData from parameterized import parameterized from mmdet.models.roi_heads.mask_heads import GridHead from mmdet.models.utils import unpack_...
from ._conformer_wav2vec2 import ( conformer_wav2vec2_base, conformer_wav2vec2_model, conformer_wav2vec2_pretrain_base, conformer_wav2vec2_pretrain_large, conformer_wav2vec2_pretrain_model, ConformerWav2Vec2PretrainModel, ) from ._emformer_hubert import emformer_hubert_base, emformer_hubert_mode...
from ._conformer_wav2vec2 import ( conformer_wav2vec2_base, conformer_wav2vec2_model, conformer_wav2vec2_pretrain_base, conformer_wav2vec2_pretrain_large, conformer_wav2vec2_pretrain_model, ConformerWav2Vec2PretrainModel, ) from ._emformer_hubert import emformer_hubert_base, emformer_hubert_mode...
"""Tavily Search API toolkit.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools.tavily_search.tool import ( TavilyAnswer, TavilySearchResults, ) # Create a way to dynamically look up deprecated imports. # Used...
"""Tavily Search API toolkit.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools.tavily_search.tool import ( TavilyAnswer, TavilySearchResults, ) # Create a way to dynamically look up deprecated imports. # Used...
"""Test chat model integration.""" import json from collections.abc import Generator from contextlib import contextmanager from typing import Any import pytest from httpx import Client, Request, Response from langchain_core.messages import ChatMessage from langchain_tests.unit_tests import ChatModelUnitTests from la...
"""Test chat model integration.""" import json from langchain_tests.unit_tests import ChatModelUnitTests from langchain_ollama.chat_models import ChatOllama, _parse_arguments_from_tool_call class TestChatOllama(ChatModelUnitTests): @property def chat_model_class(self) -> type[ChatOllama]: return Ch...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools.google_finance.tool import GoogleFinanceQueryRun # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools.google_finance.tool import GoogleFinanceQueryRun # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling ...
_base_ = [ '../_base_/models/mask-rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( roi_head=dict( bbox_head=dict( num_classes=1203, cls_predictor_cfg=dict(type='NormedLinear', tem...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( roi_head=dict( bbox_head=dict( num_classes=1203, cls_predictor_cfg=dict(type='NormedLinear', tem...
from typing import TYPE_CHECKING, Union import numpy as np if TYPE_CHECKING: # pragma: no cover from docarray.typing import T import trimesh class Mesh: FILE_EXTENSIONS = [ 'glb', 'obj', 'ply', ] VERTICES = 'vertices' FACES = 'faces' class MeshDataMixin: """Pro...
import warnings from typing import TYPE_CHECKING import numpy as np if TYPE_CHECKING: # pragma: no cover from docarray.typing import T class MeshDataMixin: """Provide helper functions for :class:`Document` to support 3D mesh data and point cloud.""" def load_uri_to_point_cloud_tensor( self: 'T...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '3.2.0' short_version = __version__ def parse_version_info(version_str): """Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int | str]: The version info, e.g., "1.3.0" is parsed...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '3.1.0' short_version = __version__ def parse_version_info(version_str): """Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int | str]: The version info, e.g., "1.3.0" is parsed...
# Copyright (c) OpenMMLab. All rights reserved. from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset from .cityscapes import CityscapesDataset from .coco import CocoDataset from .coco_panoptic import CocoPanopticDataset from .custom import CustomDataset from .dataset_wrappers import (ClassBalancedD...
# Copyright (c) OpenMMLab. All rights reserved. from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset from .cityscapes import CityscapesDataset from .coco import CocoDataset from .coco_panoptic import CocoPanopticDataset from .custom import CustomDataset from .dataset_wrappers import (ClassBalancedD...
import functools import numbers from collections import defaultdict from typing import Any, Dict, Literal, Sequence, Type, TypeVar, Union from torchvision.prototype import datapoints from torchvision.prototype.datapoints._datapoint import FillType, FillTypeJIT from torchvision.transforms.transforms import _check_sequ...
import functools import numbers from collections import defaultdict from typing import Any, Dict, Literal, Sequence, Type, TypeVar, Union from torchvision.prototype import datapoints from torchvision.prototype.datapoints._datapoint import FillType, FillTypeJIT from torchvision.transforms.transforms import _check_sequ...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from mmengine.config import ConfigDict from mmengine.data import InstanceData from parameterized import parameterized from mmdet.models.roi_heads.mask_heads import FCNMaskHead class TestFCNMaskHead(TestCase): ...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from mmengine.config import ConfigDict from mmengine.data import InstanceData from parameterized import parameterized from mmdet.models.roi_heads.mask_heads import FCNMaskHead class TestFCNMaskHead(TestCase): ...
# Copyright (c) OpenMMLab. All rights reserved. from .base_bbox_coder import BaseBBoxCoder from .bucketing_bbox_coder import BucketingBBoxCoder from .delta_xywh_bbox_coder import (DeltaXYWHBBoxCoder, DeltaXYWHBBoxCoderForGLIP) from .distance_point_bbox_coder import DistancePointBBoxC...
# Copyright (c) OpenMMLab. All rights reserved. from .base_bbox_coder import BaseBBoxCoder from .bucketing_bbox_coder import BucketingBBoxCoder from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder from .distance_point_bbox_coder import DistancePointBBoxCoder from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBo...
# Copyright (c) OpenMMLab. All rights reserved. from .activations import SiLU from .bbox_nms import fast_nms, multiclass_nms from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d from .conv_upsample import ConvUpsample from .csp_layer import CSPLayer from .dropblock import DropBlock from .ema import ExpMom...
# Copyright (c) OpenMMLab. All rights reserved. from .activations import SiLU from .bbox_nms import fast_nms, multiclass_nms from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d from .conv_upsample import ConvUpsample from .csp_layer import CSPLayer from .dropblock import DropBlock from .ema import ExpMom...
""" This example loads the pre-trained SentenceTransformer model 'nli-distilroberta-base-v2' from Hugging Face. It then fine-tunes this model for some epochs on the STS benchmark dataset. Note: In this example, you must specify a SentenceTransformer model. If you want to fine-tune a huggingface/transformers model like...
""" This example loads the pre-trained SentenceTransformer model 'nli-distilroberta-base-v2' from Hugging Face. It then fine-tunes this model for some epochs on the STS benchmark dataset. Note: In this example, you must specify a SentenceTransformer model. If you want to fine-tune a huggingface/transformers model like...
import os import sys import torch from ._internally_replaced_utils import _get_extension_path _HAS_OPS = False def _has_ops(): return False try: # On Windows Python-3.8.x has `os.add_dll_directory` call, # which is called to configure dll search path. # To find cuda related dlls we need to make ...
import ctypes import os import sys from warnings import warn import torch from ._internally_replaced_utils import _get_extension_path _HAS_OPS = False def _has_ops(): return False try: # On Windows Python-3.8.x has `os.add_dll_directory` call, # which is called to configure dll search path. # To...
"""Test retriever tool.""" from typing import List, Optional from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.schema import NodeWithScore, TextNode, QueryBundle from llama_index.core.tools import RetrieverTool from llama_index.core.postprocessor.types import BaseNodePostprocessor i...
"""Test retriever tool.""" from typing import List, Optional from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.schema import NodeWithScore, TextNode, QueryBundle from llama_index.core.tools import RetrieverTool from llama_index.core.postprocessor.types import BaseNodePostprocessor i...
import os import urllib import pytest from pydantic import parse_obj_as, schema_json_of from docarray.base_document.io.json import orjson_dumps from docarray.typing import TextUrl REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen' CUR_DIR = os.path.dirname(os.path.abspath(__file__)) LOCAL_TXT = os.path.join(CUR_DIR...
import os import urllib import pytest from pydantic import parse_obj_as, schema_json_of from docarray.base_document.io.json import orjson_dumps from docarray.typing import TextUrl REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen' CUR_DIR = os.path.dirname(os.path.abspath(__file__)) LOCAL_TXT = os.path.join(CUR_DIR...
import asyncio import logging from typing import List from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document logger = logging.getLogger(__name__) class AsyncWebPageReader(BaseReader): """ Asynchronous web page reader. Reads pages from the web asynchronously. ...
import asyncio import logging from typing import List from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document logger = logging.getLogger(__name__) class AsyncWebPageReader(BaseReader): """Asynchronous web page reader. Reads pages from the web asynchronously. A...
import PIL.Image import torch from torchvision import tv_tensors from torchvision.transforms.functional import pil_to_tensor, to_pil_image from torchvision.utils import _log_api_usage_once from ._utils import _get_kernel, _register_kernel_internal def erase( inpt: torch.Tensor, i: int, j: int, h: in...
import PIL.Image import torch from torchvision import datapoints from torchvision.transforms.functional import pil_to_tensor, to_pil_image from torchvision.utils import _log_api_usage_once from ._utils import _get_kernel, _register_kernel_internal def erase( inpt: torch.Tensor, i: int, j: int, h: in...
import os from typing import Dict import numpy as np import pytest import xgboost from xgboost import testing as tm from xgboost.testing.ranking import run_normalization, run_score_normalization pytestmark = tm.timeout(30) def comp_training_with_rank_objective( dtrain: xgboost.DMatrix, dtest: xgboost.DMatr...
import os from typing import Dict import numpy as np import pytest import xgboost from xgboost import testing as tm from xgboost.testing.ranking import run_normalization pytestmark = tm.timeout(30) def comp_training_with_rank_objective( dtrain: xgboost.DMatrix, dtest: xgboost.DMatrix, rank_objective: s...
import sys import warnings import torch _onnx_opset_version_11 = 11 _onnx_opset_version_16 = 16 base_onnx_opset_version = _onnx_opset_version_11 def _register_custom_op(): from torch.onnx.symbolic_helper import parse_args from torch.onnx.symbolic_opset11 import select, squeeze, unsqueeze @parse_args("v...
import sys import warnings import torch _onnx_opset_version_11 = 11 _onnx_opset_version_16 = 16 base_onnx_opset_version = _onnx_opset_version_11 def _register_custom_op(): from torch.onnx.symbolic_helper import parse_args from torch.onnx.symbolic_opset11 import select, squeeze, unsqueeze from torch.onnx...
"""All minimum dependencies for scikit-learn.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import argparse from collections import defaultdict # scipy and cython should by in sync with pyproject.toml NUMPY_MIN_VERSION = "1.22.0" SCIPY_MIN_VERSION = "1.8.0" JOBLIB_MIN_VERSION = "1...
"""All minimum dependencies for scikit-learn.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import argparse from collections import defaultdict # scipy and cython should by in sync with pyproject.toml NUMPY_MIN_VERSION = "1.22.0" SCIPY_MIN_VERSION = "1.8.0" JOBLIB_MIN_VERSION = "1...
"""Utilities for JSON Schema.""" from __future__ import annotations from copy import deepcopy from typing import TYPE_CHECKING, Any, Optional if TYPE_CHECKING: from collections.abc import Sequence def _retrieve_ref(path: str, schema: dict) -> dict: components = path.split("/") if components[0] != "#": ...
from __future__ import annotations from copy import deepcopy from typing import TYPE_CHECKING, Any, Optional if TYPE_CHECKING: from collections.abc import Sequence def _retrieve_ref(path: str, schema: dict) -> dict: components = path.split("/") if components[0] != "#": msg = ( "ref p...
import importlib import os import re import types from typing import Any, Optional import numpy as np try: import torch # noqa: F401 except ImportError: torch_imported = False else: torch_imported = True try: import tensorflow as tf # type: ignore # noqa: F401 except (ImportError, TypeError): ...
import importlib import os import re import types from typing import Any, Optional import numpy as np try: import torch # noqa: F401 except ImportError: torch_imported = False else: torch_imported = True try: import tensorflow as tf # type: ignore # noqa: F401 except (ImportError, TypeError): ...
import types from typing import TYPE_CHECKING from docarray.index.backends.in_memory import InMemoryExactNNIndex from docarray.utils._internal.misc import ( _get_path_from_docarray_root_level, import_library, ) if TYPE_CHECKING: from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401 ...
import types from typing import TYPE_CHECKING from docarray.index.backends.in_memory import InMemoryExactNNIndex from docarray.utils._internal.misc import ( _get_path_from_docarray_root_level, import_library, ) if TYPE_CHECKING: from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401 ...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
"""DashVector reader.""" from typing import Dict, List, Optional import json from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class DashVectorReader(BaseReader): """ DashVector reader. Args: api_key (str): DashVector API key. endpoint (str...
"""DashVector reader.""" from typing import Dict, List, Optional import json from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class DashVectorReader(BaseReader): """DashVector reader. Args: api_key (str): DashVector API key. endpoint (str): Da...
import os from typing import Dict import numpy as np import pytest import xgboost from xgboost import testing as tm from xgboost.testing.ranking import run_normalization, run_score_normalization pytestmark = tm.timeout(30) def comp_training_with_rank_objective( dtrain: xgboost.DMatrix, dtest: xgboost.DMatr...
import os from typing import Dict import numpy as np import pytest import xgboost from xgboost import testing as tm from xgboost.testing.ranking import run_normalization, run_score_normalization pytestmark = tm.timeout(30) def comp_training_with_rank_objective( dtrain: xgboost.DMatrix, dtest: xgboost.DMatr...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch import torch.nn as nn from ..builder import LOSSES from .utils import weighted_loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def smooth_l1_loss(pred, target, beta=1.0): """Smooth L1 loss. Args: pred (torch.Tensor)...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch import torch.nn as nn from ..builder import LOSSES from .utils import weighted_loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def smooth_l1_loss(pred, target, beta=1.0): """Smooth L1 loss. Args: pred (torch.Tensor)...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser-ensembled...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser-ensembled...
""" This module provides dynamic access to deprecated Zapier tools in LangChain. It supports backward compatibility by forwarding references such as `ZapierNLAListActions` and `ZapierNLARunAction` to their updated locations in the `langchain_community.tools` package. Developers using older import paths will continue ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools import ZapierNLAListActions, ZapierNLARunAction # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling o...
__version__ = '0.30.0a3' import logging from docarray.array import DocArray, DocArrayStacked from docarray.base_doc.doc import BaseDoc __all__ = ['BaseDoc', 'DocArray', 'DocArrayStacked'] logger = logging.getLogger('docarray') handler = logging.StreamHandler() formatter = logging.Formatter("%(levelname)s - %(name)...
__version__ = '0.30.0a3' from docarray.array import DocumentArray, DocumentArrayStacked from docarray.base_document.document import BaseDocument import logging __all__ = ['BaseDocument', 'DocumentArray', 'DocumentArrayStacked'] logger = logging.getLogger('docarray') handler = logging.StreamHandler() formatter = log...
# mypy: allow-untyped-defs from torch.ao.quantization.pt2e.utils import _is_sym_size_node from torch.ao.quantization.quantizer.quantizer import QuantizationAnnotation from torch.fx import Node def _annotate_input_qspec_map(node: Node, input_node: Node, qspec): quantization_annotation = node.meta.get( "qu...
# mypy: allow-untyped-defs from torch.ao.quantization.pt2e.utils import _is_sym_size_node from torch.ao.quantization.quantizer.quantizer import QuantizationAnnotation from torch.fx import Node def _annotate_input_qspec_map(node: Node, input_node: Node, qspec): quantization_annotation = node.meta.get( "qu...
from __future__ import annotations from collections.abc import Iterable from torch import Tensor from sentence_transformers import util from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class Sparse...
from __future__ import annotations from collections.abc import Iterable from torch import Tensor from sentence_transformers import util from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class Sparse...
"""XGBoost Experimental Federated Learning related API.""" import ctypes from threading import Thread from typing import Any, Dict, Optional from .core import _LIB, _check_call, make_jcargs from .tracker import RabitTracker class FederatedTracker(RabitTracker): """Tracker for federated training. Parameters...
"""XGBoost Experimental Federated Learning related API.""" import ctypes from threading import Thread from typing import Any, Dict, Optional from .core import _LIB, _check_call, make_jcargs from .tracker import RabitTracker class FederatedTracker(RabitTracker): """Tracker for federated training. Parameters...
_base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] model = dict( backbone=dict( _delete_=True, type='RegNet', arch='regnetx_1.6gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_gr...
_base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] model = dict( backbone=dict( _delete_=True, type='RegNet', arch='regnetx_1.6gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_gr...
from typing import Any, Optional from llama_index.core.base.agent.types import TaskStepOutput, TaskStep from llama_index.core.bridge.pydantic import model_validator, field_validator from llama_index.core.instrumentation.events.base import BaseEvent from llama_index.core.chat_engine.types import ( AGENT_CHAT_RESPON...
from typing import Any, Optional from llama_index.core.base.agent.types import TaskStepOutput, TaskStep from llama_index.core.bridge.pydantic import model_validator, field_validator from llama_index.core.instrumentation.events.base import BaseEvent from llama_index.core.chat_engine.types import ( AGENT_CHAT_RESPON...
# Copyright (c) OpenMMLab. All rights reserved. import torch.utils.checkpoint as cp from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from .se_layer import SELayer class InvertedResidual(BaseModule): """Inverted Residual Block. Args: in_channels (int): The input channels of this Mod...
import torch.utils.checkpoint as cp from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from .se_layer import SELayer class InvertedResidual(BaseModule): """Inverted Residual Block. Args: in_channels (int): The input channels of this Module. out_channels (int): The output chan...
import inspect import logging import secrets from typing import Any, Callable, Optional from fastapi import HTTPException, Request, Security from fastapi.security import APIKeyHeader, HTTPBearer from starlette.status import HTTP_401_UNAUTHORIZED from .config import settings from .jwt_utils import parse_jwt_token sec...
import inspect import logging from typing import Any, Callable, Optional from fastapi import HTTPException, Request, Security from fastapi.security import APIKeyHeader, HTTPBearer from starlette.status import HTTP_401_UNAUTHORIZED from .config import settings from .jwt_utils import parse_jwt_token security = HTTPBea...
from collections.abc import Generator from langchain_huggingface.llms import HuggingFacePipeline def test_huggingface_pipeline_streaming() -> None: """Test streaming tokens from huggingface_pipeline.""" llm = HuggingFacePipeline.from_model_id( model_id="gpt2", task="text-generation", pipeline_kwargs=...
from typing import Generator from langchain_huggingface.llms import HuggingFacePipeline def test_huggingface_pipeline_streaming() -> None: """Test streaming tokens from huggingface_pipeline.""" llm = HuggingFacePipeline.from_model_id( model_id="gpt2", task="text-generation", pipeline_kwargs={"max_new...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.saving.file_editor import KerasFileEditor as KerasFileEditor from keras.src.saving.object_registration import ( CustomObjectScope as CustomObjectScope, ) from keras.src.saving.obj...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.saving.file_editor import KerasFileEditor from keras.src.saving.object_registration import CustomObjectScope from keras.src.saving.object_registration import ( CustomObjectScope a...
# deprecated, please use datasets.download.download_manager
# deprecated, please use daatsets.download.download_manager
from typing import Any from langchain_core._api import deprecated from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import BaseMessage, get_buffer_string from langchain.memory.chat_memory import BaseChatMemory @deprecated( since="0.3.1", removal="1.0.0", message=(...
from typing import Any, Dict, List from langchain_core._api import deprecated from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import BaseMessage, get_buffer_string from langchain.memory.chat_memory import BaseChatMemory @deprecated( since="0.3.1", removal="1.0.0", ...
import os from typing import Dict from jina import __default_executor__, __version__ from jina.enums import PodRoleType from jina.hubble.helper import parse_hub_uri from jina.hubble.hubio import HubIO def get_image_name(uses: str) -> str: """The image can be provided in different formats by the user. This fu...
import os from jina import __default_executor__, __version__ from jina.enums import PodRoleType from jina.hubble.helper import parse_hub_uri from jina.hubble.hubio import HubIO def get_image_name(uses: str) -> str: """The image can be provided in different formats by the user. This function converts it to an...
from typing import List, cast from llama_index.core.indices.vector_store.base import VectorStoreIndex from llama_index.core.schema import ( Document, NodeRelationship, QueryBundle, RelatedNodeInfo, TextNode, ) from llama_index.core.vector_stores.simple import SimpleVectorStore def test_simple_que...
from typing import List, cast from llama_index.core.indices.vector_store.base import VectorStoreIndex from llama_index.core.schema import ( Document, NodeRelationship, QueryBundle, RelatedNodeInfo, TextNode, ) from llama_index.core.vector_stores.simple import SimpleVectorStore def test_simple_que...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
from typing import Optional import numpy as np import pytest from docarray import BaseDoc, DocList, DocVec from docarray.typing import NdArray class Nested(BaseDoc): tensor: NdArray class Image(BaseDoc): features: Optional[Nested] = None def test_optional_field(): docs = DocVec[Image]([Image() for _...