input: stringlengths 33-5k
output: stringlengths 32-5k
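The two fields above describe a paired string dataset: each row holds an "input" snippet and a corresponding "output" snippet, each roughly 32 to 5k characters long, listed alternately below. As a minimal sketch of how such rows could be loaded and sanity-checked, assuming they are published via the Hugging Face "datasets" library (the dataset id below is a hypothetical placeholder, not the real identifier):

# A minimal sketch, not taken from this dataset's card: "user/code-edit-pairs"
# is a hypothetical placeholder for wherever these rows are actually hosted.
from datasets import load_dataset

ds = load_dataset("user/code-edit-pairs", split="train")  # placeholder id

# Check the schema implied by the header: both columns are strings whose
# lengths fall roughly in the 32..5000 character range.
for row in ds.select(range(10)):
    assert isinstance(row["input"], str) and isinstance(row["output"], str)
    print(len(row["input"]), len(row["output"]))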
from sentence_transformers import SentenceTransformer, losses, util class AnglELoss(losses.CoSENTLoss): def __init__(self, model: SentenceTransformer, scale: float = 20.0) -> None: """ This class implements AnglE (Angle Optimized) loss. This is a modification of :class:`CoSENTLoss`, design...
from sentence_transformers import SentenceTransformer, losses, util class AnglELoss(losses.CoSENTLoss): def __init__(self, model: SentenceTransformer, scale: float = 20.0): """ This class implements AnglE (Angle Optimized) loss. This is a modification of :class:`CoSENTLoss`, designed to ad...
from jina import Flow import os os.environ['JINA_LOG_LEVEL'] = 'DEBUG' if __name__ == '__main__': with Flow.load_config('flow.yml') as f: f.block()
from jina import Flow if __name__ == '__main__': with Flow.load_config('flow.yml') as f: f.block()
from . import InputExample import os class LabelSentenceReader: """Reads in a file that has at least two columns: a label and a sentence. This reader can for example be used with the BatchHardTripletLoss. Maps labels automatically to integers """ def __init__(self, folder, label_col_idx=0, senten...
from . import InputExample import os class LabelSentenceReader: """Reads in a file that has at least two columns: a label and a sentence. This reader can for example be used with the BatchHardTripletLoss. Maps labels automatically to integers""" def __init__(self, folder, label_col_idx=0, sentence_co...
# Copyright (c) OpenMMLab. All rights reserved. from .base_loop import BaseLoop from .checkpoint import (CheckpointLoader, find_latest_checkpoint, get_deprecated_model_names, get_external_models, get_mmcls_models, get_state_dict, get_torchvision...
# Copyright (c) OpenMMLab. All rights reserved. from .base_loop import BaseLoop from .checkpoint import (CheckpointLoader, get_deprecated_model_names, get_external_models, get_mmcls_models, get_state_dict, get_torchvision_models, load_checkpoint, ...
from typing import Optional import numpy as np import pytest from pydantic import BaseModel, ValidationError from typing_extensions import TypedDict from docarray import BaseDocument, DocumentArray from docarray.documents import AudioDoc, ImageDoc, TextDoc from docarray.documents.helper import ( create_doc, c...
from typing import Optional import numpy as np import pytest from pydantic import BaseModel from typing_extensions import TypedDict from docarray import BaseDocument, DocumentArray from docarray.documents import AudioDoc, ImageDoc, TextDoc from docarray.documents.helper import create_doc, create_from_typeddict from d...
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union from uuid import UUID from pydantic import parse_obj_as from docarray.typing.proto_register import _register_proto from docarray.utils._internal.pydantic import is_pydantic_v2 if TYPE_CHECKING: from docarray.proto import NodeProto from docarray.typing....
from typing import TYPE_CHECKING, Type, TypeVar, Union from uuid import UUID from pydantic import BaseConfig, parse_obj_as from pydantic.fields import ModelField from docarray.typing.proto_register import _register_proto if TYPE_CHECKING: from docarray.proto import NodeProto from docarray.typing.abstract_type i...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from pathlib import Path from typing import List import pytest from jina import Document, DocumentArray, Executor from ...spacy_text_encoder import SpacyTextEncoder _EMBEDDING_DIM = 96 @pytest.fixture(scope=...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os from pathlib import Path import pytest import spacy from jina import Document, DocumentArray, Executor from ...spacy_text_encoder import SpacyTextEncoder def test_config(): ex = Executor.load_c...
_base_ = './tood_r50_fpn_1x_coco.py' max_epochs = 24 # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=max_epochs, by_epoch=True, milestones=[16, 22], g...
_base_ = './tood_r50_fpn_1x_coco.py' max_epochs = 24 # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=max_epochs, by_epoch=True, milestones=[16, 22], g...
from __future__ import annotations from collections.abc import Iterable from torch import Tensor from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseTripletLoss(TripletLoss): def __init_...
from __future__ import annotations from collections.abc import Iterable from torch import Tensor from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseTripletLoss(TripletLoss): def __init_...
import pprint import torch from torch.utils._pytree import tree_map, tree_map_only class OpenRegTensorMeta: def __init__(self, tensor, checked=True): if checked and not tensor.device.type == "openreg": raise RuntimeError( "Creating OpenRegTensorMeta is only for Tensors on open...
import pprint import torch from torch.utils._pytree import tree_map, tree_map_only class OpenRegTensorMeta: def __init__(self, tensor, checked=True): if checked and not tensor.device.type == "openreg": raise RuntimeError( "Creating OpenRegTensorMeta is only for Tensors on open...
from __future__ import annotations import logging from datasets import load_dataset from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction from sentence_transformers.models import Pooli...
from __future__ import annotations import logging from datasets import load_dataset from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction from sentence_transformers.models import Pooli...
__version__ = '0.30.0a3' from docarray.array import DocumentArray, DocumentArrayStacked from docarray.base_document.document import BaseDocument import logging __all__ = ['BaseDocument', 'DocumentArray', 'DocumentArrayStacked'] logger = logging.getLogger('docarray') handler = logging.StreamHandler() formatter = log...
__version__ = '0.30.0a3' from docarray.array import DocumentArray, DocumentArrayStacked from docarray.base_document.document import BaseDocument __all__ = ['BaseDocument', 'DocumentArray', 'DocumentArrayStacked']
import pytest import torchaudio from torchaudio.pipelines import ( HUBERT_ASR_LARGE, HUBERT_ASR_XLARGE, HUBERT_BASE, HUBERT_LARGE, HUBERT_XLARGE, VOXPOPULI_ASR_BASE_10K_DE, VOXPOPULI_ASR_BASE_10K_EN, VOXPOPULI_ASR_BASE_10K_ES, VOXPOPULI_ASR_BASE_10K_FR, VOXPOPULI_ASR_BASE_10K_IT,...
import pytest import torchaudio from torchaudio.pipelines import ( HUBERT_ASR_LARGE, HUBERT_ASR_XLARGE, HUBERT_BASE, HUBERT_LARGE, HUBERT_XLARGE, VOXPOPULI_ASR_BASE_10K_DE, VOXPOPULI_ASR_BASE_10K_EN, VOXPOPULI_ASR_BASE_10K_ES, VOXPOPULI_ASR_BASE_10K_FR, VOXPOPULI_ASR_BASE_10K_IT,...
from typing import Union from docarray.typing.tensor.video.video_ndarray import VideoNdArray from docarray.utils._internal.misc import is_tf_available, is_torch_available torch_available = is_torch_available() if torch_available: from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor tf_av...
from typing import Union from docarray.typing.tensor.video.video_ndarray import VideoNdArray from docarray.utils.misc import is_tf_available, is_torch_available torch_available = is_torch_available() if torch_available: from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor tf_available = ...
import logging from typing import Optional, cast from autogpt_libs.supabase_integration_credentials_store.types import ( UserIntegrations, UserMetadata, UserMetadataRaw, ) from fastapi import HTTPException from prisma import Json from prisma.models import User from backend.data.db import prisma from backe...
import logging from typing import Optional, cast from autogpt_libs.supabase_integration_credentials_store.types import ( UserIntegrations, UserMetadata, UserMetadataRaw, ) from fastapi import HTTPException from prisma import Json from prisma.models import User from backend.data.db import prisma from backe...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) fil...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) fil...
import os from typing import Dict from hubble.executor.helper import parse_hub_uri from hubble.executor.hubio import HubIO from jina import ( __default_executor__, __default_grpc_gateway__, __default_http_gateway__, __default_websocket_gateway__, __version__, ) from jina.enums import PodRoleType ...
import os from typing import Dict from hubble.executor.helper import parse_hub_uri from hubble.executor.hubio import HubIO from jina import ( __default_executor__, __default_grpc_gateway__, __default_http_gateway__, __default_websocket_gateway__, __version__, ) from jina.enums import PodRoleType ...
_base_ = [ '../_base_/models/faster-rcnn_r50-caffe-dc5.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ ...
_base_ = [ '../_base_/models/faster_rcnn_r50_caffe_dc5.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ ...
import os import pytest from llama_index.core.base.llms.base import BaseLLM from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole from llama_index.llms.gemini import Gemini from llama_index.llms.gemini.utils import chat_message_to_gemini def test_embedding_class(): names_of_base_class...
from llama_index.core.base.llms.base import BaseLLM from llama_index.llms.gemini import Gemini def test_embedding_class(): names_of_base_classes = [b.__name__ for b in Gemini.__mro__] assert BaseLLM.__name__ in names_of_base_classes
from docarray.array.queryset.parser import QueryParser
from .parser import QueryParser
# Copyright (c) OpenMMLab. All rights reserved. from argparse import ArgumentParser, Namespace from pathlib import Path from tempfile import TemporaryDirectory from mmengine.config import Config from mmengine.utils import mkdir_or_exist try: from model_archiver.model_packaging import package_model from model_...
# Copyright (c) OpenMMLab. All rights reserved. from argparse import ArgumentParser, Namespace from pathlib import Path from tempfile import TemporaryDirectory from mmengine.config import Config from mmengine.utils import mkdir_or_exist try: from model_archiver.model_packaging import package_model from model_...
"""PDF Marker reader.""" from pathlib import Path from typing import Any, Dict, List, Optional from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class PDFMarkerReader(BaseReader): """ PDF Marker Reader. Reads a pdf to markdown format and tables with layout. ...
"""PDF Marker reader.""" from pathlib import Path from typing import Any, Dict, List, Optional from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class PDFMarkerReader(BaseReader): """ PDF Marker Reader. Reads a pdf to markdown format and tables with layout. ...
import wave from abc import ABC from typing import BinaryIO, TypeVar, Union from docarray.typing.tensor.abstract_tensor import AbstractTensor T = TypeVar('T', bound='AbstractAudioTensor') MAX_INT_16 = 2**15 class AbstractAudioTensor(AbstractTensor, ABC): def to_bytes(self): """ Convert audio te...
import wave from abc import ABC, abstractmethod from typing import BinaryIO, TypeVar, Union from docarray.typing.tensor.abstract_tensor import AbstractTensor T = TypeVar('T', bound='AbstractAudioTensor') class AbstractAudioTensor(AbstractTensor, ABC): @abstractmethod def to_audio_bytes(self): """ ...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
# Copyright (c) OpenMMLab. All rights reserved. from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS, AmpOptimWrapper, DefaultOptimWrapperConstructor, OptimWrapper, OptimWrapperDict, build_optim_wrapper) # yapf: disable from .scheduler import (ConstantLR, Consta...
# Copyright (c) OpenMMLab. All rights reserved. from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS, AmpOptimWrapper, DefaultOptimWrapperConstructor, OptimWrapper, OptimWrapperDict, build_optim_wrapper) # yapf: disable from .scheduler import (ConstantLR, Consta...
from io import BytesIO from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar import numpy as np from pydantic import parse_obj_as from pydantic.validators import bytes_validator from docarray.typing.abstract_type import AbstractType from docarray.typing.proto_register import _register_proto from docar...
from io import BytesIO from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar import numpy as np from pydantic import parse_obj_as from pydantic.validators import bytes_validator from docarray.typing.abstract_type import AbstractType from docarray.typing.proto_register import _register_proto if TYPE_C...
# Copyright (c) OpenMMLab. All rights reserved. import time from typing import Optional, Sequence, Union from mmengine.registry import HOOKS from .hook import Hook DATA_BATCH = Optional[Union[dict, tuple, list]] @HOOKS.register_module() class IterTimerHook(Hook): """A hook that logs the time spent during iterat...
# Copyright (c) OpenMMLab. All rights reserved. import time from typing import Optional, Sequence, Union from mmengine.registry import HOOKS from mmengine.structures import BaseDataElement from .hook import Hook DATA_BATCH = Optional[Sequence[dict]] @HOOKS.register_module() class IterTimerHook(Hook): """A hook ...
from __future__ import annotations from typing import Any, List from langchain_text_splitters.base import TextSplitter class SpacyTextSplitter(TextSplitter): """Splitting text using Spacy package. Per default, Spacy's `en_core_web_sm` model is used and its default max_length is 1000000 (it is the lengt...
from __future__ import annotations from typing import Any, List from langchain_text_splitters.base import TextSplitter class SpacyTextSplitter(TextSplitter): """Splitting text using Spacy package. Per default, Spacy's `en_core_web_sm` model is used and its default max_length is 1000000 (it is the lengt...
# Copyright (c) OpenMMLab. All rights reserved. from .builder import DATASETS from .coco import CocoDataset @DATASETS.register_module() class DeepFashionDataset(CocoDataset): CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', 'neckwear', 'headwear', 'eyeglass', 'belt', 'footw...
# Copyright (c) OpenMMLab. All rights reserved. from .builder import DATASETS from .coco import CocoDataset @DATASETS.register_module() class DeepFashionDataset(CocoDataset): CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', 'neckwear', 'headwear', 'eyeglass', 'belt', 'footw...
_base_ = './detr_r50_8xb2-500e_coco.py' model = dict( backbone=dict( depth=18, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), bbox_head=dict(in_channels=512))
_base_ = './detr_r50_8xb2-500e_coco.py' model = dict( backbone=dict( depth=18, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), neck=dict(in_channels=[64, 128, 256, 512]))
from typing import Union, Iterable, MutableSequence, Iterator from docarray.array.storage.memory.backend import needs_id2offset_rebuild from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin from docarray import Document class SequenceLikeMixin(BaseSequenceLikeMixin): """Implement sequence-like m...
from typing import Union, Iterable, MutableSequence, Iterator from docarray.array.storage.memory.backend import needs_id2offset_rebuild from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin from docarray import Document class SequenceLikeMixin(BaseSequenceLikeMixin): """Implement sequence-like m...
from typing import List, Union, Any from docarray.helper import dunder_get class GetAttributesMixin: """Provide helper functions for :class:`Document` to allow advanced set and get attributes""" def _get_attributes(self, *fields: str) -> Union[Any, List[Any]]: """Bulk fetch Document fields and retur...
from typing import List, Union, Any from ...helper import dunder_get class GetAttributesMixin: """Provide helper functions for :class:`Document` to allow advanced set and get attributes """ def _get_attributes(self, *fields: str) -> Union[Any, List[Any]]: """Bulk fetch Document fields and return a l...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools import O365SearchEvents from langchain_community.tools.office365.events_search import SearchEventsInput # Create a way to dynamically look up deprecated imports. # Used to consoli...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools import O365SearchEvents from langchain_community.tools.office365.events_search import SearchEventsInput # Create a way to dynamically look up deprecated imports. # Used to consoli...
from .sox_effects import apply_effects_file, apply_effects_tensor, effect_names, init_sox_effects, shutdown_sox_effects __all__ = [ "init_sox_effects", "shutdown_sox_effects", "effect_names", "apply_effects_tensor", "apply_effects_file", ]
from torchaudio._internal import module_utils as _mod_utils from .sox_effects import apply_effects_file, apply_effects_tensor, effect_names, init_sox_effects, shutdown_sox_effects if _mod_utils.is_sox_available(): import atexit init_sox_effects() atexit.register(shutdown_sox_effects) __all__ = [ "i...
import pytest from datasets.utils.version import Version @pytest.mark.parametrize( "other, expected_equality", [ (Version("1.0.0"), True), ("1.0.0", True), (Version("2.0.0"), False), ("2.0.0", False), ("1", False), ("a", False), (1, False), (Non...
import pytest from datasets.utils.version import Version @pytest.mark.parametrize( "other, expected_equality", [ (Version("1.0.0"), True), ("1.0.0", True), (Version("2.0.0"), False), ("2.0.0", False), ("1", False), ("a", False), (1, False), (Non...
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import tqdm as hf_tqdm from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlite3 i...
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlite3 import sq...
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField class WordCharacterCountBlock(Block): class Input(BlockSchema): text: str = SchemaField( description="Input text to count words and characters", placeholder="Ent...
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField class WordCharacterCountBlock(Block): class Input(BlockSchema): text: str = SchemaField( description="Input text to count words and characters", placeholder="Ent...
"""QuantileDMatrix related tests.""" import numpy as np import pytest from sklearn.model_selection import train_test_split import xgboost as xgb from .data import make_batches, make_categorical def check_ref_quantile_cut(device: str) -> None: """Check obtaining the same cut values given a reference.""" X, ...
"""QuantileDMatrix related tests.""" import numpy as np from sklearn.model_selection import train_test_split import xgboost as xgb from .data import make_batches def check_ref_quantile_cut(device: str) -> None: """Check obtaining the same cut values given a reference.""" X, y, _ = ( data[0] ...
import json from enum import Enum from typing import Any from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request import requests class HttpMethod(Enum): GET = "GET" POST = "POST" PUT = "PUT" DELETE = "DELETE" ...
import json from enum import Enum import requests from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField class HttpMethod(Enum): GET = "GET" POST = "POST" PUT = "PUT" DELETE = "DELETE" PATCH = "PATCH" OPTIONS = "OPTIONS" H...
import os from typing import Dict, List, Tuple, TypeVar, Union T = TypeVar("T") ListLike = Union[List[T], Tuple[T, ...]] NestedDataStructureLike = Union[T, List[T], Dict[str, T]] PathLike = Union[str, bytes, os.PathLike]
import os from typing import Dict, List, TypeVar, Union T = TypeVar("T") NestedDataStructureLike = Union[T, List[T], Dict[str, T]] PathLike = Union[str, bytes, os.PathLike]
import pytest from llama_index.llms.nvidia import NVIDIA @pytest.mark.integration def test_available_models(mode: dict) -> None: models = NVIDIA(**mode).available_models assert models assert isinstance(models, list) assert all(isinstance(model.id, str) for model in models)
import pytest from llama_index.llms.nvidia import NVIDIA @pytest.mark.integration() def test_available_models(mode: dict) -> None: models = NVIDIA(**mode).available_models assert models assert isinstance(models, list) assert all(isinstance(model.id, str) for model in models)
import json from json import JSONDecodeError from typing import Union from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish from langchain_core.exceptions import OutputParserException from langchain_core.messages import ( AIMessage, BaseMessage, ) from langchain_core.outputs import ...
import json from json import JSONDecodeError from typing import Union from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish from langchain_core.exceptions import OutputParserException from langchain_core.messages import ( AIMessage, BaseMessage, ) from langchain_core.outputs import ...
import os import numpy as np import pytest import requests from jina import Client, Document, Flow from tests import random_docs # noinspection PyUnresolvedReferences from tests.integration.crud import CrudIndexer PARAMS = {'top_k': 10} def rest_post(f, endpoint, documents): data = [d.to_dict() for d in docum...
import numpy as np import os import pytest import requests from jina import Flow, Document, Client from tests import random_docs # noinspection PyUnresolvedReferences from tests.integration.crud import CrudIndexer PARAMS = {'top_k': 10} def rest_post(f, endpoint, documents): data = [d.to_dict() for d in docume...
from functools import wraps from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast from backend.data.credit import get_user_credit_model from backend.data.execution import ( ExecutionResult, RedisExecutionEventBus, create_graph_execution, get_execution_results, get_incom...
from functools import wraps from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast from backend.data.credit import get_user_credit_model from backend.data.execution import ( ExecutionResult, create_graph_execution, get_execution_results, get_incomplete_executions, get_la...
from __future__ import annotations from typing import Optional, Type from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from pydantic import BaseModel from langchain_community.tools.playwright.base import BaseBrowserTool from langchain_community.tools.playwrig...
from __future__ import annotations from typing import Optional, Type from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from pydantic import BaseModel from langchain_community.tools.playwright.base import BaseBrowserTool from langchain_community.tools.playwrig...
import numpy as np import pytest from docarray import BaseDoc from docarray.base_doc.io.json import orjson_dumps from docarray.typing import NdArray, TorchTensor class NpDoc(BaseDoc): embedding: NdArray[3, 4] embedding_no_shape: NdArray class TorchDoc(BaseDoc): embedding: TorchTensor[3, 4] embeddin...
import numpy as np import pytest from docarray import BaseDoc from docarray.base_doc.io.json import orjson_dumps from docarray.typing import NdArray, TorchTensor class NpDoc(BaseDoc): embedding: NdArray[3, 4] embedding_no_shape: NdArray class TorchDoc(BaseDoc): embedding: TorchTensor[3, 4] embeddin...
from pydantic import BaseModel from backend.data.block import ( Block, BlockCategory, BlockManualWebhookConfig, BlockOutput, BlockSchema, ) from backend.data.model import SchemaField from backend.integrations.providers import ProviderName from backend.integrations.webhooks.compass import CompassWeb...
from pydantic import BaseModel from backend.data.block import ( Block, BlockCategory, BlockManualWebhookConfig, BlockOutput, BlockSchema, ) from backend.data.model import SchemaField from backend.integrations.webhooks.compass import CompassWebhookType class Transcription(BaseModel): text: str...
""" Opendal file and directory reader. A loader that fetches a file or iterates through a directory on AWS S3 or other compatible service. """ import asyncio import tempfile from pathlib import Path from typing import Any, Dict, List, Optional, Union, cast from llama_index.core.readers import SimpleDirectoryReader f...
"""Opendal file and directory reader. A loader that fetches a file or iterates through a directory on AWS S3 or other compatible service. """ import asyncio import tempfile from pathlib import Path from typing import Any, Dict, List, Optional, Union, cast from llama_index.core.readers import SimpleDirectoryReader fr...
_base_ = './mask-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py' model = dict( backbone=dict( depth=18, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), neck=dict(in_channels=[64, 128, 256, 512]))
_base_ = './mask_rcnn_r50_fpn_lsj_200e_8x8_fp16_coco.py' model = dict( backbone=dict( depth=18, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), neck=dict(in_channels=[64, 128, 256, 512]))
from __future__ import annotations import os import pytest from sentence_transformers import CrossEncoder, SentenceTransformer from sentence_transformers.models import Pooling, Transformer from sentence_transformers.util import is_datasets_available from tests.utils import SafeTemporaryDirectory if is_datasets_avai...
from __future__ import annotations import os import platform import tempfile import pytest from sentence_transformers import CrossEncoder, SentenceTransformer from sentence_transformers.models import Pooling, Transformer from sentence_transformers.util import is_datasets_available if is_datasets_available(): fr...
import os from pathlib import Path from torchaudio.datasets import vctk from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase # Used to generate a unique transcript for each dummy audio file _TRANSCRIPT = [ "Please call Stella", "Ask her to brin...
import os from pathlib import Path from torchaudio.datasets import vctk from torchaudio_unittest.common_utils import ( get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase, ) # Used to generate a unique transcript for each dummy audio file _TRANSCRIPT = [ "Please call Stel...
from typing import TYPE_CHECKING, Any from langchain_core.document_loaders import Blob, BlobLoader from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.document_loaders import ( FileSystemBlobLoader, YoutubeAudioLoader, ) # Create a way to dynamically look up...
from typing import TYPE_CHECKING, Any from langchain_core.document_loaders import Blob, BlobLoader from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.document_loaders import ( FileSystemBlobLoader, YoutubeAudioLoader, ) # Create a way to dynamically look up...
"""Handle chained inputs.""" from typing import Optional, TextIO _TEXT_COLOR_MAPPING = { "blue": "36;1", "yellow": "33;1", "pink": "38;5;200", "green": "32;1", "red": "31;1", } def get_color_mapping( items: list[str], excluded_colors: Optional[list] = None ) -> dict[str, str]: """Get map...
"""Handle chained inputs.""" from typing import Optional, TextIO _TEXT_COLOR_MAPPING = { "blue": "36;1", "yellow": "33;1", "pink": "38;5;200", "green": "32;1", "red": "31;1", } def get_color_mapping( items: list[str], excluded_colors: Optional[list] = None ) -> dict[str, str]: """Get map...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.model import is_model_wrapper from mmengine.runner import ValLoop from mmdet.registry import LOOPS @LOOPS.register_module() class TeacherStudentValLoop(ValLoop): """Loop for validation of model teacher and student.""" def run(self): """La...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.model import is_model_wrapper from mmengine.runner import ValLoop from mmdet.registry import LOOPS @LOOPS.register_module() class TeacherStudentValLoop(ValLoop): """Loop for validation of model teacher and student.""" def run(self): """L...
from typing import TYPE_CHECKING from .github import GitHubOAuthHandler from .google import GoogleOAuthHandler from .notion import NotionOAuthHandler if TYPE_CHECKING: from ..providers import ProviderName from .base import BaseOAuthHandler # --8<-- [start:HANDLERS_BY_NAMEExample] HANDLERS_BY_NAME: dict["Prov...
from .base import BaseOAuthHandler from .github import GitHubOAuthHandler from .google import GoogleOAuthHandler from .notion import NotionOAuthHandler # --8<-- [start:HANDLERS_BY_NAMEExample] HANDLERS_BY_NAME: dict[str, type[BaseOAuthHandler]] = { handler.PROVIDER_NAME: handler for handler in [ GitHub...
""" Collection of examples for using sklearn interface ================================================== For an introduction to XGBoost's scikit-learn estimator interface, see :doc:`/python/sklearn_estimator`. Created on 1 Apr 2015 @author: Jamie Hall """ import pickle import numpy as np from sklearn.datasets impo...
''' Collection of examples for using sklearn interface ================================================== For an introduction to XGBoost's scikit-learn estimator interface, see :doc:`/python/sklearn_estimator`. Created on 1 Apr 2015 @author: Jamie Hall ''' import pickle import numpy as np from sklearn.datasets impo...
# Copyright (c) OpenMMLab. All rights reserved. import copy import time from contextlib import contextmanager from typing import Generator, Optional from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock class DefaultScope(ManagerMixin): """Scope of current task used to reset the current ...
# Copyright (c) OpenMMLab. All rights reserved. import copy import time from contextlib import contextmanager from typing import Generator, Optional from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock class DefaultScope(ManagerMixin): """Scope of current task used to reset the current ...
import logging import numpy as np import os import csv from typing import Optional from sklearn.metrics import ndcg_score logger = logging.getLogger(__name__) class CERerankingEvaluator: """ This class evaluates a CrossEncoder model for the task of re-ranking. Given a query and a list of documents, it c...
import logging import numpy as np import os import csv from typing import Optional from sklearn.metrics import ndcg_score logger = logging.getLogger(__name__) class CERerankingEvaluator: """ This class evaluates a CrossEncoder model for the task of re-ranking. Given a query and a list of documents, it c...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig from .two_stage import TwoStageDetector @MODELS.register_module() class MaskScoringRCNN(TwoStageDetector): """Mask Scoring RCNN. https://arxiv.org/abs/1903.00241...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .two_stage import TwoStageDetector @MODELS.register_module() class MaskScoringRCNN(TwoStageDetector): """Mask Scoring RCNN. https://arxiv.org/abs/1903....
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production""" __version__ = "0.115.14" from starlette import status as status from .applications import FastAPI as FastAPI from .background import BackgroundTasks as BackgroundTasks from .datastructures import UploadFile as UploadFile fro...
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production""" __version__ = "0.115.13" from starlette import status as status from .applications import FastAPI as FastAPI from .background import BackgroundTasks as BackgroundTasks from .datastructures import UploadFile as UploadFile fro...
from typing import Any, Dict, List, Tuple, Type, cast, Set from docarray import BaseDoc, DocList from docarray.index.abstract import BaseDocIndex from docarray.utils.filter import filter_docs from docarray.utils.find import FindResult def _collect_query_args(method_name: str): # TODO: use partialmethod instead ...
from typing import Any, Dict, List, Tuple, Type, cast from docarray import BaseDoc, DocList from docarray.index.abstract import BaseDocIndex from docarray.utils.filter import filter_docs from docarray.utils.find import FindResult def _collect_query_args(method_name: str): # TODO: use partialmethod instead def i...
"""In memory document index.""" import operator import uuid from collections.abc import Sequence from typing import Any, Optional, cast from pydantic import Field from langchain_core._api import beta from langchain_core.callbacks import CallbackManagerForRetrieverRun from langchain_core.documents import Document fro...
import operator import uuid from collections.abc import Sequence from typing import Any, Optional, cast from pydantic import Field from langchain_core._api import beta from langchain_core.callbacks import CallbackManagerForRetrieverRun from langchain_core.documents import Document from langchain_core.indexing import ...
from typing import Union, BinaryIO, TYPE_CHECKING from docarray.document.mixins.helper import _uri_to_blob, _get_file_context if TYPE_CHECKING: from docarray.typing import T class UriFileMixin: """Provide helper functions for :class:`Document` to dump content to a file.""" def save_uri_to_file(self: 'T...
from typing import Union, BinaryIO, TYPE_CHECKING from .helper import _uri_to_blob, _get_file_context if TYPE_CHECKING: from ...typing import T class UriFileMixin: """Provide helper functions for :class:`Document` to dump content to a file.""" def save_uri_to_file(self: 'T', file: Union[str, BinaryIO])...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp import mmcv from mmcv import Config, DictAction from mmcv.runner import init_dist from tools.analysis_tools.benchmark import measure_inferense_speed def parse_args(): parser = argparse.ArgumentParser( descript...
import argparse import os import os.path as osp import mmcv from mmcv import Config, DictAction from mmcv.runner import init_dist from tools.analysis_tools.benchmark import measure_inferense_speed def parse_args(): parser = argparse.ArgumentParser( description='MMDet benchmark a model of FPS') parser...
"""Popular unsupervised clustering algorithms.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from ._affinity_propagation import AffinityPropagation, affinity_propagation from ._agglomerative import ( AgglomerativeClustering, FeatureAgglomeration, linkage_tree, ward_...
"""Popular unsupervised clustering algorithms.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from ._affinity_propagation import AffinityPropagation, affinity_propagation from ._agglomerative import ( AgglomerativeClustering, FeatureAgglomeration, linkage_tree, ward_...
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any, Callable import numpy as np from sentence_transformers.evaluation.NanoBEIREvaluator import NanoBEIREvaluator, dataset_name_to_id from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import ( ...
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any from sentence_transformers.evaluation.NanoBEIREvaluator import NanoBEIREvaluator, dataset_name_to_id from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import ( SparseInformationRetrievalE...
""" The :mod:`jina.proto` defines the protobuf used in jina. It is the core message protocol used in communicating between :class:`jina.orchestrate.deployments.Deployment`. It also defines the interface of a gRPC service. """
""" The :mod:`jina.proto` defines the protobuf used in jina. It is the core message protocol used in communicating between :class:`jina.orchestrate.deployments.BaseDeployment`. It also defines the interface of a gRPC service. """
import warnings from typing import TYPE_CHECKING, Optional, Tuple, TypeVar from docarray.typing import ImageBytes from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.image import ImageNdArray from docarray.typing.url.any_url import AnyUrl from docarray.utils._internal.misc import is_...
import warnings from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union from docarray.typing import ImageBytes from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.image import ImageNdArray from docarray.typing.url.any_url import AnyUrl from docarray.utils._intern...
from typing import Any, Optional from typing_inspect import get_args, is_union_type from docarray.typing.tensor.abstract_tensor import AbstractTensor def is_type_tensor(type_: Any) -> bool: """Return True if type is a type Tensor or an Optional Tensor type.""" return isinstance(type_, type) and issubclass(t...
from typing import Any, Optional from typing_inspect import get_args, is_optional_type, is_union_type from docarray.typing.tensor.abstract_tensor import AbstractTensor def is_type_tensor(type_: Any) -> bool: """Return True if type is a type Tensor or an Optional Tensor type.""" return isinstance(type_, type...
""" This script trains sentence transformers with a triplet loss function. As corpus, we use the wikipedia sections dataset that was describd by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks. """ import logging import traceback from datetime import datetime from datasets import load_da...
""" This script trains sentence transformers with a triplet loss function. As corpus, we use the wikipedia sections dataset that was describd by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks. """ import logging import traceback from datetime import datetime from datasets import load_da...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Sequence import torch from mmengine.data import BaseDataSample from mmengine.registry import HOOKS from .hook import Hook @HOOKS.register_module() class EmptyCacheHook(Hook): """Releases all unoccupied cached GPU memory during the proc...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Sequence import torch from mmengine.data import BaseDataSample from mmengine.registry import HOOKS from .hook import Hook @HOOKS.register_module() class EmptyCacheHook(Hook): """Releases all unoccupied cached GPU memory during the proc...
# TODO: Add _log_api_usage_once() in all mid-level kernels. If they remain not jit-scriptable we can use decorators from torchvision.transforms import InterpolationMode # usort: skip from ._meta import ( clamp_bounding_box, convert_format_bounding_box, convert_color_space_image_tensor, convert_color_s...
# TODO: Add _log_api_usage_once() in all mid-level kernels. If they remain not jit-scriptable we can use decorators from torchvision.transforms import InterpolationMode # usort: skip from ._meta import ( clamp_bounding_box, convert_format_bounding_box, convert_color_space_image_tensor, convert_color_s...
import numpy as np import pytest from typing import Dict, List from docarray import BaseDoc, DocList from docarray.base_doc import AnyDoc from docarray.documents import ImageDoc, TextDoc from docarray.typing import NdArray @pytest.mark.proto def test_simple_proto(): class CustomDoc(BaseDoc): text: str ...
import numpy as np import pytest from docarray import BaseDoc, DocList from docarray.base_doc import AnyDoc from docarray.documents import ImageDoc, TextDoc from docarray.typing import NdArray @pytest.mark.proto def test_simple_proto(): class CustomDoc(BaseDoc): text: str tensor: NdArray da ...
from typing import Any, Union import pytest from langchain_core.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) from langchain_core.documents import Document from langchain_core.structured_query import ( Comparator, Comparison, Operation, Operat...
from typing import Any, Dict, List, Tuple, Union import pytest from langchain_core.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) from langchain_core.documents import Document from langchain_core.structured_query import ( Comparator, Comparison, Op...
_base_ = ['./mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa depths = [2, 2, 18, 2] model = dict( backbone=dict( depths=depths, init_cfg=dict(type='Pretrained', ...
_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa depths = [2, 2, 18, 2] model = dict( backbone=dict( depths=depths, init_cfg=dict(type='Pretrained', ...
from typing import Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_document import BaseDocument from docarray.documents import AudioDoc from docarray.typing import AnyEmbedding, AnyTensor from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.tensor.video.vide...
from typing import Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_document import BaseDocument from docarray.documents import AudioDoc from docarray.typing import AnyEmbedding, AnyTensor from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.tensor.video.vide...
"""Test Aleph Alpha specific stuff.""" import pytest from pydantic import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.llms.aleph_alpha import AlephAlpha @pytest.mark.requires("aleph_alpha_client") def test_api_key_is_secret_string() -> None: llm = AlephAlpha(aleph_alpha_api...
"""Test Aleph Alpha specific stuff.""" import pytest from pydantic import SecretStr from pytest import CaptureFixture, MonkeyPatch from langchain_community.llms.aleph_alpha import AlephAlpha @pytest.mark.requires("aleph_alpha_client") def test_api_key_is_secret_string() -> None: llm = AlephAlpha(aleph_alpha_api...
# Copyright (c) OpenMMLab. All rights reserved. import ast import os.path as osp import re import warnings from typing import Tuple from mmengine.fileio import load from mmengine.utils import check_file_exist MODULE2PACKAGE = { 'mmcls': 'mmcls', 'mmdet': 'mmdet', 'mmdet3d': 'mmdet3d', 'mmseg': 'mmsegm...
# Copyright (c) OpenMMLab. All rights reserved. import ast import os.path as osp import re import warnings from typing import Tuple from mmengine.fileio import load from mmengine.utils import check_file_exist MODULE2PACKAGE = { 'mmcls': 'mmcls', 'mmdet': 'mmdet', 'mmdet3d': 'mmdet3d', 'mmseg': 'mmsegm...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # Example to use different file client # Method 1: simply set the data root and let the file I/O module # automatically infer from prefix (does not support LMDB and Memcache yet) # data_root = 's3://openmmlab/d...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file...
"""Module wrapping the Client of Jina.""" import argparse from typing import TYPE_CHECKING, Optional, Union, overload from jina.helper import parse_client __all__ = ['Client'] from jina.enums import GatewayProtocolType if TYPE_CHECKING: from jina.clients.grpc import AsyncGRPCClient, GRPCClient from jina.cli...
"""Module wrapping the Client of Jina.""" import argparse from typing import TYPE_CHECKING, Optional, Union, overload from jina.helper import parse_client __all__ = ['Client'] from jina.enums import GatewayProtocolType if TYPE_CHECKING: from jina.clients.grpc import AsyncGRPCClient, GRPCClient from jina.cli...
import numpy as np import pytest from absl.testing import parameterized from keras.src import layers from keras.src import models from keras.src import testing from keras.src.utils import summary_utils class SummaryUtilsTest(testing.TestCase, parameterized.TestCase): @parameterized.parameters([("adam",), (None,)...
import numpy as np import pytest from absl.testing import parameterized from keras.src import layers from keras.src import models from keras.src import testing from keras.src.utils import summary_utils class SummaryUtilsTest(testing.TestCase, parameterized.TestCase): @parameterized.parameters([("adam",), (None,)...
"""Standard LangChain interface tests""" from typing import Optional from langchain_core.language_models import BaseChatModel from langchain_core.messages import AIMessageChunk, BaseMessageChunk from langchain_core.rate_limiters import InMemoryRateLimiter from langchain_tests.integration_tests import ( # type: ignor...
"""Standard LangChain interface tests""" import pytest # type: ignore[import-not-found] from langchain_core.language_models import BaseChatModel from langchain_core.rate_limiters import InMemoryRateLimiter from langchain_tests.integration_tests import ( # type: ignore[import-not-found] ChatModelIntegrationTests,...
import asyncio import os import random import string import tempfile import time import pytest from jina import helper @pytest.fixture(scope='function') def random_workspace_name(): """Generate a random workspace name with digits and letters.""" rand = ''.join(random.choices(string.ascii_uppercase + string....
import asyncio import os import random import string import tempfile import time import pytest from jina import helper @pytest.fixture(scope='function') def random_workspace_name(): """Generate a random workspace name with digits and letters.""" rand = ''.join(random.choices(string.ascii_uppercase + string....
from contextlib import suppress from docutils import nodes from docutils.parsers.rst import Directive from sklearn.utils import all_estimators from sklearn.utils._test_common.instance_generator import _construct_instance from sklearn.utils._testing import SkipTest class AllowNanEstimators(Directive): @staticmet...
from contextlib import suppress from docutils import nodes from docutils.parsers.rst import Directive from sklearn.utils import all_estimators from sklearn.utils._test_common.instance_generator import _construct_instance from sklearn.utils._testing import SkipTest class AllowNanEstimators(Directive): @staticmet...
import os from pathlib import Path import numpy as np import pytest from PIL.Image import Image, fromarray from jina import DocumentArray, Document, Executor from ...normalizer import ImageNormalizer @pytest.fixture def numpy_image_uri(tmpdir): blob = np.random.randint(255, size=(96, 96, 3), dtype='uint8') ...
import os from pathlib import Path import numpy as np import pytest from PIL.Image import Image, fromarray from jina import DocumentArray, Document, Executor from ...normalizer import ImageNormalizer @pytest.fixture def numpy_image_uri(tmpdir): blob = np.random.randint(255, size=(96, 96, 3), dtype='uint8') ...
import json import re from re import Pattern from typing import Union from langchain_core.agents import AgentAction, AgentFinish from langchain_core.exceptions import OutputParserException from langchain.agents.agent import AgentOutputParser from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS FINAL_ANSWER_A...
import json import re from re import Pattern from typing import Union from langchain_core.agents import AgentAction, AgentFinish from langchain_core.exceptions import OutputParserException from langchain.agents.agent import AgentOutputParser from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS FINAL_ANSWER_A...
from abc import ABC, abstractmethod from typing import Any, ClassVar, Dict, List, Optional from llama_index.core.bridge.pydantic import BaseModel, Field, ConfigDict class RetrievalMetricResult(BaseModel): """ Metric result. Attributes: score (float): Score for the metric metadata (Dict[s...
from abc import ABC, abstractmethod from typing import Any, ClassVar, Dict, List, Optional from llama_index.core.bridge.pydantic import BaseModel, Field, ConfigDict class RetrievalMetricResult(BaseModel): """Metric result. Attributes: score (float): Score for the metric metadata (Dict[str, A...
"""Copyright 2019-2024, XGBoost contributors""" import os from typing import Generator import numpy as np import pytest import scipy.sparse from dask import dataframe as dd from distributed import Client, LocalCluster from xgboost import dask as dxgb from xgboost import testing as tm from xgboost.testing import dask...
"""Copyright 2019-2024, XGBoost contributors""" import os from typing import Generator import numpy as np import pytest import scipy.sparse from dask import dataframe as dd from distributed import Client, LocalCluster from xgboost import dask as dxgb from xgboost import testing as tm @pytest.fixture(scope="module"...
from docarray import BaseDoc from docarray.typing import Mesh3DUrl def test_set_mesh_url(): class MyDocument(BaseDoc): mesh_url: Mesh3DUrl d = MyDocument(mesh_url="https://jina.ai/mesh.obj") assert isinstance(d.mesh_url, Mesh3DUrl) assert d.mesh_url == "https://jina.ai/mesh.obj"
from docarray import BaseDocument from docarray.typing import Mesh3DUrl def test_set_mesh_url(): class MyDocument(BaseDocument): mesh_url: Mesh3DUrl d = MyDocument(mesh_url="https://jina.ai/mesh.obj") assert isinstance(d.mesh_url, Mesh3DUrl) assert d.mesh_url == "https://jina.ai/mesh.obj"
"""Interface with the LangChain Hub.""" from __future__ import annotations import json from collections.abc import Sequence from typing import Any, Optional from langchain_core.load.dump import dumps from langchain_core.load.load import loads from langchain_core.prompts import BasePromptTemplate def _get_client( ...
"""Interface with the LangChain Hub.""" from __future__ import annotations import json from collections.abc import Sequence from typing import Any, Optional from langchain_core.load.dump import dumps from langchain_core.load.load import loads from langchain_core.prompts import BasePromptTemplate def _get_client( ...
# Copyright (c) OpenMMLab. All rights reserved. from .default_scope import DefaultScope from .registry import Registry, build_from_cfg from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS, RU...
# Copyright (c) OpenMMLab. All rights reserved. from .default_scope import DefaultScope from .registry import Registry, build_from_cfg from .root import (DATA_SAMPLERS, DATASETS, EVALUATORS, HOOKS, LOOPS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS,...
_base_ = [ '../_base_/models/mask-rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( data_preprocessor=dict( # The mean and std are used in PyCls when training RegNets mean=[103.53, 116.28, 123.675...
_base_ = [ '../_base_/models/mask-rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( data_preprocessor=dict( # The mean and std are used in PyCls when training RegNets mean=[103.53, 116.28, 123.675...
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.builder import InvalidConfigName from datasets.data_files import DataFilesList from datasets.packaged_modules.csv.csv import Csv, CsvConfig from ..utils import require_pil @pytest.fixture def...
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def csv_file(tmp_path): filename = tmp_path / "file.csv" data = textwrap.dedent( """\ ...
# dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='disk') tra...
# dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='disk') tra...
# Copyright (c) OpenMMLab. All rights reserved. from .checkloss_hook import CheckInvalidLossHook from .mean_teacher_hook import MeanTeacherHook from .memory_profiler_hook import MemoryProfilerHook from .num_class_check_hook import NumClassCheckHook from .pipeline_switch_hook import PipelineSwitchHook from .set_epoch_in...
# Copyright (c) OpenMMLab. All rights reserved. from .checkloss_hook import CheckInvalidLossHook from .mean_teacher_hook import MeanTeacherHook from .memory_profiler_hook import MemoryProfilerHook from .num_class_check_hook import NumClassCheckHook from .pipeline_switch_hook import PipelineSwitchHook from .set_epoch_in...
_base_ = 'solov2_r50_fpn_1x_coco.py' # model settings model = dict( mask_head=dict( stacked_convs=2, feat_channels=256, scale_ranges=((1, 56), (28, 112), (56, 224), (112, 448), (224, 896)), mask_feature_head=dict(out_channels=128))) # dataset settings train_pipeline = [ dict( ...
_base_ = 'solov2_r50_fpn_1x_coco.py' # model settings model = dict( mask_head=dict( stacked_convs=2, feat_channels=256, scale_ranges=((1, 56), (28, 112), (56, 224), (112, 448), (224, 896)), mask_feature_head=dict(out_channels=128))) # dataset settings train_pipeline = [ dict( ...
from __future__ import annotations from typing import TYPE_CHECKING, Tuple, Union from langchain_core.structured_query import ( Comparator, Comparison, Operation, Operator, StructuredQuery, Visitor, ) if TYPE_CHECKING: from timescale_vector import client class TimescaleVectorTranslator(...
from __future__ import annotations from typing import TYPE_CHECKING, Tuple, Union from langchain_core.structured_query import ( Comparator, Comparison, Operation, Operator, StructuredQuery, Visitor, ) if TYPE_CHECKING: from timescale_vector import client class TimescaleVectorTranslator(...
import importlib class LazyModule: def __init__(self, name, pip_name=None, import_error_msg=None): self.name = name self.pip_name = pip_name or name self.import_error_msg = import_error_msg or ( f"This requires the {self.name} module. " f"You can install it via `pip...
import importlib class LazyModule: def __init__(self, name, pip_name=None, import_error_msg=None): self.name = name self.pip_name = pip_name or name self.import_error_msg = import_error_msg or ( f"This requires the {self.name} module. " f"You can install it via `pip...
from pathlib import Path from typing import List, Tuple, Union import torch import torchaudio from torch.utils.data import Dataset SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]] class LibriMix(Dataset): r"""*LibriMix* :cite:`cosentino2020librimix` dataset. Args: root (str or Path): The p...
from pathlib import Path from typing import List, Tuple, Union import torch import torchaudio from torch.utils.data import Dataset SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]] class LibriMix(Dataset): r"""Create the *LibriMix* :cite:`cosentino2020librimix` dataset. Args: root (str or P...
import warnings from abc import ABC from typing import TYPE_CHECKING, Any, BinaryIO, Dict, TypeVar, Union from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.utils._internal.misc import import_library, is_notebook if TYPE_CHECKING: from docarray.typing.bytes.audio_bytes import AudioByt...
import warnings from abc import ABC from typing import TYPE_CHECKING, Any, BinaryIO, Dict, TypeVar, Union from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.utils._internal.misc import import_library, is_notebook T = TypeVar('T', bound='AbstractAudioTensor') MAX_INT_16 = 2**15 class Ab...