input: string (lengths 33 to 5k)
output: string (lengths 32 to 5k)
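The rows below preview paired code samples: each row holds an input snippet and its corresponding output snippet, both Python source strings within the length ranges above (snippets are truncated at the preview boundary). As a minimal sketch of consuming such a preview, assuming it backs a Hugging Face-style dataset with "input" and "output" string columns — the dataset id below is hypothetical:

from datasets import load_dataset

# Hypothetical dataset id; substitute the real one behind this preview.
ds = load_dataset("org/code-pairs", split="train")

for row in ds.select(range(3)):
    # Each row pairs an original snippet ("input") with its edited form ("output").
    print(len(row["input"]), len(row["output"]))
    print(row["input"][:80])
    print(row["output"][:80])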
import os import time import pytest from jina import Client, Document, DocumentArray, Flow @pytest.mark.parametrize('shards', [1, 2]) @pytest.mark.parametrize('replicas', [1, 3, 4]) def test_containerruntime_args( docker_image_name, docker_image_built, shards, replicas, port_generator ): exposed_port = port...
import os import time import pytest from jina import Client, Document, DocumentArray, Flow cur_dir = os.path.dirname(os.path.abspath(__file__)) img_name = 'jina/replica-exec' @pytest.fixture(scope='function') def docker_image_built(): import docker client = docker.from_env() client.images.build(path=...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.chat_message_histories import Neo4jChatMessageHistory # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling o...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.chat_message_histories import Neo4jChatMessageHistory # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling o...
"""Simple reader that turns an iterable of strings into a list of Documents.""" from typing import List from llama_index.core.readers.base import BasePydanticReader from llama_index.core.schema import Document class StringIterableReader(BasePydanticReader): """ String Iterable Reader. Gets a list of do...
"""Simple reader that turns an iterable of strings into a list of Documents.""" from typing import List from llama_index.core.readers.base import BasePydanticReader from llama_index.core.schema import Document class StringIterableReader(BasePydanticReader): """String Iterable Reader. Gets a list of documen...
"""Chain that hits a URL and then uses an LLM to parse results.""" from __future__ import annotations from typing import Any, Dict, List, Optional from langchain.chains import LLMChain from langchain.chains.base import Chain from langchain_core.callbacks import CallbackManagerForChainRun from pydantic import ConfigD...
"""Chain that hits a URL and then uses an LLM to parse results.""" from __future__ import annotations from typing import Any, Dict, List, Optional from langchain.chains import LLMChain from langchain.chains.base import Chain from langchain_core.callbacks import CallbackManagerForChainRun from pydantic import ConfigD...
import gc import unittest import numpy as np import torch from diffusers import FluxPipeline, FluxPriorReduxPipeline from diffusers.utils import load_image from diffusers.utils.testing_utils import ( Expectations, backend_empty_cache, numpy_cosine_similarity_distance, require_big_accelerator, slow...
import gc import unittest import numpy as np import pytest import torch from diffusers import FluxPipeline, FluxPriorReduxPipeline from diffusers.utils import load_image from diffusers.utils.testing_utils import ( Expectations, backend_empty_cache, numpy_cosine_similarity_distance, require_big_acceler...
import warnings from abc import ABC from typing import Any, Optional from langchain_core._api import deprecated from langchain_core.chat_history import ( BaseChatMessageHistory, InMemoryChatMessageHistory, ) from langchain_core.memory import BaseMemory from langchain_core.messages import AIMessage, HumanMessag...
import warnings from abc import ABC from typing import Any, Optional from langchain_core._api import deprecated from langchain_core.chat_history import ( BaseChatMessageHistory, InMemoryChatMessageHistory, ) from langchain_core.memory import BaseMemory from langchain_core.messages import AIMessage, HumanMessag...
def __getattr__(name: str): if name in ["ctc_decoder", "lexicon_decoder"]: import warnings from torchaudio.models.decoder import ctc_decoder warnings.warn( f"{__name__}.{name} has been moved to torchaudio.models.decoder.ctc_decoder", DeprecationWarning, ) ...
_INITIALIZED = False _LAZILY_IMPORTED = [ "Hypothesis", "CTCDecoder", "ctc_decoder", "lexicon_decoder", "download_pretrained_files", ] def _init_extension(): import torchaudio torchaudio._extension._load_lib("libtorchaudio_decoder") global _INITIALIZED _INITIALIZED = True def _...
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple import torch.nn as nn from mmcv.cnn import ConvModule from mmengine.model import BaseModule from torch import Tensor from mmdet.models.layers import ResLayer, SimplifiedBasicBlock from mmdet.registry import MODELS from mmdet.utils import M...
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple import torch.nn as nn from mmcv.cnn import ConvModule from mmengine.model import BaseModule from torch import Tensor from mmdet.core.utils.typing import MultiConfig, OptConfigType from mmdet.models.utils import ResLayer, SimplifiedBasicBlo...
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'] img_scale = (640, 640) # height, width # model settings model = dict( type='YOLOX', input_size=img_scale, random_size_range=(15, 25), random_size_interval=10, backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen...
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'] img_scale = (640, 640) # height, width # model settings model = dict( type='YOLOX', input_size=img_scale, random_size_range=(15, 25), random_size_interval=10, backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen...
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unles...
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unles...
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False, plugins=[ dict( cfg=dict(type='ContextBlock', ratio=1. / 16), stages=(False, True, True, True), ...
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False, plugins=[ dict( cfg=dict(type='ContextBlock', ratio=1. / 16), stages=(False, True, True, True), ...
"""Module for argparse for Client""" def mixin_comm_protocol_parser(parser): """Add the arguments for the protocol to the parser :param parser: the parser configure """ from jina.enums import GatewayProtocolType parser.add_argument( '--protocol', type=GatewayProtocolType.from_st...
"""Module for argparse for Client""" def mixin_comm_protocol_parser(parser): """Add the arguments for the protocol to the parser :param parser: the parser configure """ from jina.enums import GatewayProtocolType parser.add_argument( '--protocol', type=GatewayProtocolType.from_st...
from pathlib import Path from typing import Dict import numpy as np from jina import DocumentArray, Document, Executor from ...paddle_image import ImagePaddlehubEncoder input_dim = 224 target_output_dim = 2048 num_doc = 2 test_data = np.random.rand(num_doc, 3, input_dim, input_dim) tmp_files = [] def test_config():...
import os from typing import Dict import numpy as np from jina import DocumentArray, Document from ...paddle_image import ImagePaddlehubEncoder directory = os.path.dirname(os.path.realpath(__file__)) input_dim = 224 target_output_dim = 2048 num_doc = 2 test_data = np.random.rand(num_doc, 3, input_dim, input_dim) tmp...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.registry import HOOKS from .hook import Hook @HOOKS.register_module() class DistSamplerSeedHook(Hook): """Data-loading sampler for distributed training. When distributed training, it is only useful in conjunction with :obj:`EpochBasedRunner`, ...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.registry import HOOKS from .hook import Hook @HOOKS.register_module() class DistSamplerSeedHook(Hook): """Data-loading sampler for distributed training. When distributed training, it is only useful in conjunction with :obj:`EpochBasedRunner`, ...
from functools import partial from torchaudio.models import emformer_rnnt_base from torchaudio.pipelines import RNNTBundle EMFORMER_RNNT_BASE_MUSTC = RNNTBundle( _rnnt_path="models/emformer_rnnt_base_mustc.pt", _rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501), _global_stats_path="pipeline-...
from functools import partial from torchaudio.models import emformer_rnnt_base from torchaudio.pipelines import RNNTBundle EMFORMER_RNNT_BASE_MUSTC = RNNTBundle( _rnnt_path="models/emformer_rnnt_base_mustc.pt", _rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501), _global_stats_path="pipeline-...
_base_ = [ '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://re...
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://re...
# Copyright (c) OpenMMLab. All rights reserved. from .conditional_detr_layers import (ConditionalDetrTransformerDecoder, ConditionalDetrTransformerDecoderLayer) from .dab_detr_layers import (DABDetrTransformerDecoder, DABDetrTransformerDecoderLayer, ...
# Copyright (c) OpenMMLab. All rights reserved. from .conditional_detr_layers import (ConditionalDetrTransformerDecoder, ConditionalDetrTransformerDecoderLayer) from .dab_detr_layers import (DABDetrTransformerDecoder, DABDetrTransformerDecoderLayer, ...
import contextlib import logging import typing import fastapi import fastapi.responses import starlette.middleware.cors import uvicorn from autogpt_libs.feature_flag.client import ( initialize_launchdarkly, shutdown_launchdarkly, ) import backend.data.block import backend.data.db import backend.data.graph imp...
import contextlib import logging import typing import fastapi import fastapi.responses import starlette.middleware.cors import uvicorn import backend.data.block import backend.data.db import backend.data.graph import backend.data.user import backend.server.routers.v1 import backend.util.service import backend.util.se...
# Copyright (c) OpenMMLab. All rights reserved. import copy import os.path as osp import unittest import numpy as np from mmengine.data import BaseDataElement as PixelData from mmengine.data import InstanceData from mmdet.core import DetDataSample from mmdet.core.mask import BitmapMasks from mmdet.datasets.pipelines ...
# Copyright (c) OpenMMLab. All rights reserved. import copy import os.path as osp import unittest import numpy as np from mmengine.data import BaseDataElement as PixelData from mmengine.data import InstanceData from mmdet.core import DetDataSample from mmdet.core.mask import BitmapMasks from mmdet.datasets.pipelines ...
"""A unit test meant to catch accidental introduction of non-optional dependencies.""" from collections.abc import Mapping from pathlib import Path from typing import Any import pytest import toml from packaging.requirements import Requirement HERE = Path(__file__).parent PYPROJECT_TOML = HERE / "../../pyproject.to...
"""A unit test meant to catch accidental introduction of non-optional dependencies.""" from collections.abc import Mapping from pathlib import Path from typing import Any import pytest import toml from packaging.requirements import Requirement HERE = Path(__file__).parent PYPROJECT_TOML = HERE / "../../pyproject.to...
"""Message responsible for deleting other messages.""" from typing import Any, Literal from langchain_core.messages.base import BaseMessage class RemoveMessage(BaseMessage): """Message responsible for deleting other messages.""" type: Literal["remove"] = "remove" """The type of the message (used for se...
"""Message responsible for deleting other messages.""" from typing import Any, Literal from langchain_core.messages.base import BaseMessage class RemoveMessage(BaseMessage): """Message responsible for deleting other messages.""" type: Literal["remove"] = "remove" """The type of the message (used for se...
from langchain_core.tools import BaseTool, tool from langchain_tests.integration_tests import ToolsIntegrationTests from langchain_tests.unit_tests import ToolsUnitTests @tool def parrot_multiply_tool(a: int, b: int) -> int: """Multiply two numbers like a parrot. Parrots always add eighty for their matey.""" ...
from langchain_core.tools import BaseTool, tool from langchain_tests.integration_tests import ToolsIntegrationTests from langchain_tests.unit_tests import ToolsUnitTests @tool def parrot_multiply_tool(a: int, b: int) -> int: """Multiply two numbers like a parrot. Parrots always add eighty for their matey.""" ...
_base_ = [ '../_base_/models/cascade-mask-rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa m...
_base_ = [ '../_base_/models/cascade_mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa m...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import pytest from jina import Document, DocumentArray from match_merger import MatchMerger @pytest.fixture def docs_matrix(): return [ DocumentArray( [ Document( ...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import pytest from jina import Document, DocumentArray from ...match_merger import MatchMerger @pytest.fixture def docs_matrix(): return [ DocumentArray( [ Document( ...
import tempfile from enum import Enum from typing import Any, Dict, Optional, Union from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from langchain_core.utils import get_from_dict_or_env from pydantic import model_validator def _import_elevenlabs() -> Any: ...
import tempfile from enum import Enum from typing import Any, Dict, Optional, Union from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from langchain_core.utils import get_from_dict_or_env from pydantic import model_validator def _import_elevenlabs() -> Any: ...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
# Copyright (c) OpenMMLab. All rights reserved. """MMEngine provides 11 root registries to support using modules across projects. More datails can be found at https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. """ from .registry import Registry # manage all kinds of runners like `EpochBasedRunner` an...
# Copyright (c) OpenMMLab. All rights reserved. """MMEngine provides 11 root registries to support using modules across projects. More datails can be found at https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. """ from .registry import Registry # manage all kinds of runners like `EpochBasedRunner` an...
from __future__ import annotations from sentence_transformers.losses.MSELoss import MSELoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseMSELoss(MSELoss): def __init__(self, model: SparseEncoder) -> None: """ Computes the MSE loss between the computed s...
from __future__ import annotations from sentence_transformers.losses.MSELoss import MSELoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseMSELoss(MSELoss): def __init__(self, model: SparseEncoder) -> None: return super().__init__(model)
__version__ = '0.36.0' import logging from docarray.array import DocList, DocVec from docarray.base_doc.doc import BaseDoc from docarray.utils._internal.misc import _get_path_from_docarray_root_level __all__ = ['BaseDoc', 'DocList', 'DocVec'] logger = logging.getLogger('docarray') handler = logging.StreamHandler()...
__version__ = '0.35.1' import logging from docarray.array import DocList, DocVec from docarray.base_doc.doc import BaseDoc from docarray.utils._internal.misc import _get_path_from_docarray_root_level __all__ = ['BaseDoc', 'DocList', 'DocVec'] logger = logging.getLogger('docarray') handler = logging.StreamHandler()...
import os import time import pytest from jina import Flow, Document, Client cur_dir = os.path.dirname(os.path.abspath(__file__)) @pytest.fixture() def docker_image(): import docker client = docker.from_env() client.images.build(path=os.path.join(cur_dir), tag='override-config-test') client.close() ...
import os import time import pytest from jina import Flow, Document, Client cur_dir = os.path.dirname(os.path.abspath(__file__)) exposed_port = 12345 @pytest.fixture() def docker_image(): import docker client = docker.from_env() client.images.build(path=os.path.join(cur_dir), tag='override-config-test'...
from llama_index_instrumentation.span_handlers.null import NullSpanHandler # noqa
import inspect from typing import Dict, Optional, Any from llama_index.core.instrumentation.span_handlers.base import BaseSpanHandler from llama_index.core.instrumentation.span.base import BaseSpan class NullSpanHandler(BaseSpanHandler[BaseSpan]): @classmethod def class_name(cls) -> str: """Class name...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import numpy as np import pytest from jina import Document, DocumentArray, Flow from jina.executors.metas import get_default_metas from jina_commons.indexers.dump import import_vectors from .. import Hnswl...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import numpy as np import pytest from jina import Document, DocumentArray, Flow from jina.executors.metas import get_default_metas from jina_commons.indexers.dump import import_vectors from .. import Hnswl...
# ruff: noqa: E402 import pytest # Rewrite assert statements for test suite so that implementations can # see the full error message from failed asserts. # https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#assertion-rewriting modules = [ "base_store", "cache", "chat_models", "vectorstores", ...
# ruff: noqa: E402 import pytest # Rewrite assert statements for test suite so that implementations can # see the full error message from failed asserts. # https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#assertion-rewriting modules = [ "base_store", "cache", "chat_models", "vectorstores", ...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
import torch from docarray import BaseDoc from docarray.typing import TorchTensor def test_tensor_ops(): class A(BaseDoc): tensor: TorchTensor[3, 224, 224] class B(BaseDoc): tensor: TorchTensor[3, 112, 224] tensor = A(tensor=torch.ones(3, 224, 224)).tensor tensord = A(tensor=torch.o...
from typing import overload, Dict, Optional, List, TYPE_CHECKING, Sequence, Any from docarray.document.data import DocumentData from docarray.document.mixins import AllMixins from docarray.base import BaseDCType from docarray.math.ndarray import detach_tensor_if_present if TYPE_CHECKING: from docarray.typing impo...
from typing import overload, Dict, Optional, List, TYPE_CHECKING, Sequence, Any from .data import DocumentData from .mixins import AllMixins from ..base import BaseDCType from ..math.ndarray import detach_tensor_if_present if TYPE_CHECKING: from ..typing import ArrayType, StructValueType, DocumentContentType cl...
""" This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training. It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version. Nowadays, with Sentence Transformers v3+, it is recommended to use the `Senten...
from __future__ import annotations from torch.utils.data import Dataset from sentence_transformers import SentenceTransformer from sentence_transformers.readers.InputExample import InputExample class SentencesDataset(Dataset): """ DEPRECATED: This class is no longer used. Instead of wrapping your List of In...
# Copyright (c) OpenMMLab. All rights reserved. from .registry import Registry, build_from_cfg from .root import (DATA_SAMPLERS, DATASETS, EVALUATORS, HOOKS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS, TRANSFORMS, ...
# Copyright (c) OpenMMLab. All rights reserved. from .registry import Registry, build_from_cfg from .root import (DATA_SAMPLERS, DATASETS, HOOKS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS, TRANSFORMS, W...
"""Callback Handler that tracks AIMessage.usage_metadata.""" import threading from collections.abc import Generator from contextlib import contextmanager from contextvars import ContextVar from typing import Any, Optional from langchain_core._api import beta from langchain_core.callbacks import BaseCallbackHandler fr...
"""Callback Handler that tracks AIMessage.usage_metadata.""" import threading from collections.abc import Generator from contextlib import contextmanager from contextvars import ContextVar from typing import Any, Optional from langchain_core._api import beta from langchain_core.callbacks import BaseCallbackHandler fr...
# Owner(s): ["oncall: distributed"] import torch import torch.nn as nn from torch.distributed.checkpoint.state_dict import get_state_dict from torch.distributed.device_mesh import _mesh_resources, init_device_mesh from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor import ...
# Owner(s): ["oncall: distributed"] import torch import torch.nn as nn from torch.distributed.checkpoint.state_dict import get_state_dict from torch.distributed.device_mesh import _mesh_resources, init_device_mesh from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor import ...
# Copyright (c) OpenMMLab. All rights reserved. import math import torch from torch.utils.data import DistributedSampler as _DistributedSampler from mmdet.core.utils import sync_random_seed from mmdet.utils import get_device class DistributedSampler(_DistributedSampler): def __init__(self, dat...
# Copyright (c) OpenMMLab. All rights reserved. import math import torch from torch.utils.data import DistributedSampler as _DistributedSampler from mmdet.core.utils import sync_random_seed from mmdet.utils import get_device class DistributedSampler(_DistributedSampler): def __init__(self, data...
import pytest from jina import Executor, Flow, requests @pytest.fixture() def get_executor(): class DummyExecutor(Executor): @requests(on='/foo') def foo(self, docs, **kwargs): ... return DummyExecutor def test_disable_monitoring_on_pods(port_generator, get_executor): port0 = port_gene...
import pytest from jina import Executor, Flow, requests @pytest.fixture() def get_executor(): class DummyExecutor(Executor): @requests(on='/foo') def foo(self, docs, **kwargs): ... return DummyExecutor def test_disable_monitoring_on_pods(port_generator, get_executor): port0...
""" Prompts for implementing Chain of Abstraction. While official prompts are not given (and the paper finetunes models for the task), we can take inspiration and use few-shot prompting to generate a prompt for implementing chain of abstraction in an LLM agent. """ REASONING_PROMPT_TEMPALTE = """Generate an abstract ...
""" Prompts for implementing Chain of Abstraction. While official prompts are not given (and the paper finetunes models for the task), we can take inspiration and use few-shot prompting to generate a prompt for implementing chain of abstraction in an LLM agent. """ REASONING_PROMPT_TEMPALTE = """Generate an abstract...
"""DeepLake reader.""" from typing import List, Optional, Union import numpy as np from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document distance_metric_map = { "l2": lambda a, b: np.linalg.norm(a - b, axis=1, ord=2), "l1": lambda a, b: np.linalg.norm(a - b, axis=1...
"""DeepLake reader.""" from typing import List, Optional, Union import numpy as np from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document distance_metric_map = { "l2": lambda a, b: np.linalg.norm(a - b, axis=1, ord=2), "l1": lambda a, b: np.linalg.norm(a - b, axis=1,...
import numpy as np def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int: """Return the number of possible shards according to the input gen_kwargs""" # Having lists of different sizes makes sharding ambigious, raise an error in this case # until we decide how to define sharding without ambiguity f...
from typing import List import numpy as np def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int: """Return the number of possible shards according to the input gen_kwargs""" # Having lists of different sizes makes sharding ambigious, raise an error in this case # until we decide how to define sha...
import numpy as np import pytest import torch from pydantic import parse_obj_as from docarray import BaseDoc from docarray.documents import ImageDoc from docarray.typing import ImageBytes from docarray.utils._internal.misc import is_tf_available from docarray.utils._internal.pydantic import is_pydantic_v2 tf_availabl...
import numpy as np import pytest import torch from pydantic import parse_obj_as from docarray import BaseDoc from docarray.documents import ImageDoc from docarray.typing import ImageBytes from docarray.utils._internal.misc import is_tf_available tf_available = is_tf_available() if tf_available: import tensorflow ...
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.' __license__ = 'Apache-2.0' from typing import Any, Iterable, Optional import librosa as lr import numpy as np import torch from jina import DocumentArray, Executor, requests from jina.excepts import BadDocType from .audio_clip.model impo...
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.' __license__ = 'Apache-2.0' from typing import Any, Iterable, Optional import librosa as lr import numpy as np import torch from jina import DocumentArray, Executor, requests from jina.excepts import BadDocType from .audio_clip.model impo...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
from docarray import BaseDoc, DocList from docarray.base_doc import AnyDoc def test_generic_init(): class Text(BaseDoc): text: str da = DocList[Text]([]) da.doc_type == Text assert isinstance(da, DocList) def test_normal_access_init(): da = DocList([]) da.doc_type == AnyDoc as...
from __future__ import annotations import csv import logging import os import numpy as np from sentence_transformers import InputExample logger = logging.getLogger(__name__) class CEBinaryAccuracyEvaluator: """ This evaluator can be used with the CrossEncoder class. It is designed for CrossEncoders w...
import csv import logging import os from typing import List import numpy as np from sentence_transformers import InputExample logger = logging.getLogger(__name__) class CEBinaryAccuracyEvaluator: """ This evaluator can be used with the CrossEncoder class. It is designed for CrossEncoders with 1 output...
from __future__ import annotations from sentence_transformers.similarity_functions import SimilarityFunction __all__ = ["SimilarityFunction"]
from sentence_transformers.similarity_functions import SimilarityFunction __all__ = ["SimilarityFunction"]
from keras.src.api_export import keras_export from keras.src.layers.pooling.base_pooling import BasePooling @keras_export(["keras.layers.AveragePooling3D", "keras.layers.AvgPool3D"]) class AveragePooling3D(BasePooling): """Average pooling operation for 3D data (spatial or spatio-temporal). Downsamples the in...
from keras.src.api_export import keras_export from keras.src.layers.pooling.base_pooling import BasePooling @keras_export(["keras.layers.AveragePooling3D", "keras.layers.AvgPool3D"]) class AveragePooling3D(BasePooling): """Average pooling operation for 3D data (spatial or spatio-temporal). Downsamples the in...
"""Test pydantic output parser.""" import pytest from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.output_parsers.pydantic import PydanticOutputParser from llama_index.core.llms import ChatMessage, TextBlock, ImageBlock class AttrDict(BaseModel): test_attr: str foo: int class Tes...
"""Test pydantic output parser.""" import pytest from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.output_parsers.pydantic import PydanticOutputParser class AttrDict(BaseModel): test_attr: str foo: int class TestModel(BaseModel): __test__ = False title: str attr_dict:...
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict(plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='1111', kv_stride=2), ...
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict(plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='1111', kv_stride=2), ...
from urllib.parse import urlparse, urlunparse import pytest from llama_index.postprocessor.nvidia_rerank import NVIDIARerank as Interface from llama_index.postprocessor.nvidia_rerank.utils import BASE_URL import respx @pytest.fixture() def mock_v1_local_models2(respx_mock: respx.MockRouter, base_url: str) -> None: ...
from urllib.parse import urlparse, urlunparse import pytest from llama_index.postprocessor.nvidia_rerank import NVIDIARerank as Interface from llama_index.postprocessor.nvidia_rerank.utils import BASE_URL import respx @pytest.fixture() def mock_v1_local_models2(respx_mock: respx.MockRouter, base_url: str) -> None: ...
"""Standard LangChain interface tests""" from pathlib import Path from typing import Literal, cast from langchain_core.language_models import BaseChatModel from langchain_core.messages import AIMessage from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_anthropic import ChatAnthrop...
"""Standard LangChain interface tests""" from pathlib import Path from typing import Literal, cast from langchain_core.language_models import BaseChatModel from langchain_core.messages import AIMessage from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_anthropic import ChatAnthrop...
# Copyright (c) OpenMMLab. All rights reserved. import pytest import torch from mmdet.core.bbox.coder import (DeltaXYWHBBoxCoder, TBLRBBoxCoder, YOLOBBoxCoder) def test_yolo_bbox_coder(): coder = YOLOBBoxCoder() bboxes = torch.Tensor([[-42., -29., 74., 61.], [-10., -29., 10...
import pytest import torch from mmdet.core.bbox.coder import (DeltaXYWHBBoxCoder, TBLRBBoxCoder, YOLOBBoxCoder) def test_yolo_bbox_coder(): coder = YOLOBBoxCoder() bboxes = torch.Tensor([[-42., -29., 74., 61.], [-10., -29., 106., 61.], [22., -29.,...
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applica...
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applica...
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any from sentence_transformers.evaluation import MSEEvaluator if TYPE_CHECKING: import numpy as np from torch import Tensor from sentence_transformers.sparse_encoder import SparseEncoder logger = logging.getLogger(__nam...
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any from sentence_transformers.evaluation import MSEEvaluator if TYPE_CHECKING: import numpy as np from torch import Tensor from sentence_transformers.sparse_encoder import SparseEncoder logger = logging.getLogger(__nam...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import torch from mmcv import Config, DictAction from mmdet.models import build_detector try: from mmcv.cnn import get_model_complexity_info except ImportError: raise ImportError('Please upgrade mmcv to >0.6.2') def parse_args(): parser = ...
import argparse import torch from mmcv import Config, DictAction from mmdet.models import build_detector try: from mmcv.cnn import get_model_complexity_info except ImportError: raise ImportError('Please upgrade mmcv to >0.6.2') def parse_args(): parser = argparse.ArgumentParser(description='Train a det...
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from mmdet.registry import MODELS MODELS.register_module('Linear', module=nn.Linear) @MODELS.register_module(name='NormedLinear') class NormedLinear(nn.Linear): """Normaliz...
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmdet.registry import MODELS MODELS.register_module('Linear', module=nn.Linear) @MODELS.register_module(name='NormedLinear') class NormedLinear(nn.Linear): """Normalized Linear Layer. Arg...
# Copyright (c) OpenMMLab. All rights reserved. from .det_tta import DetTTAModel from .merge_augs import (merge_aug_bboxes, merge_aug_masks, merge_aug_proposals, merge_aug_results, merge_aug_scores) __all__ = [ 'merge_aug_bboxes', 'merge_aug_masks', 'merge_aug_prop...
# Copyright (c) OpenMMLab. All rights reserved. from .merge_augs import (merge_aug_bboxes, merge_aug_masks, merge_aug_proposals, merge_aug_results, merge_aug_scores) __all__ = [ 'merge_aug_bboxes', 'merge_aug_masks', 'merge_aug_proposals', 'merge_aug_scores', '...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from itertools import groupby from typing import Dict, Iterable from jina import DocumentArray, Executor, requests class SimpleRanker(Executor): """ :class:`SimpleRanker` aggregates the score of the ma...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from itertools import groupby from typing import Dict, Iterable from jina import DocumentArray, Executor, requests class SimpleRanker(Executor): """ :class:`SimpleRanker` aggregates the score of the ma...
from __future__ import annotations from dataclasses import field from typing import Any, Callable import torch from sentence_transformers.data_collator import SentenceTransformerDataCollator class CrossEncoderDataCollator(SentenceTransformerDataCollator): """Collator for a CrossEncoder model. This encodes ...
from __future__ import annotations from dataclasses import field from typing import Any, Callable import torch from sentence_transformers.data_collator import SentenceTransformerDataCollator class CrossEncoderDataCollator(SentenceTransformerDataCollator): """Collator for a CrossEncoder model. This encodes ...
_base_ = 'faster-rcnn_r50-caffe-dc5_1x_coco.py' train_pipeline = [ dict(type='LoadImageFromFile', backend_args=_base_.backend_args), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomChoiceResize', scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (13...
_base_ = 'faster-rcnn_r50-caffe-dc5_1x_coco.py' train_pipeline = [ dict(type='LoadImageFromFile', backend_args=_base_.backend_args), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomChoiceResize', scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), ...
"""Test simple function agent.""" from typing import Any, Dict, Tuple import pytest from llama_index.core.agent.custom.simple_function import FnAgentWorker def mock_foo_fn_no_state_param() -> Tuple[None, bool]: """Mock agent input function without a state.""" return None, True def mock_foo_fn(state: dict) ...
"""Test simple function agent.""" from typing import Any, Dict, Tuple import pytest from llama_index.core.agent.custom.simple_function import FnAgentWorker def mock_foo_fn_no_state_param() -> Tuple[None, bool]: """Mock agent input function without a state.""" return None, True def mock_foo_fn(state: dict) ...
import platform import sys from pathlib import Path import pkg_resources from setuptools import find_packages, setup def read_version(fname="whisper/version.py"): exec(compile(open(fname, encoding="utf-8").read(), fname, "exec")) return locals()["__version__"] requirements = [] if sys.platform.startswith("...
import platform import sys from pathlib import Path import pkg_resources from setuptools import find_packages, setup def read_version(fname="whisper/version.py"): exec(compile(open(fname, encoding="utf-8").read(), fname, "exec")) return locals()["__version__"] requirements = [] if sys.platform.startswith("...
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar('T', bound='VideoNdArray')...
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar('T', bound='VideoNdArray')...
import hashlib import secrets from typing import NamedTuple class APIKeyContainer(NamedTuple): """Container for API key parts.""" raw: str prefix: str postfix: str hash: str class APIKeyManager: PREFIX: str = "agpt_" PREFIX_LENGTH: int = 8 POSTFIX_LENGTH: int = 8 def generate_a...
from typing import NamedTuple import secrets import hashlib class APIKeyContainer(NamedTuple): """Container for API key parts.""" raw: str prefix: str postfix: str hash: str class APIKeyManager: PREFIX: str = "agpt_" PREFIX_LENGTH: int = 8 POSTFIX_LENGTH: int = 8 def generate_api_...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmdet.models.dense_heads.autoassign_head import AutoAssignHead from mmdet.models.dense_heads.paa_head import levels_to_images def test_autoassign_head_loss(): """Tests autoassign head loss when truth is empty and non-empty.""" s =...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmdet.models.dense_heads.autoassign_head import AutoAssignHead from mmdet.models.dense_heads.paa_head import levels_to_images def test_autoassign_head_loss(): """Tests autoassign head loss when truth is empty and non-empty.""" s =...
from __future__ import annotations from torch import Tensor, nn from sentence_transformers.cross_encoder import CrossEncoder # TODO: Consider the naming of this class class CrossEntropyLoss(nn.Module): def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None: ...
from __future__ import annotations from torch import Tensor, nn from sentence_transformers.cross_encoder import CrossEncoder # TODO: Consider the naming of this class class CrossEntropyLoss(nn.Module): def __init__(self, model: CrossEncoder) -> None: super().__init__() self.model = model ...
import pytest import torch from docarray.computation.torch_backend import TorchCompBackend def test_to_device(): t = torch.rand(10, 3) assert t.device == torch.device('cpu') t = TorchCompBackend.to_device(t, 'meta') assert t.device == torch.device('meta') @pytest.mark.parametrize( 'array,result...
import torch from docarray.computation.torch_backend import TorchCompBackend def test_to_device(): t = torch.rand(10, 3) assert t.device == torch.device('cpu') t = TorchCompBackend.to_device(t, 'meta') assert t.device == torch.device('meta') def test_empty(): tensor = TorchCompBackend.empty((10...
import torch import torchaudio.prototype.functional as F from parameterized import parameterized from torch.autograd import gradcheck, gradgradcheck from torchaudio_unittest.common_utils import nested_params, TestBaseMixin class AutogradTestImpl(TestBaseMixin): @nested_params( [F.convolve, F.fftconvolve],...
import torch import torchaudio.prototype.functional as F from torch.autograd import gradcheck, gradgradcheck from torchaudio_unittest.common_utils import nested_params, TestBaseMixin class AutogradTestImpl(TestBaseMixin): @nested_params( [F.convolve, F.fftconvolve], ["full", "valid", "same"], ...
from typing import List, Union class InputExample: """Structure for one input example with texts, the label and a unique id""" def __init__(self, guid: str = "", texts: List[str] = None, label: Union[int, float] = 0): """ Creates one InputExample with the given texts, guid and label ...
from typing import Union, List class InputExample: """Structure for one input example with texts, the label and a unique id""" def __init__(self, guid: str = "", texts: List[str] = None, label: Union[int, float] = 0): """ Creates one InputExample with the given texts, guid and label ...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '3.3.0' short_version = __version__ def parse_version_info(version_str): """Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int | str]: The version info, e.g., "1.3.0" is parsed...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '3.2.0' short_version = __version__ def parse_version_info(version_str): """Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int | str]: The version info, e.g., "1.3.0" is parsed...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
import time import pytest from backend.util.decorator import async_error_logged, error_logged, time_measured @time_measured def example_function(a: int, b: int, c: int) -> int: time.sleep(0.5) return a + b + c @error_logged(swallow=True) def example_function_with_error_swallowed(a: int, b: int, c: int) ->...
import time from backend.util.decorator import error_logged, time_measured @time_measured def example_function(a: int, b: int, c: int) -> int: time.sleep(0.5) return a + b + c @error_logged def example_function_with_error(a: int, b: int, c: int) -> int: raise ValueError("This is a test error") def te...
from abc import ABC, abstractmethod from typing import Dict, Iterator, List, Optional, Type from typing_extensions import TYPE_CHECKING if TYPE_CHECKING: from docarray import BaseDoc, DocArray class AbstractDocStore(ABC): @staticmethod @abstractmethod def list(namespace: str, show_table: bool) -> Li...
from abc import ABC, abstractmethod from typing import Dict, Iterator, List, Optional, Type from typing_extensions import TYPE_CHECKING if TYPE_CHECKING: from docarray import BaseDocument, DocumentArray class AbstractDocStore(ABC): @staticmethod @abstractmethod def list(namespace: str, show_table: b...
import tempfile from collections.abc import Generator from typing import cast import pytest from langchain_core.documents import Document from langchain.storage._lc_store import create_kv_docstore, create_lc_store from langchain.storage.file_system import LocalFileStore @pytest.fixture def file_store() -> Generator...
import tempfile from typing import Generator, cast import pytest from langchain_core.documents import Document from langchain.storage._lc_store import create_kv_docstore, create_lc_store from langchain.storage.file_system import LocalFileStore @pytest.fixture def file_store() -> Generator[LocalFileStore, None, None...
_base_ = './mask-rcnn_r50_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py' model = dict( roi_head=dict( mask_head=dict( predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
_base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py' model = dict( roi_head=dict( mask_head=dict( predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
_base_ = ['co_dino_5scale_swin_l_lsj_16xb1_1x_coco.py'] model = dict(backbone=dict(drop_path_rate=0.5)) param_scheduler = [dict(type='MultiStepLR', milestones=[30])] train_cfg = dict(max_epochs=36)
_base_ = ['co_dino_5scale_swin_l_lsj_16xb1_1x_coco.py'] model = dict(backbone=dict(drop_path_rate=0.5)) param_scheduler = [dict(milestones=[30])] train_cfg = dict(max_epochs=36)
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '0.9.1' def parse_version_info(version_str): """Parse the version information. Args: version_str (str): version string like '0.1.0'. Returns: tuple: version information contains major, minor, micro version. """ versio...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '0.9.0' def parse_version_info(version_str): """Parse the version information. Args: version_str (str): version string like '0.1.0'. Returns: tuple: version information contains major, minor, micro version. """ versio...
# TODO: Add _log_api_usage_once() in all mid-level kernels. If they remain not jit-scriptable we can use decorators from torchvision.transforms import InterpolationMode # usort: skip from ._utils import is_simple_tensor # usort: skip from ._meta import ( clamp_bounding_box, convert_format_bounding_box, ...
# TODO: Add _log_api_usage_once() in all mid-level kernels. If they remain not jit-scriptable we can use decorators from torchvision.transforms import InterpolationMode # usort: skip from ._utils import is_simple_tensor # usort: skip from ._meta import ( clamp_bounding_box, convert_format_bounding_box, ...
import pytest from docarray import DocumentArray from docarray.array.qdrant import DocumentArrayQdrant from docarray.array.sqlite import DocumentArraySqlite from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig from docarray.array.storage.qdrant import QdrantConfig from docarray.array.storage.weaviate...
import pytest from docarray import DocumentArray from docarray.array.qdrant import DocumentArrayQdrant from docarray.array.sqlite import DocumentArraySqlite from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig from docarray.array.storage.qdrant import QdrantConfig from docarray.array.storage.weaviate...
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip from . import functional # usort: skip from ._transform import Transform # usort: skip from ._presets import StereoMatching # usort: skip from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste from ._au...
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip from . import functional # usort: skip from ._transform import Transform # usort: skip from ._presets import StereoMatching # usort: skip from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste from ._au...
_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py'] model = dict( data_preprocessor=dict( # The mean and std are used in PyCls when training RegNets mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], bgr_to_rgb=False), backbone=dict( ...
_base_ = [ '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' ] model = dict( data_preprocessor=dict( # The mean and std are used in PyCls when training RegNets mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], bgr_to_rgb=False), backbone=dic...
import os from typing import Dict from hubble.executor.helper import parse_hub_uri from hubble.executor.hubio import HubIO from jina import __default_executor__, __version__ from jina.enums import PodRoleType def get_image_name(uses: str) -> str: """The image can be provided in different formats by the user. ...
import os from typing import Dict from jina import __default_executor__, __version__ from jina.enums import PodRoleType from jina.hubble.helper import parse_hub_uri from jina.hubble.hubio import HubIO def get_image_name(uses: str) -> str: """The image can be provided in different formats by the user. This fu...
import json import multiprocessing import os import time import pytest from jina.helper import random_port from jina.parsers import set_gateway_parser from jina.serve.runtimes.gateway import GatewayRuntime from jina.serve.runtimes.worker import WorkerRuntime from tests.helper import ( _generate_pod_args, _val...
import json import multiprocessing import os import time import pytest from jina.helper import random_port from jina.parsers import set_gateway_parser, set_pod_parser from jina.serve.runtimes.gateway import GatewayRuntime from jina.serve.runtimes.worker import WorkerRuntime from tests.helper import ( _validate_cu...
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class AutoAssign(SingleStageDetector): """Implementation of `AutoAssign: Differentiable Label Assignment for Dense Object Detection <https://arxiv.org/abs/...
from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class AutoAssign(SingleStageDetector): """Implementation of `AutoAssign: Differentiable Label Assignment for Dense Object Detection <https://arxiv.org/abs/2007.03496>`_.""" def __init__(self, ...
# Copyright (c) OpenMMLab. All rights reserved. from .atss import ATSS from .autoassign import AutoAssign from .base import BaseDetector from .cascade_rcnn import CascadeRCNN from .centernet import CenterNet from .cornernet import CornerNet from .d2_wrapper import Detectron2Wrapper from .ddod import DDOD from .deformab...
# Copyright (c) OpenMMLab. All rights reserved. from .atss import ATSS from .autoassign import AutoAssign from .base import BaseDetector from .cascade_rcnn import CascadeRCNN from .centernet import CenterNet from .cornernet import CornerNet from .ddod import DDOD from .deformable_detr import DeformableDETR from .detr i...
# Copyright (c) OpenMMLab. All rights reserved. import datetime import os.path as osp from typing import Optional from mmengine.fileio import dump from . import root from .registry import Registry def traverse_registry_tree(registry: Registry, verbose: bool = True) -> list: """Traverse the whole registry tree fr...
# Copyright (c) OpenMMLab. All rights reserved. import datetime import os.path as osp from typing import Optional from mmengine.fileio import dump from . import root from .registry import Registry def traverse_registry_tree(registry: Registry, verbose: bool = True) -> list: """Traverse the whole registry tree fr...
from typing import List, cast from llama_index.core.indices.vector_store.base import VectorStoreIndex from llama_index.core.schema import ( Document, NodeRelationship, QueryBundle, RelatedNodeInfo, TextNode, ImageNode, ) from llama_index.core.vector_stores.simple import SimpleVectorStore def ...
from typing import List, cast from llama_index.core.indices.vector_store.base import VectorStoreIndex from llama_index.core.schema import ( Document, NodeRelationship, QueryBundle, RelatedNodeInfo, TextNode, ) from llama_index.core.vector_stores.simple import SimpleVectorStore def test_simple_que...
_base_ = 'cascade-mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_1.6gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', ini...
_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_1.6gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', ...
# Copyright (c) OpenMMLab. All rights reserved. import unittest import torch import torch.nn as nn from mmengine.runner import autocast from mmengine.utils import digit_version from mmengine.utils.dl_utils import TORCH_VERSION class TestAmp(unittest.TestCase): def test_autocast(self): if not torch.cuda...
# Copyright (c) OpenMMLab. All rights reserved. import unittest import torch import torch.nn as nn from mmengine.runner import autocast from mmengine.utils import TORCH_VERSION, digit_version class TestAmp(unittest.TestCase): def test_autocast(self): if not torch.cuda.is_available(): if dig...
import numpy as np from docarray.proto import DocumentProto, NdArrayProto, NodeProto from docarray.typing import Tensor def test_nested_item_proto(): NodeProto(text='hello') NodeProto(nested=DocumentProto()) def test_nested_optional_item_proto(): NodeProto() def test_ndarray(): nd_proto = NdArray...
import numpy as np from docarray.proto import DocumentProto, NdArrayProto, NodeProto from docarray.typing import Tensor def test_nested_item_proto(): NodeProto(text='hello') NodeProto(nested=DocumentProto()) def test_nested_optional_item_proto(): NodeProto() def test_ndarray(): nd_proto = NdArray...
import time import pytest from jina import Executor, Flow SLOW_EXECUTOR_SLEEP_TIME = 3 class SlowExecutor(Executor): def __init__(self, **kwargs): super().__init__(**kwargs) time.sleep(SLOW_EXECUTOR_SLEEP_TIME) @pytest.mark.asyncio @pytest.mark.parametrize('protocol', ['grpc', 'http', 'websoc...
import threading import time import pytest from jina import Executor, Flow SLOW_EXECUTOR_SLEEP_TIME = 3 class SlowExecutor(Executor): def __init__(self, **kwargs): super().__init__(**kwargs) time.sleep(SLOW_EXECUTOR_SLEEP_TIME) @pytest.mark.asyncio @pytest.mark.parametrize('protocol', ['grpc'...
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _additional_imports = {} _import_structure = {"pipeline_output": ["FluxPipe...
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _additional_imports = {} _import_structure = {"pipeline_output": ["FluxPipe...
import importlib from typing import Any from langchain.retrievers.document_compressors.base import DocumentCompressorPipeline from langchain.retrievers.document_compressors.chain_extract import ( LLMChainExtractor, ) from langchain.retrievers.document_compressors.chain_filter import ( LLMChainFilter, ) from la...
import importlib from typing import Any from langchain.retrievers.document_compressors.base import DocumentCompressorPipeline from langchain.retrievers.document_compressors.chain_extract import ( LLMChainExtractor, ) from langchain.retrievers.document_compressors.chain_filter import ( LLMChainFilter, ) from la...
""" This basic example loads a pre-trained model from the web and uses it to generate sentence embeddings for a given list of sentences. """ import logging import numpy as np from sentence_transformers import LoggingHandler, SentenceTransformer #### Just some code to print debug information to stdout np.set_printop...
""" This basic example loads a pre-trained model from the web and uses it to generate sentence embeddings for a given list of sentences. """ from sentence_transformers import SentenceTransformer, LoggingHandler import numpy as np import logging #### Just some code to print debug information to stdout np.set_printopti...
#!/usr/bin/env python from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.convert_to_parquet import ConvertToParquetCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.r...
#!/usr/bin/env python from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestComm...
from .clip_image import CLIPImageEncoder
from .clip_image import CLIPImageEncoder