Columns: input (string, lengths 33–5k) · output (string, lengths 32–5k)
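Each row below pairs an input snippet with its edited output, truncated by the viewer ("..."). A minimal sketch of iterating such pairs, assuming the rows are published as a Hugging Face `datasets` dataset with string columns named `input` and `output` (the dataset path here is hypothetical):

# Iterate input/output pairs of a two-string-column dataset.
# Assumes the Hugging Face `datasets` library; "user/code-edit-pairs" is a hypothetical path.
from datasets import load_dataset

ds = load_dataset("user/code-edit-pairs", split="train")
for row in ds.select(range(3)):  # peek at the first three rows
    # lengths should fall in the 33-5k / 32-5k ranges listed above
    print(len(row["input"]), len(row["output"]))
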
input:
import os
from typing import Dict

DEPLOYMENT_FILES = [
    'statefulset-executor',
    'deployment-executor',
    'deployment-gateway',
    'deployment-uses-before',
    'deployment-uses-after',
    'deployment-uses-before-after',
]

cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
    cur_dir,...

output:
import os
from typing import Dict

DEPLOYMENT_FILES = [
    'statefulset-executor',
    'deployment-executor',
    'deployment-gateway',
    'deployment-uses-before',
    'deployment-uses-after',
    'deployment-uses-before-after',
]

cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
    cur_dir,...

input:
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
# file_client_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
file_client_args = dict(backend='disk')
...

output:
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
# file_client_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
file_client_args = dict(backend='disk')
...

input:
import os
from typing import Callable, List

import numpy as np
import pytest
import torch
from jina import Document, DocumentArray
from jinahub.encoder.transform_encoder import TransformerTorchEncoder

cur_dir = os.path.dirname(os.path.abspath(__file__))

def test_compute_tokens():
    enc = TransformerTorchEncoder()...

output:
import os
from typing import Callable, List

import numpy as np
import pytest
import torch
from jina import Document, DocumentArray
from jinahub.encoder.transform_encoder import TransformerTorchEncoder

cur_dir = os.path.dirname(os.path.abspath(__file__))

def test_compute_tokens():
    enc = TransformerTorchEncoder(b...

"""A class for JAX specific optimizer logic. Its purpose is to route around statelessness requirements in cond ops used for EMA handling and gradient accumulation handling. We do this by skipping conditionals entirely. """ import jax from jax import numpy as jnp from keras.src.optimizers import base_optimizer clas...
"""A class for JAX specific optimizer logic. Its purpose is to route around statelessness requirements in cond ops used for EMA handling and gradient accumulation handling. We do this by skipping conditionals entirely. """ import jax from jax import numpy as jnp from keras.src.optimizers import base_optimizer clas...
input:
from __future__ import annotations

import logging

from datasets import load_dataset

from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.e...

output:
from __future__ import annotations

import logging

from datasets import load_dataset

from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.e...

input:
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda

from .autograd_test_impl import AutogradTestFloat32, AutogradTestMixin

@skipIfNoCuda
class AutogradCUDATest(AutogradTestMixin, PytorchTestCase):
    device = "cuda"

@skipIfNoCuda
class AutogradRNNTCUDATest(AutogradTestFloat32, PytorchTestCa...

output:
from torchaudio_unittest.common_utils import (
    PytorchTestCase,
    skipIfNoCuda,
)

from .autograd_test_impl import AutogradTestMixin, AutogradTestFloat32

@skipIfNoCuda
class AutogradCUDATest(AutogradTestMixin, PytorchTestCase):
    device = "cuda"

@skipIfNoCuda
class AutogradRNNTCUDATest(AutogradTestFloat32, ...

input:
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import List, Optional, Union

from mmengine.config import ConfigDict
from mmengine.data import InstanceData

from ..bbox.samplers import SamplingResult
from ..data_structures import DetDataSample

#...

output:
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import List, Optional, Union

from mmengine.config import ConfigDict
from mmengine.data import InstanceData

from ..bbox.samplers import SamplingResult
from ..data_structures import DetDataSample

#...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
                        MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxC...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
                        MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, PseudoBBoxCoder,
...

input:
import json
from typing import Any, Type, TypeGuard, TypeVar, overload

import jsonschema
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel

from .type import type_match

def to_dict(data) -> dict:
    if isinstance(data, BaseModel):
        data = data.model_dump()
    return jsonable_encod...

output:
import json
from typing import Any, Type, TypeGuard, TypeVar, overload

import jsonschema
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel

from .type import type_match

def to_dict(data) -> dict:
    if isinstance(data, BaseModel):
        data = data.model_dump()
    return jsonable_encod...

input:
import pytest

import datasets
import datasets.config

# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]

def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked...

output:
import pytest

import datasets
import datasets.config

# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]

def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked...

input:
_base_ = './ga-retinanet_r101-caffe_fpn_1x_coco.py'

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='RandomResize',
        scale=[(1333, 480), (1333, 960)],
        keep_ratio=True),
    dict(type='RandomFlip...

output:
_base_ = './ga-retinanet_r101-caffe_fpn_1x_coco.py'

train_pipeline = [
    dict(
        type='LoadImageFromFile',
        file_client_args={{_base_.file_client_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='RandomResize',
        scale=[(1333, 480), (1333, 960)],
        keep_ratio=True),
...

"""Abstract interface for document loader implementations.""" from __future__ import annotations from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Optional from langchain_core.runnables import run_in_executor if TYPE_CHECKING: from collections.abc import AsyncIterator, Iterator from lan...
"""Abstract interface for document loader implementations.""" from __future__ import annotations from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Optional from langchain_core.runnables import run_in_executor if TYPE_CHECKING: from collections.abc import AsyncIterator, Iterator from lan...
"""XGBoost: eXtreme Gradient Boosting library. Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md """ from . import tracker # noqa from . import collective, dask from .core import ( Booster, DataIter, DMatrix, ExtMemQuantileDMatrix, QuantileDMatrix, _py_version, bui...
"""XGBoost: eXtreme Gradient Boosting library. Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md """ from . import tracker # noqa from . import collective, dask from .core import Booster, DataIter, DMatrix, QuantileDMatrix, _py_version, build_info from .tracker import RabitTracker # noqa fro...
input:
from typing import Any, Callable, Optional, Sequence

from llama_index.core.base.embeddings.base import (
    BaseEmbedding,
    SimilarityMode,
    similarity,
)
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core....

output:
from typing import Any, Callable, Optional, Sequence

from llama_index.core.base.embeddings.base import (
    BaseEmbedding,
    SimilarityMode,
    similarity,
)
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core....

input:
from pathlib import Path
from typing import List

import pytest
from executor.audioclip_text import AudioCLIPTextEncoder
from jina import Document, DocumentArray, Executor

_EMBEDDING_DIM = 1024

@pytest.fixture(scope='module')
def basic_encoder() -> AudioCLIPTextEncoder:
    return AudioCLIPTextEncoder(
        model...

output:
from pathlib import Path
from typing import List

import pytest
from executor.audioclip_text import AudioCLIPTextEncoder
from jina import Document, DocumentArray, Executor

_EMBEDDING_DIM = 1024

@pytest.fixture(scope='module')
def basic_encoder() -> AudioCLIPTextEncoder:
    return AudioCLIPTextEncoder(
        model...

input:
from sentence_transformers import models
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling

print("# ------------------------------------------example with v2 distill-----------------------------------------")
doc_en...

output:
import numpy as np

from sentence_transformers import models
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling

print("# ------------------------------------------example with v2 distill------------------------------...

input:
import os
import sys

import numpy as np
import pytest

import xgboost as xgb
from xgboost import testing as tm
from xgboost.core import DataSplitMode

pytestmark = pytest.mark.skipif(
    tm.no_arrow()["condition"] or tm.no_pandas()["condition"],
    reason=tm.no_arrow()["reason"] + " or " + tm.no_pandas()["reason"],
...

output:
import os
import sys

import numpy as np
import pytest

import xgboost as xgb
from xgboost import testing as tm
from xgboost.core import DataSplitMode

try:
    import pandas as pd
    import pyarrow as pa
    import pyarrow.csv as pc
except ImportError:
    pass

pytestmark = pytest.mark.skipif(
    tm.no_arrow()["con...

input:
from typing import Any, Dict, Optional, Union

import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor

class PILToTensor(Transform):
    """[BETA] Convert a PIL I...

output:
from typing import Any, Dict, Optional, Union

import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor

class PILToTensor(Transform):
    """[BETA] Convert a PIL I...

input:
import datetime

import prisma.fields
import prisma.models
import pytest

import backend.server.v2.library.model as library_model

@pytest.mark.asyncio
async def test_agent_preset_from_db():
    # Create mock DB agent
    db_agent = prisma.models.AgentPreset(
        id="test-agent-123",
        createdAt=datetime.dat...

output:
import datetime

import prisma.fields
import prisma.models
import pytest

import backend.server.v2.library.model as library_model
from backend.util import json

@pytest.mark.asyncio
async def test_agent_preset_from_db():
    # Create mock DB agent
    db_agent = prisma.models.AgentPreset(
        id="test-agent-123",
...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Sequence

from ..registry import HOOKS
from ..utils import get_git_hash
from .hook import Hook

DATA_BATCH = Optional[Sequence[dict]]

@HOOKS.register_module()
class RuntimeInfoHook(Hook):
    """A hook that updates runtime information ...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Sequence

from mmengine.registry import HOOKS
from .hook import Hook

DATA_BATCH = Optional[Sequence[dict]]

@HOOKS.register_module()
class RuntimeInfoHook(Hook):
    """A hook that updates runtime information into message hub.

    E.g...

input:
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"

import subprocess

import numpy as np
import pytest
from executor.audioclip_image import AudioCLIPImageEncoder
from jina import Document, DocumentArray, Flow

@pytest.mark.parametrize("request_size", [1, 10, 50,...

output:
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"

import subprocess

import numpy as np
import pytest
from executor.audioclip_image import AudioCLIPImageEncoder
from jina import Document, DocumentArray, Flow

@pytest.mark.parametrize("request_size", [1, 10, 50,...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from .averaged_model import (ExponentialMovingAverage, MomentumAnnealingEMA,
                             StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule
from .utils import detect_an...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from .averaged_model import (ExponentialMovingAverage, MomentumAnnealingEMA,
                             StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule
from .utils import detect_an...

input:
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'

# model settings
model = dict(
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNeXt'...

output:
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'

# model settings
preprocess_cfg = dict(
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True,
    pad_size_divisor=32)
model = dict(
    preprocess_cfg=preprocess_cfg,
    backbone=dict(
        type='ResNeXt',
        depth=101,
...

input:
import pytest

import datasets
import datasets.config

# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]

def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked...

output:
import pytest

import datasets
import datasets.config

# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]

def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked...

input:
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag...

output:
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag...

input:
from typing import Union, BinaryIO, TYPE_CHECKING

from docarray.document.mixins.helper import _uri_to_blob, _get_file_context

if TYPE_CHECKING:  # pragma: no cover
    from docarray.typing import T

class UriFileMixin:
    """Provide helper functions for :class:`Document` to dump content to a file."""

    def save_...

output:
from typing import Union, BinaryIO, TYPE_CHECKING

from docarray.document.mixins.helper import _uri_to_blob, _get_file_context

if TYPE_CHECKING:
    from docarray.typing import T

class UriFileMixin:
    """Provide helper functions for :class:`Document` to dump content to a file."""

    def save_uri_to_file(self: 'T...

input:
from __future__ import annotations

from typing import Any

import torch
from torch import nn
from transformers import AutoConfig, AutoModelForMaskedLM, AutoTokenizer

class MLMTransformer(nn.Module):
    """A minimal Transformer model that uses MLM (Masked Language Modeling).

    This model implements only the essen...

output:
from __future__ import annotations

from typing import Any

import torch
from torch import nn
from transformers import AutoConfig, AutoModelForMaskedLM, AutoTokenizer

class MLMTransformer(nn.Module):
    """A minimal Transformer model that uses MLM (Masked Language Modeling).

    This model implements only the essen...

input:
from . import InputExample
import gzip
import os

class NLIDataReader(object):
    """Reads in the Stanford NLI dataset and the MultiGenre NLI dataset"""

    def __init__(self, dataset_folder):
        self.dataset_folder = dataset_folder

    def get_examples(self, filename, max_examples=0):
        """
        data...

output:
from . import InputExample
import gzip
import os

class NLIDataReader(object):
    """
    Reads in the Stanford NLI dataset and the MultiGenre NLI dataset
    """

    def __init__(self, dataset_folder):
        self.dataset_folder = dataset_folder

    def get_examples(self, filename, max_examples=0):
        """
...

input:
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of

from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor

def test_proto_tensor():
    tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
    tensor._to_node_protobuf()...

output:
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of

from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor

def test_proto_tensor():
    tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
    tensor._to_node_protobuf()...

input:
_base_ = [
    '../_base_/models/cascade-mask-rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_2x.py',
    '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvisio...

output:
_base_ = [
    '../_base_/models/cascade-mask-rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_2x.py',
    '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvisio...

input:
import types

from typing_extensions import TYPE_CHECKING

from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils._internal.misc import (
    _get_path_from_docarray_root_level,
    import_library,
)

if TYPE_CHECKING:
...

output:
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.typing.tensor.image.image_tensor import ImageTensor

__all__ = ['ImageNdArray', 'ImageTensor']

from docarray.utils._internal.misc import is_tf_available, is_torch_available

torch_available = is_torch_available()
if torch_available:
...

input:
_base_ = [
    '../_base_/models/ssd300.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_2x.py',
    '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
input_size = 300
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type=...

output:
_base_ = [
    '../_base_/models/ssd300.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_2x.py',
    '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
input_size = 300
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type=...

input:
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version

from .version import __version__, version_info

mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.2.0'
mmcv_version = digit_version(mmcv.__version__)

mmengine_minimum_version = '0.7.1'
mmengi...

output:
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version

from .version import __version__, version_info

mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)

mmengine_minimum_version = '0.7.1'
mmengi...

input:
_base_ = ['./mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth'  # noqa

model = dict(
    backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=pretrained)))

output:
_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth'  # noqa

model = dict(
    backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=pretrained)))

input:
from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway  # keep import here for backwards compatibility
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.servers.http import HTTPServer

__all__ = ['HTTPGateway']

class HTTPGateway(HTTPServer, BaseGateway):
    """
...

output:
from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway

__all__ = ['HTTPGateway']

class HTTPGateway(FastAPIBaseGateway):
    """
    :class:`HTTPGateway` is a FastAPIBaseGateway that uses the default FastAPI app
    """

    @property
    def app(self):
        """Get the default base API app for HTT...

"""Argparser module for WorkerRuntime""" from jina.parsers.helper import KVAppendAction, add_arg_group from jina.parsers.orchestrate.runtimes.runtime import mixin_base_runtime_parser def mixin_worker_runtime_parser(parser): """Mixing in arguments required by :class:`WorkerRuntime` into the given parser. :par...
"""Argparser module for WorkerRuntime""" from jina import __default_host__, helper from jina.enums import PollingType from jina.parsers.helper import KVAppendAction, add_arg_group from jina.parsers.orchestrate.runtimes.runtime import mixin_base_runtime_parser def mixin_worker_runtime_parser(parser): """Mixing in ...
input:
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.tools import O365SendEvent
    from langchain_community.tools.office365.send_event import SendEventSchema

# Create a way to dynamically look up deprecated imports.
# Used to consolidate log...

output:
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.tools import O365SendEvent
    from langchain_community.tools.office365.send_event import SendEventSchema

# Create a way to dynamically look up deprecated imports.
# Used to consolidate log...

input:
_base_ = './yolox_s_8x8_300e_coco.py'

# model settings
model = dict(
    data_preprocessor=dict(batch_augments=[
        dict(
            type='BatchSyncRandomResize',
            random_size_range=(320, 640),
            size_divisor=32,
            interval=10)
    ]),
    backbone=dict(deepen_factor=0.33, widen_fa...

output:
_base_ = './yolox_s_8x8_300e_coco.py'

# model settings
model = dict(
    data_preprocessor=dict(batch_augments=[
        dict(
            type='BatchSyncRandomResize',
            random_size_range=(320, 640),
            size_divisor=32,
            interval=10)
    ]),
    backbone=dict(deepen_factor=0.33, widen_fa...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase

import torch
from mmengine import Config
from mmengine.structures import InstanceData

from mmdet import *  # noqa
from mmdet.models.dense_heads import YOLOFHead

class TestYOLOFHead(TestCase):

    def test_yolof_head_loss(self):
        "...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase

import torch
from mmengine import Config
from mmengine.data import InstanceData

from mmdet import *  # noqa
from mmdet.models.dense_heads import YOLOFHead

class TestYOLOFHead(TestCase):

    def test_yolof_head_loss(self):
        """Test...

input:
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag...

output:
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag...

input:
from enum import Enum
from typing import Any, Optional

from langchain_core.callbacks import (
    AsyncCallbackManagerForRetrieverRun,
    CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.stores import BaseStore, Byt...

output:
from enum import Enum
from typing import Any, Optional

from langchain_core.callbacks import (
    AsyncCallbackManagerForRetrieverRun,
    CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.stores import BaseStore, Byt...

input:
from typing import Union, Iterable

from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.storage.registry import _REGISTRY
from docarray import Document

class SequenceLikeMixin(BaseSequenceLikeMixin):
    """Implement sequence-like methods for DocumentArray with weaviate as storag...

output:
from typing import Union, Iterable

from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.storage.registry import _REGISTRY
from docarray import Document

class SequenceLikeMixin(BaseSequenceLikeMixin):
    """Implement sequence-like methods for DocumentArray with weaviate as storag...

input:
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"

import os
import subprocess

import librosa
import pytest
from executor.vggish import vggish_input
from jina import Document, DocumentArray, Flow

cur_dir = os.path.dirname(os.path.abspath(__file__))

def test_f...

output:
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"

import os
import subprocess

import librosa
import pytest
from jina import Document, DocumentArray, Flow

from ...vggish import vggish_input

cur_dir = os.path.dirname(os.path.abspath(__file__))

def test_flow_f...

input:
import pytest

@pytest.mark.compile
def test_placeholder() -> None:
    """Used for compiling integration tests without running any real tests."""

output:
import pytest

@pytest.mark.compile
def test_placeholder() -> None:
    """Used for compiling integration tests without running any real tests."""
    pass

input:
import warnings
from typing import Optional, Union, TYPE_CHECKING, Callable

import numpy as np

from docarray.score import NamedScore

if TYPE_CHECKING:
    from docarray import Document, DocumentArray

class EvaluationMixin:
    """A mixin that provides ranking evaluation functionality to DocumentArrayLike objects""...

output:
import warnings
from typing import Optional, Union, TYPE_CHECKING, Callable

import numpy as np

from ...score import NamedScore

if TYPE_CHECKING:
    from ... import Document, DocumentArray

class EvaluationMixin:
    """A mixin that provides ranking evaluation functionality to DocumentArrayLike objects"""

    def ...

input:
import importlib
import os
import re
import types
from typing import Any, Optional

import numpy as np

try:
    import torch  # noqa: F401
except ImportError:
    torch_imported = False
else:
    torch_imported = True

try:
    import tensorflow as tf  # type: ignore # noqa: F401
except (ImportError, TypeError):
...

output:
import importlib
import os
import re
import types
from typing import Any, Optional

import numpy as np

try:
    import torch  # noqa: F401
except ImportError:
    torch_imported = False
else:
    torch_imported = True

try:
    import tensorflow as tf  # type: ignore # noqa: F401
except (ImportError, TypeError):
...

"""**Utility functions** for LangChain. These functions do not depend on any other LangChain module. """ from importlib import import_module from typing import TYPE_CHECKING if TYPE_CHECKING: # for type checking and IDE support, we include the imports here # but we don't want to eagerly import them at runtim...
"""**Utility functions** for LangChain. These functions do not depend on any other LangChain module. """ from langchain_core.utils import image from langchain_core.utils.aiter import abatch_iterate from langchain_core.utils.env import get_from_dict_or_env, get_from_env from langchain_core.utils.formatting import Stri...
input:
import multiprocessing

import pytest

from jina import Client
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime

def _create_worker_runtime(...

output:
import multiprocessing

import pytest

from jina import Client
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime
from jina.serve.runtimes.gateway.http import HTTPGatewayRuntime
from jina...

input:
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union

import numpy as np

from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook

if TYPE_CHECKING:
    from pydantic import Ba...

output:
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union

import numpy as np

from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook

if TYPE_CHECKING:
    from pydantic import BaseConfig
...

input:
_base_ = [
    '../_base_/models/rpn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py'
]
val_evaluator = dict(metric='proposal_fast')
test_evaluator = val_evaluator

# inference on val dataset and dump the proposals with evaluate metric
# data...

output:
_base_ = [
    '../_base_/models/rpn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py'
]
val_evaluator = dict(metric='proposal_fast')
test_evaluator = val_evaluator

input:
from typing import Literal

from pydantic import SecretStr

from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    OAuth2Credentials,
)
from backend.integrations.providers import ProviderName
from backend.util.settings import Secrets

secrets = Secrets()

GITHUB_OAUTH...

output:
from typing import Literal

from pydantic import SecretStr

from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    OAuth2Credentials,
)
from backend.util.settings import Secrets

secrets = Secrets()

GITHUB_OAUTH_IS_CONFIGURED = bool(
    secrets.github_client_id and
...

input:
import copy
import importlib
import os
import sys

from keras.src import backend as backend_module
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state

def in_tf_graph():
    if global_state.get_global_attribute("in_tf_graph_scope", False):
        return True

    if "tenso...

output:
import copy
import importlib
import os
import sys

from keras.src import backend as backend_module
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state

def in_tf_graph():
    if global_state.get_global_attribute("in_tf_graph_scope", False):
        return True

    if "tenso...

input:
import logging
import typing

from autogpt_libs.auth import requires_admin_user
from autogpt_libs.auth.depends import get_user_id
from fastapi import APIRouter, Body, Depends
from prisma import Json
from prisma.enums import CreditTransactionType

from backend.data.credit import admin_get_user_history, get_user_credit_m...

output:
import logging
import typing

from autogpt_libs.auth import requires_admin_user
from autogpt_libs.auth.depends import get_user_id
from fastapi import APIRouter, Body, Depends
from prisma import Json
from prisma.enums import CreditTransactionType

from backend.data.credit import admin_get_user_history, get_user_credit_m...

input:
import torch
from torchaudio.models import emformer_rnnt_model, RNNTBeamSearch
from torchaudio_unittest.common_utils import TestBaseMixin, torch_script

class RNNTBeamSearchTestImpl(TestBaseMixin):
    def _get_input_config(self):
        model_config = self._get_model_config()
        return {
            "batch_size...

output:
import torch
from torchaudio.models import emformer_rnnt_model, RNNTBeamSearch
from torchaudio_unittest.common_utils import TestBaseMixin, torch_script

class RNNTBeamSearchTestImpl(TestBaseMixin):
    def _get_input_config(self):
        model_config = self._get_model_config()
        return {
            "batch_size...

"""Standard LangChain interface tests.""" import pytest from langchain_core.language_models import BaseChatModel from langchain_tests.unit_tests import ChatModelUnitTests from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped] from langchain_anthropic import ChatAnthropic class TestAnt...
"""Standard LangChain interface tests""" import pytest from langchain_core.language_models import BaseChatModel from langchain_tests.unit_tests import ChatModelUnitTests from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped] from langchain_anthropic import ChatAnthropic class TestAnth...
input:
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil...

output:
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil...

input:
from __future__ import annotations

from collections.abc import Iterable
from enum import Enum
from typing import Any

import torch.nn.functional as F
from torch import Tensor, nn

from sentence_transformers.SentenceTransformer import SentenceTransformer

class SiameseDistanceMetric(Enum):
    """The metric for the co...

output:
from __future__ import annotations

from collections.abc import Iterable
from enum import Enum
from typing import Any

import torch.nn.functional as F
from torch import Tensor, nn

from sentence_transformers.SentenceTransformer import SentenceTransformer

class SiameseDistanceMetric(Enum):
    """The metric for the co...

input:
from langchain_anthropic import __all__

EXPECTED_ALL = [
    "ChatAnthropicMessages",
    "ChatAnthropic",
    "convert_to_anthropic_tool",
    "Anthropic",
    "AnthropicLLM",
]

def test_all_imports() -> None:
    assert sorted(EXPECTED_ALL) == sorted(__all__)

output:
from langchain_anthropic import __all__

EXPECTED_ALL = ["ChatAnthropicMessages", "ChatAnthropic", "Anthropic", "AnthropicLLM"]

def test_all_imports() -> None:
    assert sorted(EXPECTED_ALL) == sorted(__all__)

input:
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'

import subprocess

import pytest
from jina import Document, DocumentArray, Flow

from ...transform_encoder import TransformerTorchEncoder

_EMBEDDING_DIM = 768

@pytest.mark.parametrize('request_size', [1, 10, 50, 10...

output:
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'

import subprocess
from typing import Callable, List

import pytest
from jina import DocumentArray, Flow

from ...transform_encoder import TransformerTorchEncoder

@pytest.mark.parametrize('request_size', [1, 10, 50,
...

input:
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
from unittest.mock import Mock

import torch
import torch.nn as nn
from torch.utils.data import Dataset

from mmengine.hooks import EMAHook
from mmengine.model import ExponentialMovingAverage
from mmengin...

output:
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
from unittest.mock import Mock

import torch
import torch.nn as nn
from torch.utils.data import Dataset

from mmengine.hooks import EMAHook
from mmengine.model import ExponentialMovingAverage
from mmengin...

input:
from langchain_huggingface.chat_models import (
    ChatHuggingFace,  # type: ignore[import-not-found]
)
from langchain_huggingface.embeddings import (
    HuggingFaceEmbeddings,
    HuggingFaceEndpointEmbeddings,
)
from langchain_huggingface.llms import (
    HuggingFaceEndpoint,
    HuggingFacePipeline,
)

__all__ = ...

output:
from langchain_huggingface.chat_models import (
    ChatHuggingFace,  # type: ignore[import-not-found]
)
from langchain_huggingface.embeddings import (
    HuggingFaceEmbeddings,
    HuggingFaceEndpointEmbeddings,
)
from langchain_huggingface.llms import (
    HuggingFaceEndpoint,
    HuggingFacePipeline,
)

__all__ = ...

""" Prompts for implementing Chain of Abstraction. While official prompts are not given (and the paper finetunes models for the task), we can take inspiration and use few-shot prompting to generate a prompt for implementing chain of abstraction in an LLM agent. """ REASONING_PROMPT_TEMPALTE = """Generate an abstract ...
""" Prompts for implementing Chain of Abstraction. While official prompts are not given (and the paper finetunes models for the task), we can take inspiration and use few-shot prompting to generate a prompt for implementing chain of abstraction in an LLM agent. """ REASONING_PROMPT_TEMPALTE = """Generate an abstract ...
input:
from __future__ import annotations

try:
    from typing import Self
except ImportError:
    from typing_extensions import Self

import torch
import transformers
from PIL import Image

from sentence_transformers.models.Asym import InputModule

class CLIPModel(InputModule):
    save_in_root: bool = True

    def __init...

output:
from __future__ import annotations

import torch
import transformers
from PIL import Image
from torch import nn

class CLIPModel(nn.Module):
    save_in_root: bool = True

    def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
        super().__init__()
        if proce...

input:
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess

import torch

def parse_args():
    parser = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    parser.add_argument('in_file', help='input checkpoint filename')
    parser.add_argument('out_file', h...

output:
import argparse
import subprocess

import torch

def parse_args():
    parser = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    parser.add_argument('in_file', help='input checkpoint filename')
    parser.add_argument('out_file', help='output checkpoint filename')
    args = par...

input:
from keras.src import activations
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer

def _large_negative_number(dtype):
    """Return a Large negative number based on dtype."""
    if backend.standardize_dtype(dtype) == "float16":
        return -3e4
...

output:
from keras.src import activations
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer

def _large_negative_number(dtype):
    """Return a Large negative number based on dtype."""
    if backend.standardize_dtype(dtype) == "float16":
        return -3e4
...

input:
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LI...

output:
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LI...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead

@HEADS.register_module()
class DoubleHeadRoIHead(StandardRoIHead):
    """RoI head for Double Head RCNN.

    https://arxiv.org/abs/1904.06493
    """

    def __init__(self, reg_roi_scale_factor...

output:
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead

@HEADS.register_module()
class DoubleHeadRoIHead(StandardRoIHead):
    """RoI head for Double Head RCNN.

    https://arxiv.org/abs/1904.06493
    """

    def __init__(self, reg_roi_scale_factor, **kwargs):
        super(DoubleHeadRoIHead, se...

input:
from collections.abc import Sequence

from langchain_core.tools import BaseTool

def validate_tools_single_input(class_name: str, tools: Sequence[BaseTool]) -> None:
    """Validate tools for single input.

    Args:
        class_name: Name of the class.
        tools: List of tools to validate.

    Raises:
...

output:
from typing import Sequence

from langchain_core.tools import BaseTool

def validate_tools_single_input(class_name: str, tools: Sequence[BaseTool]) -> None:
    """Validate tools for single input.

    Args:
        class_name: Name of the class.
        tools: List of tools to validate.

    Raises:
        ValueErro...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from .backends import (BaseStorageBackend, HTTPBackend, LmdbBackend,
                       LocalBackend, MemcachedBackend, PetrelBackend,
                       register_backend)
from .file_client import FileClient, HardDiskBackend
from .handlers import (BaseFileHandler,
...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from .file_client import (BaseStorageBackend, FileClient, HardDiskBackend,
                          HTTPBackend, LmdbBackend, MemcachedBackend,
                          PetrelBackend)
from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
from .i...

input:
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil...

output:
import string
import random
import pytest
import time
import os

cur_dir = os.path.dirname(os.path.abspath(__file__))
milvus_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))

@pytest.fixture(scope='session', autouse=True)
def start_storage():
    os.system(f"docker compose -f {milvus_yml} up -d --r...

"""This module contains all classes used for composing graphs over indices.""" from llama_index.core.indices.composability.graph import ComposableGraph __all__ = ["ComposableGraph"]
"""This module contains all classes used for composing graphs over indices.""" from llama_index.core.indices.composability.graph import ComposableGraph __all__ = ["ComposableGraph"]
"""Tests for tf.distribute related functionality under tf implementation.""" import numpy as np import pytest import tensorflow as tf from tensorflow.python.eager import context from keras.src import backend from keras.src import layers from keras.src import models from keras.src import testing from keras.src.backend...
"""Tests for tf.distribute related functionality under tf implementation.""" import numpy as np import pytest import tensorflow as tf from tensorflow.python.eager import context from keras.src import backend from keras.src import layers from keras.src import models from keras.src import testing from keras.src.backend...
input:
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.chat_summary_memory_buffer import ChatSummaryMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.memory.vector_memory import VectorMemory
from llama_index.core.memory.simple_composabl...

output:
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.chat_summary_memory_buffer import ChatSummaryMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.memory.vector_memory import VectorMemory
from llama_index.core.memory.simple_composabl...

input:
__version__ = '0.14.3'

import os

from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field

if 'DA_RICH_HANDLER' in os.environ:
    from rich.traceback import install

    install()

output:
__version__ = '0.14.2'

import os

from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field

if 'DA_RICH_HANDLER' in os.environ:
    from rich.traceback import install

    install()

input:
_base_ = '../_base_/default_runtime.py'

# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
fil...

output:
_base_ = '../_base_/default_runtime.py'

# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
fil...

input:
import os
import sys
from typing import Iterator, TYPE_CHECKING

import numpy as np

if TYPE_CHECKING:
    from docarray import Document

file_dir = os.path.dirname(__file__)
sys.path.append(os.path.dirname(file_dir))

def random_docs(
    num_docs,
    chunks_per_doc=5,
    embed_dim=10,
    jitter=1,
    start_id=0,...

output:
import os
import sys
from typing import Iterator, TYPE_CHECKING

import numpy as np

if TYPE_CHECKING:
    from jina import Document

file_dir = os.path.dirname(__file__)
sys.path.append(os.path.dirname(file_dir))

def random_docs(
    num_docs,
    chunks_per_doc=5,
    embed_dim=10,
    jitter=1,
    start_id=0,
...

input:
import pytest
from docarray import Document, DocumentArray
from jina import Executor, requests
from jina.clients.request import request_generator
from jina.logging.logger import JinaLogger
from jina.parsers import set_pod_parser
from jina.serve.runtimes.request_handlers.data_request_handler import DataRequestHandler
...

output:
import pytest
from docarray import Document, DocumentArray
from jina import Executor, requests
from jina.logging.logger import JinaLogger
from jina.parsers import set_pod_parser
from jina.serve.runtimes.request_handlers.data_request_handler import (
    DataRequestHandler,
)
from jina.clients.request import request_ge...

input:
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer

@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
    """Unit normalization layer.

    Normalize a batch of inputs so that each input in the batch has a L2 norm equal to ...

output:
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer

@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
    """Unit normalization layer.

    Normalize a batch of inputs so that each input in the batch has a L2 norm equal to ...

input:
import posixpath
from pathlib import Path

import fsspec
import pytest
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path

class MockFileSystem(AbstractFileSystem):
    protocol = "mock"

    def __init__(self, *args, local_root_dir, **kwargs):
        super().__init__()
...

output:
import posixpath
from pathlib import Path

import fsspec
import pytest
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path

class MockFileSystem(AbstractFileSystem):
    protocol = "mock"

    def __init__(self, *args, local_root_dir, **kwargs):
        super().__init__()
...

input:
import json
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union

import PIL.Image

from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset

class Food101(VisionDataset):
    """`The Food-101 Data Set <https://data.vision.ee.ethz.ch/cvl/datasets_ex...

output:
import json
from pathlib import Path
from typing import Any, Callable, Optional, Tuple

import PIL.Image

from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset

class Food101(VisionDataset):
    """`The Food-101 Data Set <https://data.vision.ee.ethz.ch/cvl/datasets_extra/foo...

input:
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, TypeVar, Union

from docarray import Document, DocumentArray
from docarray.math import ndarray
from docarray.score import NamedScore
from qdrant_client.http import models
from qdrant_client.http.models.models import Distanc...

output:
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, TypeVar, Union

from docarray import Document, DocumentArray
from docarray.math import ndarray
from docarray.score import NamedScore
from qdrant_client.http import models as rest
from qdrant_client.http.models.models import...

input:
#!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re

import numpy as np

url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/3.x/configs'

files = sorted(glob.glob('../../configs/*/README.md'))

stats = []
titles = []
num_ckpts = 0

for f in files:
    url = osp.dirname(f...

output:
#!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re

import numpy as np

url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/master/configs'

files = sorted(glob.glob('../../configs/*/README.md'))

stats = []
titles = []
num_ckpts = 0

for f in files:
    url = osp.dirnam...

input:
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'

# model settings
preprocess_cfg = dict(
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True,
    pad_size_divisor=32)
model = dict(
    preprocess_cfg=preprocess_cfg,
    backbone=dict(
        type='ResNeXt',
        depth=101,
...

output:
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval...

"""Retrieval evaluators.""" from typing import List, Optional, Tuple from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.bridge.pydantic import Field, SerializeAsAny from llama_index.core.evaluation.retrieval.base import ( BaseRetrievalEvaluator, RetrievalEvalMode, ) from llam...
"""Retrieval evaluators.""" from typing import List, Optional, Tuple from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.bridge.pydantic import Field, SerializeAsAny from llama_index.core.evaluation.retrieval.base import ( BaseRetrievalEvaluator, RetrievalEvalMode, ) from llam...
input:
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, TypeVar, Union

import numpy as np

from docarray import Document, DocumentArray
from docarray.array.mixins.find import FindMixin as BaseFindMixin
from docarray.math import ndarray
from docarray.math.ndarray import to_numpy_array
from docarray.score impor...

output:
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, TypeVar, Union

import numpy as np

from docarray import Document, DocumentArray
from docarray.array.mixins.find import FindMixin as BaseFindMixin
from docarray.math import ndarray
from docarray.math.ndarray import to_numpy_array
from docarray.score impor...

input:
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple

import torch
import torch.nn as nn
from mmcv import ops
from mmengine.model import BaseModule
from torch import Tensor

from mmdet.utils import ConfigType, OptMultiConfig

class BaseRoIExtr...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple

import torch
import torch.nn as nn
from mmcv import ops
from mmengine.model import BaseModule
from torch import Tensor

from mmdet.utils import ConfigType, OptMultiConfig

class BaseRoIExtr...

input:
import os

import librosa
from jina import Executor, Document, DocumentArray
from tensorflow.python.framework import ops

from ...vggish import vggish_input
from ...vggish_audio_encoder import VggishAudioEncoder

cur_dir = os.path.dirname(os.path.abspath(__file__))

def test_load():
    encoder = Executor.load_config(...

output:
import os

import librosa
from jina import Executor, Document, DocumentArray
from tensorflow.python.framework import ops

from ...vggish import vggish_input
from ...vggish_audio_encoder import VggishAudioEncoder

cur_dir = os.path.dirname(os.path.abspath(__file__))

def test_load():
    encoder = Executor.load_config(...

input:
_base_ = [
    '../_base_/models/cascade-rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py'
]

output:
_base_ = [
    '../_base_/models/cascade_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py'
]

"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
input:
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

import fire

from llama import Llama

def main(
    ckpt_dir: str,
    tokenizer_path: str,
    temperature: float = 0.6,
    top_p: float = 0.9,
    max_...

output:
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

import fire

from llama import Llama

def main(
    ckpt_dir: str,
    tokenizer_path: str,
    temperature: float = 0.6,
    top_p: float = 0.9,
    max_...

input:
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
    dict(
        type='BatchFixedSizePad',
        size=image_size,
        img_pad_value=0,
        pad_mask=True...

output:
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
model = dict(
    panoptic_head=dict(
        num_things_classes=num_things_classes,
        num_stuff_classes=num_stuff_classes,
        loss_cls=dict(class_wei...

input:
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F

from ..builder import LOSSES

@mmcv.jit(derivate=True, coderize=True)
def ae_loss_per_image(tl_preds, br_preds, match):
    """Associative Embedding Loss in one image.

    Associative Embedd...

output:
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F

from ..builder import LOSSES

@mmcv.jit(derivate=True, coderize=True)
def ae_loss_per_image(tl_preds, br_preds, match):
    """Associative Embedding Loss in one image.

    Associative Embedding Loss including two parts: pull loss and push...

input:
from typing import Dict, Tuple, Optional, List

import numpy as np
from jina import Executor, DocumentArray, requests, Document
from jina.types.arrays.memmap import DocumentArrayMemmap

class SimpleIndexer(Executor):
    """
    A simple indexer that stores all the Document data together, in a DocumentArrayMemmap
...

output:
from typing import Dict, Tuple, Optional, List

import numpy as np
from jina import Executor, DocumentArray, requests, Document
from jina.types.arrays.memmap import DocumentArrayMemmap

class SimpleIndexer(Executor):
    """
    A simple indexer that stores all the Document data together, in a DocumentArrayMemmap
...

input:
from typing import Any, ForwardRef, Optional, Union

from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type

from docarray.typing.id import ID
from docarray.typing.tensor.abstract_tensor import AbstractTensor

def is_type_tensor(type_: Any) -> bool:
    """Return True i...

output:
from typing import Any, ForwardRef, Optional

from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type

from docarray.typing.id import ID
from docarray.typing.tensor.abstract_tensor import AbstractTensor

def is_type_tensor(type_: Any) -> bool:
    """Return True if type ...

input:
_base_ = './mask_rcnn_hrnetv2p_w40_1x_coco.py'

# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        b...

output:
_base_ = './mask_rcnn_hrnetv2p_w40_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

""" This script contains an example how to perform semantic search with Qdrant. You need Qdrant up and running locally: https://qdrant.tech/documentation/quickstart/ Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.: ``` pip install qdrant-client ``` This script was create...
""" This script contains an example how to perform semantic search with Qdrant. You need Qdrant up and running locally: https://qdrant.tech/documentation/quickstart/ Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.: ``` pip install qdrant-client ``` This script was create...
"""Kept for backwards compatibility.""" from langchain_text_splitters import ( Language, RecursiveCharacterTextSplitter, TextSplitter, Tokenizer, TokenTextSplitter, ) from langchain_text_splitters.base import split_text_on_tokens from langchain_text_splitters.character import CharacterTextSplitter ...
"""Kept for backwards compatibility.""" from langchain_text_splitters import ( Language, RecursiveCharacterTextSplitter, TextSplitter, Tokenizer, TokenTextSplitter, ) from langchain_text_splitters.base import split_text_on_tokens from langchain_text_splitters.character import CharacterTextSplitter ...
input:
# Copyright (c) OpenMMLab. All rights reserved.
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, MODELS,
                   OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS,
                   RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS, TRANSFORMS, W...

output:
# Copyright (c) OpenMMLab. All rights reserved.
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, MODELS,
                   OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
                   RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS, TRANSFORMS,
                   WEIGHT_INITIALIZERS)

__all__ = [
...

input:
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional

import torch
import torch.nn as nn
from mmengine.model import ExponentialMovingAverage
from torch import Tensor

from mmdet.registry import MODELS

@MODELS.register_module()
class ExpMomentumEMA(ExponentialMovingAverage):
    """E...

output:
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional

import torch
import torch.nn as nn
from mmengine.model import ExponentialMovingAverage
from torch import Tensor

from mmdet.registry import MODELS

@MODELS.register_module()
class ExpMomentumEMA(ExponentialMovingAverage):
    """E...

"""JSON node parser.""" import json from typing import Any, Dict, Generator, List, Optional, Sequence from llama_index.core.callbacks.base import CallbackManager from llama_index.core.node_parser.interface import NodeParser from llama_index.core.node_parser.node_utils import build_nodes_from_splits from llama_index.co...
"""JSON node parser.""" import json from typing import Any, Dict, Generator, List, Optional, Sequence from llama_index.core.callbacks.base import CallbackManager from llama_index.core.node_parser.interface import NodeParser from llama_index.core.node_parser.node_utils import build_nodes_from_splits from llama_index.co...