Dataset columns: input (string, lengths 33–5k) and output (string, lengths 32–5k).
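A minimal sketch of how rows with this input/output schema could be loaded and inspected with the Hugging Face `datasets` library; the JSON-Lines file name `pairs.jsonl` is a hypothetical stand-in, since the source does not name the underlying data file.

from datasets import load_dataset

# Hypothetical path: the source does not name the data file.
ds = load_dataset("json", data_files="pairs.jsonl", split="train")

# Each record pairs an "input" snippet with its edited "output" counterpart.
for row in ds.select(range(3)):
    print(len(row["input"]), len(row["output"]))  # lengths fall in the 33-5k / 32-5k ranges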
_base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py', '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py' ] model = dict( bbox_head=dict( num_classes=601, anchor_generator=dict(basesize_ratio_range=(0.2, 0.9)))) # dataset settings dataset_typ...
_base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py', '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py' ] model = dict( bbox_head=dict( num_classes=601, anchor_generator=dict(basesize_ratio_range=(0.2, 0.9)))) # dataset settings dataset_typ...
_base_ = './vfnet_r50_fpn_1x_coco.py' train_pipeline = [ dict( type='LoadImageFromFile', file_client_args={{_base_.file_client_args}}), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomResize', scale=[(1333, 480), (1333, 960)], keep_ratio=True), dict(type='...
_base_ = './vfnet_r50_fpn_1x_coco.py' train_pipeline = [ dict( type='LoadImageFromFile', file_client_args={{_base_.file_client_args}}), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomResize', scale=[(1333, 480), (1333, 960)], keep_ratio=True), dict(type='...
"""**Callback handlers** allow listening to events in LangChain. **Class hierarchy:** .. code-block:: BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler """ from importlib import import_module from typing import TYPE_CHECKING if TYPE_CHECKING: from langchain_core.callbacks.base im...
"""**Callback handlers** allow listening to events in LangChain. **Class hierarchy:** .. code-block:: BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler """ from langchain_core.callbacks.base import ( AsyncCallbackHandler, BaseCallbackHandler, BaseCallbackManager, Callb...
# Copyright (c) OpenMMLab. All rights reserved. from .checkloss_hook import CheckInvalidLossHook from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook from .memory_profiler_hook import MemoryProfilerHook from .set_epoch_info_hook import SetEpochInfoHook from .sync_norm_hook import SyncNormHook from .sync_random_si...
# Copyright (c) OpenMMLab. All rights reserved. from .checkloss_hook import CheckInvalidLossHook from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook from .memory_profiler_hook import MemoryProfilerHook from .set_epoch_info_hook import SetEpochInfoHook from .sync_norm_hook import SyncNormHook from .sync_random_si...
import itertools from dataclasses import dataclass from typing import Optional import pyarrow as pa import datasets from datasets.table import table_cast logger = datasets.utils.logging.get_logger(__name__) @dataclass class ArrowConfig(datasets.BuilderConfig): """BuilderConfig for Arrow.""" features: Opt...
import itertools from dataclasses import dataclass from typing import Optional import pyarrow as pa import datasets from datasets.table import table_cast logger = datasets.utils.logging.get_logger(__name__) @dataclass class ArrowConfig(datasets.BuilderConfig): """BuilderConfig for Arrow.""" features: Opt...
from keras.src import backend from keras.src.api_export import keras_export from keras.src.layers.layer import Layer @keras_export("keras.layers.Dropout") class Dropout(Layer): """Applies dropout to the input. The `Dropout` layer randomly sets input units to 0 with a frequency of `rate` at each step duri...
from keras.src import backend from keras.src.api_export import keras_export from keras.src.layers.layer import Layer @keras_export("keras.layers.Dropout") class Dropout(Layer): """Applies dropout to the input. The `Dropout` layer randomly sets input units to 0 with a frequency of `rate` at each step duri...
import json from collections.abc import Sequence from langchain_core.agents import AgentAction from langchain_core.messages import ( AIMessage, BaseMessage, ToolMessage, ) from langchain.agents.output_parsers.tools import ToolAgentAction def _create_tool_message( agent_action: ToolAgentAction, obser...
import json from typing import List, Sequence, Tuple from langchain_core.agents import AgentAction from langchain_core.messages import ( AIMessage, BaseMessage, ToolMessage, ) from langchain.agents.output_parsers.tools import ToolAgentAction def _create_tool_message( agent_action: ToolAgentAction, o...
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS from ._bounding_box import BoundingBoxes, BoundingBoxFormat from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image from ._mask im...
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS from ._bounding_box import BoundingBox, BoundingBoxFormat from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image from ._mask impo...
from .simple_indexer import SimpleIndexer
from .simple_indexer import SimpleIndexer
_base_ = './yolov3_d53_mstrain-608_273e_coco.py' # dataset settings # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='disk') input_si...
_base_ = './yolov3_d53_mstrain-608_273e_coco.py' # dataset settings img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], ...
from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class AbstractDatasetReader(ABC): def __init__( self, path_or_paths: ...
from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class AbstractDatasetReader(ABC): def __init__( self, path_or_paths: ...
"""**Prompt values** for language model prompts. Prompt values are used to represent different pieces of prompts. They can be used to represent text, images, or chat message pieces. """ from __future__ import annotations from abc import ABC, abstractmethod from collections.abc import Sequence from typing import Lite...
"""**Prompt values** for language model prompts. Prompt values are used to represent different pieces of prompts. They can be used to represent text, images, or chat message pieces. """ from __future__ import annotations from abc import ABC, abstractmethod from collections.abc import Sequence from typing import Lite...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Optional from jina import Document, DocumentArray from jina.logging.logger import JinaLogger from pymongo import MongoClient from pymongo.errors import BulkWriteError class MongoHandler: def ...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Optional from jina import Document, DocumentArray from jina.logging.logger import JinaLogger from pymongo import MongoClient from pymongo.errors import BulkWriteError class MongoHandler: def ...
# Copyright (c) OpenMMLab. All rights reserved. import time from typing import Optional, Sequence, Union from mmengine.registry import HOOKS from .hook import Hook DATA_BATCH = Optional[Union[dict, tuple, list]] @HOOKS.register_module() class IterTimerHook(Hook): """A hook that logs the time spent during iterat...
# Copyright (c) OpenMMLab. All rights reserved. import time from typing import Optional, Sequence, Union from mmengine.registry import HOOKS from .hook import Hook DATA_BATCH = Optional[Union[dict, tuple, list]] @HOOKS.register_module() class IterTimerHook(Hook): """A hook that logs the time spent during iterat...
_base_ = './cascade-mask-rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pyt...
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pyt...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Any, Optional, Sequence, Tuple, Union from mmengine.data import BaseDataSample from .base import BaseEvaluator class ComposedEvaluator: """Wrapper class to compose multiple :class:`BaseEvaluator` instances. Args: evaluators (Sequence...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Sequence, Union from mmengine.data import BaseDataSample from .base import BaseEvaluator class ComposedEvaluator: """Wrapper class to compose multiple :class:`BaseEvaluator` instances. Args: evaluators (Sequence[BaseEvaluat...
from typing import ( Union, Optional, TYPE_CHECKING, List, Dict, ) if TYPE_CHECKING: import numpy as np from docarray import DocumentArray class FindMixin: def _find( self, query: 'np.ndarray', limit: Optional[Union[int, float]] = 20, only_id: bool = False...
from typing import ( Union, Optional, TYPE_CHECKING, List, Dict, ) if TYPE_CHECKING: import numpy as np from docarray import DocumentArray class FindMixin: def _find( self, query: 'np.ndarray', limit: Optional[Union[int, float]] = 20, only_id: bool = False...
"""Tool for the Passio Nutrition AI API.""" from typing import Dict, Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.utilities.passio_nutrition_ai import NutritionAIAPI class Nutri...
"""Tool for the Passio Nutrition AI API.""" from typing import Dict, Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.utilities.passio_nutrition_ai import NutritionAIAPI class Nutri...
import tensorflow as tf class TFExportArchive: def _track_layer(self, layer): # Variables in the lists below are actually part of the trackables # that get saved, because the lists are created in __init__. variables = layer.variables trainable_variables = layer.trainable_variables ...
import tensorflow as tf from keras.src import layers class TFExportArchive: def track(self, resource): if not isinstance(resource, tf.__internal__.tracking.Trackable): raise ValueError( "Invalid resource type. Expected an instance of a " "TensorFlow `Trackable`...
"""Init file of LlamaIndex.""" __version__ = "0.12.24" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_index.core....
"""Init file of LlamaIndex.""" __version__ = "0.12.23.post2" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_index...
_base_ = [ '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] norm_cfg = dict(type='BN', requires_grad=True) image_size = (640, 640) batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] mode...
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] norm_cfg = dict(type='BN', requires_grad=True) image_size = (640, 640) batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] mode...
import torch from dataset.hubert_dataset import _crop_audio_label from parameterized import parameterized from torchaudio.models import hubert_base from torchaudio_unittest.common_utils import get_whitenoise, TorchaudioTestCase class TestCropAudioLabel(TorchaudioTestCase): @classmethod def setUpClass(cls) -> ...
import torch from dataset.hubert_dataset import _crop_audio_label from parameterized import parameterized from torchaudio.models import hubert_base from torchaudio_unittest.common_utils import get_whitenoise, TorchaudioTestCase class TestCropAudioLabel(TorchaudioTestCase): @classmethod def setUpClass(cls) -> ...
# Copyright (c) OpenMMLab. All rights reserved. from ._flexible_runner import FlexibleRunner from .amp import autocast from .base_loop import BaseLoop from .checkpoint import (CheckpointLoader, find_latest_checkpoint, get_deprecated_model_names, get_external_models, get...
# Copyright (c) OpenMMLab. All rights reserved. from .amp import autocast from .base_loop import BaseLoop from .checkpoint import (CheckpointLoader, find_latest_checkpoint, get_deprecated_model_names, get_external_models, get_mmcls_models, get_state_dict, ...
"""Simple reader that turns an iterable of strings into a list of Documents.""" from typing import List from llama_index.core.readers.base import BasePydanticReader from llama_index.core.schema import Document class StringIterableReader(BasePydanticReader): """ String Iterable Reader. Gets a list of do...
"""Simple reader that turns an iterable of strings into a list of Documents.""" from typing import List from llama_index.core.readers.base import BasePydanticReader from llama_index.core.schema import Document class StringIterableReader(BasePydanticReader): """ String Iterable Reader. Gets a list of doc...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
import os import time from jina import Document, DocumentArray import pytest from ..redis_storage import RedisStorage @pytest.fixture(scope='function') def indexer(): return RedisStorage() @pytest.fixture() def docker_compose(request): os.system( f'docker-compose -f {request.param} --project-direc...
import os import time from jina import Document, DocumentArray import pytest from .. import RedisStorage @pytest.fixture(scope='function') def indexer(): return RedisStorage() @pytest.fixture() def docker_compose(request): os.system( f'docker-compose -f {request.param} --project-directory . up --...
from typing import ( TYPE_CHECKING, TypeVar, Sequence, List, Union, ) import numpy as np from .... import Document, DocumentArray from ....math import ndarray from ....math.helper import EPSILON from ....math.ndarray import to_numpy_array from ....score import NamedScore from ....array.mixins.find...
from typing import ( TYPE_CHECKING, TypeVar, Sequence, List, ) import numpy as np from .... import Document, DocumentArray from ....math import ndarray from ....math.helper import EPSILON from ....math.ndarray import to_numpy_array from ....score import NamedScore if TYPE_CHECKING: import tensorf...
import contextlib import os import sqlite3 import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def _check_sql_dataset(dataset, expected_f...
import contextlib import os import sqlite3 import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def _check_sql_dataset(dataset, expected_f...
from sentence_transformers import SentenceTransformer, LoggingHandler, InputExample from sentence_transformers import models, util, evaluation, losses import logging import os import gzip from datetime import datetime from torch.utils.data import DataLoader #### Just some code to print debug information to stdout logg...
from sentence_transformers import SentenceTransformer, LoggingHandler, InputExample from sentence_transformers import models, util, evaluation, losses import logging import os import gzip from datetime import datetime from torch.utils.data import DataLoader #### Just some code to print debug information to stdout logg...
from torch.fx.experimental.migrate_gradual_types.constraint import ( BinConstraintD, BVar, DVar, TVar, ) from torch.fx.experimental.migrate_gradual_types.operation import op_leq def gen_tvar(curr: int) -> tuple[TVar, int]: """ Generate a tensor variable :param curr: The current counter ...
# mypy: allow-untyped-defs from torch.fx.experimental.migrate_gradual_types.constraint import ( BinConstraintD, BVar, DVar, TVar, ) from torch.fx.experimental.migrate_gradual_types.operation import op_leq def gen_tvar(curr): """ Generate a tensor variable :param curr: The current counter ...
_base_ = [ 'mmdet::_base_/models/mask-rcnn_r50_fpn.py', 'mmdet::_base_/datasets/coco_instance.py', 'mmdet::_base_/schedules/schedule_1x.py', 'mmdet::_base_/default_runtime.py' ] # please install the mmpretrain # import mmpretrain.models to trigger register_module in mmpretrain custom_imports = dict( ...
_base_ = [ 'mmdet::_base_/models/mask-rcnn_r50_fpn.py', 'mmdet::_base_/datasets/coco_instance.py', 'mmdet::_base_/schedules/schedule_1x.py', 'mmdet::_base_/default_runtime.py' ] # please install the mmclassification dev-1.x branch # import mmcls.models to trigger register_module in mmcls custom_imports...
from __future__ import annotations from collections.abc import Iterable from typing import Any import torch from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer from sentence_transformers.util import fullname class CosineSimilarityLoss(nn.Module): def __init__(...
from __future__ import annotations from typing import Any, Iterable import torch from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer from sentence_transformers.util import fullname class CosineSimilarityLoss(nn.Module): def __init__( self, mode...
__version__ = '0.34.0' import logging from docarray.array import DocList, DocVec from docarray.base_doc.doc import BaseDoc from docarray.utils._internal.misc import _get_path_from_docarray_root_level __all__ = ['BaseDoc', 'DocList', 'DocVec'] logger = logging.getLogger('docarray') handler = logging.StreamHandler()...
__version__ = '0.33.1' import logging from docarray.array import DocList, DocVec from docarray.base_doc.doc import BaseDoc from docarray.utils._internal.misc import _get_path_from_docarray_root_level __all__ = ['BaseDoc', 'DocList', 'DocVec'] logger = logging.getLogger('docarray') handler = logging.StreamHandler()...
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import MaskedConv2d from mmdet.registry import MODELS from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead @MODELS.register_module() class GARetinaHead(GuidedAnchorHead): """Guided-Anc...
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import MaskedConv2d from ..builder import HEADS from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead @HEADS.register_module() class GARetinaHead(GuidedAnchorHead): """Guided-Anchor-bas...
""" This example runs a CNN after the word embedding lookup. The output of the CNN is than pooled, for example with mean-pooling. """ import logging import sys import traceback from datetime import datetime from datasets import load_dataset from sentence_transformers import SentenceTransformer, losses, models from ...
""" This example runs a CNN after the word embedding lookup. The output of the CNN is than pooled, for example with mean-pooling. """ import torch from torch.utils.data import DataLoader import math from sentence_transformers import models, losses, util from sentence_transformers import LoggingHandler, SentenceTransf...
import os import subprocess directory = os.path.dirname(os.path.realpath(__file__)) target_dirs = ["../backend", "../autogpt_libs"] def run(*command: str) -> None: print(f">>>>> Running poetry run {' '.join(command)}") subprocess.run(["poetry", "run"] + list(command), cwd=directory, check=True) def lint():...
import os import subprocess directory = os.path.dirname(os.path.realpath(__file__)) def run(*command: str) -> None: print(f">>>>> Running poetry run {' '.join(command)}") subprocess.run(["poetry", "run"] + list(command), cwd=directory, check=True) def lint(): try: run("ruff", "check", ".", "--e...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' # model settings model = dict( type='FSAF', bbox_head=dict( type='FSAFHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, reg_decoded_bbox=True, # Only anchor-free branch is imple...
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' # model settings model = dict( type='FSAF', bbox_head=dict( type='FSAFHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, reg_decoded_bbox=True, # Only anchor-free branch is imple...
import torch from torchaudio_unittest.common_utils import PytorchTestCase from torchaudio_unittest.prototype.hdemucs_test_impl import CompareHDemucsOriginal, HDemucsTests class HDemucsFloat32CPUTest(HDemucsTests, CompareHDemucsOriginal, PytorchTestCase): dtype = torch.float32 device = torch.device("cpu")
import torch from torchaudio_unittest.common_utils import PytorchTestCase from torchaudio_unittest.prototype.hdemucs_test_impl import HDemucsTests class HDemucsFloat32CPUTest(HDemucsTests, PytorchTestCase): dtype = torch.float32 device = torch.device("cpu")
from pydantic import BaseModel from backend.data.block import ( Block, BlockCategory, BlockOutput, BlockSchema, BlockWebhookConfig, ) from backend.data.model import SchemaField from backend.integrations.providers import ProviderName from backend.util import settings from backend.util.settings impor...
from pydantic import BaseModel from backend.data.block import ( Block, BlockCategory, BlockOutput, BlockSchema, BlockWebhookConfig, ) from backend.data.model import SchemaField from backend.integrations.providers import ProviderName from backend.util import settings from backend.util.settings impor...
import numpy as np from absl.testing import parameterized from keras.src import backend from keras.src import dtype_policies from keras.src import layers from keras.src import testing class ZeroPadding2DTest(testing.TestCase): @parameterized.parameters( {"data_format": "channels_first"}, {"data_f...
import numpy as np from absl.testing import parameterized from keras.src import backend from keras.src import dtype_policies from keras.src import layers from keras.src import testing class ZeroPadding2DTest(testing.TestCase, parameterized.TestCase): @parameterized.parameters( {"data_format": "channels_f...
__version__ = '0.31.0' import logging from docarray.array import DocList, DocVec from docarray.base_doc.doc import BaseDoc from docarray.utils._internal.misc import _get_path_from_docarray_root_level __all__ = ['BaseDoc', 'DocList', 'DocVec'] logger = logging.getLogger('docarray') handler = logging.StreamHandler()...
__version__ = '0.30.1' import logging from docarray.array import DocList, DocVec from docarray.base_doc.doc import BaseDoc from docarray.utils._internal.misc import _get_path_from_docarray_root_level __all__ = ['BaseDoc', 'DocList', 'DocVec'] logger = logging.getLogger('docarray') handler = logging.StreamHandler()...
import datetime import uuid from unittest.mock import MagicMock, patch from langsmith.schemas import Example from langchain_core.document_loaders import LangSmithLoader from langchain_core.documents import Document def test_init() -> None: LangSmithLoader(api_key="secret") EXAMPLES = [ Example( in...
import datetime import uuid from unittest.mock import MagicMock, patch from langsmith.schemas import Example from langchain_core.document_loaders import LangSmithLoader from langchain_core.documents import Document def test_init() -> None: LangSmithLoader(api_key="secret") EXAMPLES = [ Example( in...
# Copyright (c) OpenMMLab. All rights reserved. import os import platform import warnings import cv2 import torch.multiprocessing as mp def setup_multi_processes(cfg): """Setup multi-processing environment variables.""" # set multi-process start method as `fork` to speed up the training if platform.syste...
# Copyright (c) OpenMMLab. All rights reserved. import os import platform import warnings import cv2 import torch.multiprocessing as mp def setup_multi_processes(cfg): """Setup multi-processing environment variables.""" # set multi-process start method as `fork` to speed up the training if platform.syste...
from __future__ import annotations from collections.abc import Iterable import torch from torch import Tensor, nn from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class FlopsLoss(nn.Module): def __init__(self, model: SparseEncoder) -> None: """ FlopsLoss implements a...
from __future__ import annotations from collections.abc import Iterable import torch from torch import Tensor, nn from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class FlopsLoss(nn.Module): def __init__(self, model: SparseEncoder) -> None: super().__init__() self.mo...
from __future__ import annotations from collections.abc import Sequence from typing import Literal, TypeAlias from ._typing import Array, Device, DType, Namespace _Norm: TypeAlias = Literal["backward", "ortho", "forward"] # Note: NumPy fft functions improperly upcast float32 and complex64 to # complex128, which is ...
from __future__ import annotations from typing import TYPE_CHECKING, Union, Optional, Literal if TYPE_CHECKING: from ._typing import Device, ndarray, DType from collections.abc import Sequence # Note: NumPy fft functions improperly upcast float32 and complex64 to # complex128, which is why we require wrappin...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
_base_ = 'faster-rcnn_r50-caffe_fpn_1x_coco.py' # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[60000, 80000]) # Runner type runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000) checkpoint_config = dict(interval=1000...
_base_ = 'faster_rcnn_r50_caffe_fpn_1x_coco.py' # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[60000, 80000]) # Runner type runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000) checkpoint_config = dict(interval=1000...
from abc import abstractmethod from typing import Any, List, Union from llama_index.core.graph_stores.types import PropertyGraphStore from llama_index.core.indices.property_graph.sub_retrievers.base import BasePGRetriever from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode CUSTOM_RETRIEVE_TYPE = ...
from abc import abstractmethod from typing import Any, List, Union from llama_index.core.graph_stores.types import PropertyGraphStore from llama_index.core.indices.property_graph.sub_retrievers.base import BasePGRetriever from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode CUSTOM_RETRIEVE_TYPE = ...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Any, Optional, Sequence, Tuple from mmengine.data import BaseDataSample from mmengine.registry import HOOKS from .hook import Hook @HOOKS.register_module() class ParamSchedulerHook(Hook): """A hook to update some hyper-parameters in optimizer, e....
# Copyright (c) OpenMMLab. All rights reserved. from typing import Any, Optional, Sequence, Tuple from mmengine.data import BaseDataSample from mmengine.registry import HOOKS from .hook import Hook @HOOKS.register_module() class ParamSchedulerHook(Hook): """A hook to update some hyper-parameters in optimizer, e....
# coding: utf-8 from functools import lru_cache import numpy as np import sklearn.datasets from sklearn.utils import check_random_state @lru_cache(maxsize=None) def load_boston(**kwargs): return sklearn.datasets.load_boston(**kwargs) @lru_cache(maxsize=None) def load_breast_cancer(**kwargs): return sklearn...
# coding: utf-8 from functools import lru_cache import numpy as np import sklearn.datasets from sklearn.utils import check_random_state @lru_cache(maxsize=None) def load_boston(**kwargs): return sklearn.datasets.load_boston(**kwargs) @lru_cache(maxsize=None) def load_breast_cancer(**kwargs): return sklearn...
from prisma.models import User from backend.blocks.basic import StoreValueBlock from backend.blocks.io import AgentInputBlock from backend.blocks.text import FillTextTemplateBlock from backend.data import graph from backend.data.graph import create_graph from backend.data.user import get_or_create_user from backend.ut...
from prisma.models import User from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock from backend.blocks.text import FillTextTemplateBlock from backend.data import graph from backend.data.graph import create_graph from backend.data.user import get_or_create_user from backend.util.test import SpinTestSe...
# Copyright (c) OpenMMLab. All rights reserved. from argparse import ArgumentParser, Namespace from pathlib import Path from tempfile import TemporaryDirectory import mmcv try: from model_archiver.model_packaging import package_model from model_archiver.model_packaging_utils import ModelExportUtils except Imp...
from argparse import ArgumentParser, Namespace from pathlib import Path from tempfile import TemporaryDirectory import mmcv try: from model_archiver.model_packaging import package_model from model_archiver.model_packaging_utils import ModelExportUtils except ImportError: package_model = None def mmdet2t...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import mmengine from mmengine.utils import digit_version from .version import __version__, version_info mmcv_minimum_version = '2.0.0rc0' mmcv_maximum_version = '2.0.0' mmcv_version = digit_version(mmcv.__version__) mmengine_minimum_version = '0.0.0' mmengi...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv from .version import __version__, short_version def digit_version(version_str): digit_version = [] for x in version_str.split('.'): if x.isdigit(): digit_version.append(int(x)) elif x.find('rc') != -1: patch_v...
prompt_template = """Given the following question and context, extract any part of the context *AS IS* that is relevant to answer the question. If none of the context is relevant return {no_output_str}. Remember, *DO NOT* edit the extracted parts of the context. > Question: {{question}} > Context: >>> {{context}} >>>...
# flake8: noqa prompt_template = """Given the following question and context, extract any part of the context *AS IS* that is relevant to answer the question. If none of the context is relevant return {no_output_str}. Remember, *DO NOT* edit the extracted parts of the context. > Question: {{question}} > Context: >>>...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class GFL(SingleStageDetector): """Implementation of `GFL <https://arxiv.org/abs/2006.04388>`_ ...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class GFL(SingleStageDetector): def __init__(self, backbone, neck, bbox_head, train_cfg=Non...
import os import pytest from llama_index.core.agent.function_calling.base import FunctionCallingAgent from llama_index.core.tools.tool_spec.base import BaseToolSpec from llama_index.llms.openai import OpenAI from llama_index.tools.agentql import AgentQLBrowserToolSpec from llama_index.tools.playwright import Playwrigh...
import pytest import os from llama_index.core.tools.tool_spec.base import BaseToolSpec from llama_index.core.agent import FunctionCallingAgent from llama_index.tools.agentql import AgentQLBrowserToolSpec from llama_index.tools.playwright import PlaywrightToolSpec from llama_index.llms.openai import OpenAI from test...
from typing import Any, Optional, Type, TypeVar, Union from docarray.base_doc import BaseDoc from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces from docarray.typing.tensor.embedding import AnyEmbedding from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl T = TypeVar('T', bound='Mesh3D') cl...
from typing import Any, Optional, Type, TypeVar, Union from docarray.base_document import BaseDocument from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces from docarray.typing.tensor.embedding import AnyEmbedding from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl T = TypeVar('T', bound='Mes...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase from unittest.mock import MagicMock, Mock, patch from mmengine.hooks import IterTimerHook from mmengine.logging import MessageHub def time_patch(): if not hasattr(time_patch, 'time'): time_patch.time = 0 else: time_...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase from unittest.mock import MagicMock, Mock, patch from mmengine.hooks import IterTimerHook from mmengine.logging import MessageHub def time_patch(): if not hasattr(time_patch, 'time'): time_patch.time = 0 else: time_...
from datetime import datetime from enum import Enum import os from typing import List, Optional, Union import pytest from llama_index.core.program.function_program import get_function_tool from pydantic import BaseModel, Field from llama_index.llms.google_genai import GoogleGenAI from llama_index.llms.google_genai.ut...
from datetime import datetime from enum import Enum import os from typing import List, Optional, Union import pytest from llama_index.core.program.function_program import get_function_tool from pydantic import BaseModel, Field from llama_index.llms.google_genai import GoogleGenAI from llama_index.llms.google_genai.ut...
from typing import Type, TypeVar from pydantic import AnyUrl as BaseAnyUrl from pydantic import parse_obj_as from docarray.document.base_node import BaseNode from docarray.proto import NodeProto T = TypeVar('T', bound='AnyUrl') class AnyUrl(BaseAnyUrl, BaseNode): def _to_node_protobuf(self) -> NodeProto: ...
from pydantic import AnyUrl as BaseAnyUrl from docarray.document.base_node import BaseNode from docarray.proto import NodeProto class AnyUrl(BaseAnyUrl, BaseNode): def _to_node_protobuf(self) -> NodeProto: """Convert Document into a NodeProto protobuf message. This function should be called when ...
import os import fsspec import pytest from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from datasets.utils._hf_hub_fixes import dataset_info as hf_api_dataset_info from .utils import require_lz4, require_zstandard def test_extract_path_from_uri(): ...
import os import fsspec import pytest from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from datasets.utils._hf_hub_fixes import dataset_info as hf_api_dataset_info from .utils import require_lz4, require_zstandard def test_extract_path_from_uri(): ...
_base_ = './faster-rcnn_r50_fpn_1x_coco.py' # fp16 settings fp16 = dict(loss_scale=512.)
_base_ = './faster_rcnn_r50_fpn_1x_coco.py' # fp16 settings fp16 = dict(loss_scale=512.)
from typing import Union import torch import torch.fx from torch import nn, Tensor from torch.jit.annotations import BroadcastingList2 from torch.nn.modules.utils import _pair from torchvision.extension import _assert_has_ops from ..utils import _log_api_usage_once from ._utils import check_roi_boxes_shape, convert_b...
from typing import List, Union import torch import torch.fx from torch import nn, Tensor from torch.jit.annotations import BroadcastingList2 from torch.nn.modules.utils import _pair from torchvision.extension import _assert_has_ops from ..utils import _log_api_usage_once from ._utils import check_roi_boxes_shape, con...
import pytest from docarray import DocumentArray from docarray.array.opensearch import DocumentArrayOpenSearch from docarray.array.qdrant import DocumentArrayQdrant from docarray.array.sqlite import DocumentArraySqlite from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig from docarray.array.storage.o...
import pytest from docarray import DocumentArray from docarray.array.qdrant import DocumentArrayQdrant from docarray.array.sqlite import DocumentArraySqlite from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig from docarray.array.storage.qdrant import QdrantConfig from docarray.array.storage.weaviate...
import asyncio from typing import Any, Callable, Optional, Sequence, Union from llama_index.core.async_utils import run_jobs from llama_index.core.indices.property_graph.utils import ( default_parse_triplets_fn, ) from llama_index.core.graph_stores.types import ( EntityNode, Relation, KG_NODES_KEY, ...
import asyncio from typing import Any, Callable, Optional, Sequence, Union from llama_index.core.async_utils import run_jobs from llama_index.core.indices.property_graph.utils import ( default_parse_triplets_fn, ) from llama_index.core.graph_stores.types import ( EntityNode, Relation, KG_NODES_KEY, ...
# Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to...
# Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to...
from typing import Union from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding from docarray.utils._internal.misc import is_tf_available, is_torch_available torch_available = is_torch_available() if torch_available: from docarray.typing.tensor.embedding.torch import TorchEmbedding tf_available =...
from typing import Union from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding from docarray.utils.misc import is_tf_available, is_torch_available torch_available = is_torch_available() if torch_available: from docarray.typing.tensor.embedding.torch import TorchEmbedding tf_available = is_tf_ava...
# model settings input_size = 300 model = dict( type='SingleStageDetector', data_preprocessor=dict( type='DetDataPreprocessor', mean=[123.675, 116.28, 103.53], std=[1, 1, 1], bgr_to_rgb=True, pad_size_divisor=1), backbone=dict( type='SSDVGG', depth=16,...
# model settings preprocess_cfg = dict( mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) input_size = 300 model = dict( type='SingleStageDetector', preprocess_cfg=preprocess_cfg, backbone=dict( type='SSDVGG', depth=16, with_last_pool=False, ceil_mode=True, ...
from typing import Any, Dict, Iterator import torch from ..utils import _log_api_usage_once try: from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER except ModuleNotFoundError: _HAS_GPU_VIDEO_DECODER = False from ._video_opt import ( _HAS_VIDEO_OPT, _probe_video_from_file, _probe_video_from_me...
from typing import Any, Dict, Iterator import torch from ..utils import _log_api_usage_once try: from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER except ModuleNotFoundError: _HAS_GPU_VIDEO_DECODER = False from ._video_opt import ( _HAS_VIDEO_OPT, _probe_video_from_file, _probe_video_from_me...
import sys from jina.parsers import set_gateway_parser from jina.parsers.helper import _update_gateway_args from jina.serve.runtimes.gateway import GatewayRuntime def run(*args, **kwargs): runtime_cls = GatewayRuntime print(f' args {args}') runtime_args = set_gateway_parser().parse_args(args) print(f...
import sys from jina.parsers import set_gateway_parser from jina.parsers.helper import _set_gateway_uses from jina.serve.runtimes.gateway import GatewayRuntime def run(*args, **kwargs): runtime_cls = GatewayRuntime print(f' args {args}') runtime_args = set_gateway_parser().parse_args(args) print(f' p...
from torchaudio.utils import sox_utils from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoSox @skipIfNoSox class TestSoxUtils(PytorchTestCase): """Smoke tests for sox_util module""" def test_set_seed(self): """`set_seed` does not crash""" sox_utils.set_seed(0) def test...
from torchaudio.utils import sox_utils from torchaudio_unittest.common_utils import ( PytorchTestCase, skipIfNoSox, ) @skipIfNoSox class TestSoxUtils(PytorchTestCase): """Smoke tests for sox_util module""" def test_set_seed(self): """`set_seed` does not crash""" sox_utils.set_seed(0) ...
from prisma.models import User from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock from backend.blocks.text import FillTextTemplateBlock from backend.data import graph from backend.data.graph import create_graph from backend.data.user import get_or_create_user from backend.util.test import SpinTestSe...
from prisma.models import User from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock from backend.blocks.text import FillTextTemplateBlock from backend.data import graph from backend.data.graph import create_graph from backend.data.user import get_or_create_user from backend.util.test import SpinTestSe...
from torchvision.transforms import InterpolationMode # usort: skip from ._utils import is_simple_tensor, register_kernel # usort: skip from ._meta import ( clamp_bounding_boxes, convert_format_bounding_boxes, get_dimensions_image_tensor, get_dimensions_image_pil, get_dimensions_video, get_di...
from torchvision.transforms import InterpolationMode # usort: skip from ._utils import is_simple_tensor, register_kernel # usort: skip from ._meta import ( clamp_bounding_boxes, convert_format_bounding_boxes, get_dimensions_image_tensor, get_dimensions_image_pil, get_dimensions_video, get_di...
"""Google Search API Toolkit.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools import GoogleSearchResults, GoogleSearchRun # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising depreca...
"""Google Search API Toolkit.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools import GoogleSearchResults, GoogleSearchRun # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising depreca...
"""Tracker for XGBoost collective.""" import ctypes import json import socket from enum import IntEnum, unique from typing import Dict, Optional, Union from .core import _LIB, _check_call, _deprecate_positional_args, make_jcargs def get_family(addr: str) -> int: """Get network family from address.""" return...
"""Tracker for XGBoost collective.""" import ctypes import json import socket from enum import IntEnum, unique from typing import Dict, Optional, Union from .core import _LIB, _check_call, make_jcargs def get_family(addr: str) -> int: """Get network family from address.""" return socket.getaddrinfo(addr, No...
""" This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is than pooled, for example with max-pooling (which gives a system like InferSent) or with mean-pooling. Note, you can also pass BERT embeddings to the BiLSTM. """ import logging import traceback from datetime import datetime fr...
""" This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is than pooled, for example with max-pooling (which gives a system like InferSent) or with mean-pooling. Note, you can also pass BERT embeddings to the BiLSTM. """ import torch from torch.utils.data import DataLoader import math f...
from typing import Union import torch import transformers from PIL import Image from torch import nn class CLIPModel(nn.Module): def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None): super(CLIPModel, self).__init__() if processor_name is None: pro...
from torch import nn import transformers import torch from PIL import Image class CLIPModel(nn.Module): def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name = None): super(CLIPModel, self).__init__() if processor_name is None: processor_name = model_nam...
from langchain_core.prompts.prompt import PromptTemplate _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. Chat History: {chat_history} Follow Up Input: {question} Standalone question:""" # noqa: E501 CONDENSE_QUESTION_PROMPT = Prom...
# flake8: noqa from langchain_core.prompts.prompt import PromptTemplate _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. Chat History: {chat_history} Follow Up Input: {question} Standalone question:""" CONDENSE_QUESTION_PROMPT = Pro...
""" Here, because clip is not consistent with the use of the "Text" and "Vision" prefixes, we cannot simply use ``` class Multimodal2VisionModel(CLIPVisionModel): pass ``` with the hope that all dependencies will be renamed as `Multimodal2VisionClass`. For this reason, if we want consistency and use the "Vision" pa...
""" Here, because clip is not consistent with the use of the "Text" and "Vision" prefixes, we cannot simply use ``` class Multimodal2VisionModel(CLIPVisionModel): pass ``` with the hope that all dependencies will be renamed as `Multimodal2VisionClass`. For this reason, if we want consistency and use the "Vision" pa...
__version__ = '0.13.17' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install()
__version__ = '0.13.16' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install()
from pydantic import BaseModel from typing import Any, AsyncGenerator, List from llama_index.llms.nvidia import NVIDIA as Interface from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.program import FunctionCallingProgram import pytest from llama_index.llms.nvidia.utils import ( MODE...
from pydantic import BaseModel from typing import Any, AsyncGenerator, List from llama_index.llms.nvidia import NVIDIA as Interface from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.program import FunctionCallingProgram import pytest from llama_index.llms.nvidia.utils import ( MODE...
"""Development Scripts for template packages.""" from collections.abc import Sequence from typing import Literal from fastapi import FastAPI from langserve import add_routes from langchain_cli.utils.packages import get_langserve_export, get_package_root def create_demo_server( *, config_keys: Sequence[str]...
# type: ignore """Development Scripts for template packages.""" from collections.abc import Sequence from fastapi import FastAPI from langserve import add_routes from langchain_cli.utils.packages import get_langserve_export, get_package_root def create_demo_server( *, config_keys: Sequence[str] = (), p...
from contextlib import nullcontext from typing import List import pytest import torch import tqdm from torch.optim import Adam from transformers import set_seed from sentence_transformers import InputExample, SentenceTransformer, losses @pytest.mark.parametrize( ["train_samples_mnrl", "train_samples_cmnrl", "sa...
from contextlib import nullcontext from typing import List import pytest from sentence_transformers import SentenceTransformer, InputExample, losses import tqdm from transformers import set_seed import torch from torch.optim import Adam @pytest.mark.parametrize( ["train_samples_mnrl", "train_samples_cmnrl", "same...
""" ============================================= A demo of the Spectral Biclustering algorithm ============================================= This example demonstrates how to generate a checkerboard dataset and bicluster it using the :class:`~sklearn.cluster.SpectralBiclustering` algorithm. The spectral biclustering a...
""" ============================================= A demo of the Spectral Biclustering algorithm ============================================= This example demonstrates how to generate a checkerboard dataset and bicluster it using the :class:`~sklearn.cluster.SpectralBiclustering` algorithm. The spectral biclustering a...
# Copyright (c) OpenMMLab. All rights reserved. import glob import os import os.path as osp import warnings import mmcv from mmcv.utils import print_log def find_latest_checkpoint(path, suffix='pth'): """Find the latest checkpoint from the working directory. Args: path(str): The path to find checkpo...
# Copyright (c) OpenMMLab. All rights reserved. import glob import os.path as osp import warnings def find_latest_checkpoint(path, suffix='pth'): """Find the latest checkpoint from the working directory. Args: path(str): The path to find checkpoints. suffix(str): File extension. D...
# Copyright (c) OpenMMLab. All rights reserved. from .utils import _dummy_bbox_sampling __all__ = ['_dummy_bbox_sampling']
from .utils import _dummy_bbox_sampling __all__ = ['_dummy_bbox_sampling']
from .cmuarctic import CMUARCTIC from .cmudict import CMUDict from .commonvoice import COMMONVOICE from .dr_vctk import DR_VCTK from .fluentcommands import FluentSpeechCommands from .gtzan import GTZAN from .librilight_limited import LibriLightLimited from .librimix import LibriMix from .librispeech import LIBRISPEECH ...
from .cmuarctic import CMUARCTIC from .cmudict import CMUDict from .commonvoice import COMMONVOICE from .dr_vctk import DR_VCTK from .gtzan import GTZAN from .librilight_limited import LibriLightLimited from .librimix import LibriMix from .librispeech import LIBRISPEECH from .libritts import LIBRITTS from .ljspeech imp...
_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), ...
_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), ...
import numpy as np import pytest from pydantic import Field from docarray import BaseDoc from docarray.index import HnswDocumentIndex from docarray.typing import NdArray pytestmark = [pytest.mark.slow, pytest.mark.index] class SimpleDoc(BaseDoc): tens: NdArray[10] = Field(dim=1000) class NestedDoc(BaseDoc): ...
import numpy as np import pytest from pydantic import Field from docarray import BaseDoc from docarray.index import HnswDocumentIndex from docarray.typing import NdArray pytestmark = [pytest.mark.slow, pytest.mark.index] class SimpleDoc(BaseDoc): tens: NdArray[10] = Field(dim=1000) class NestedDoc(BaseDoc): ...
import functools import warnings from collections import defaultdict from typing import Any, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union import torch from torchvision import datapoints from torchvision.transforms.v2 import Transform from torchvision.transforms.v2.utils import is_pure_tensor T = TypeVar("...
import functools import warnings from collections import defaultdict from typing import Any, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union import torch from torchvision import datapoints from torchvision.transforms.v2 import Transform from torchvision.transforms.v2.utils import is_simple_tensor T = TypeVar...
from __future__ import annotations from collections.abc import Iterable from typing import Any import torch from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer from sentence_transformers.util import fullname class CosineSimilarityLoss(nn.Module): def __init__(...
from __future__ import annotations from collections.abc import Iterable from typing import Any import torch from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer from sentence_transformers.util import fullname class CosineSimilarityLoss(nn.Module): def __init__(...
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import tempfile from unittest import TestCase from unittest.mock import Mock import torch import torch.nn as nn from torch.utils.data import Dataset from mmengine.hooks import EMAHook from mmengine.model import ExponentialMovingAverage from mmengin...
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import tempfile from unittest import TestCase from unittest.mock import Mock import torch import torch.nn as nn from torch.utils.data import Dataset from mmengine.hooks import EMAHook from mmengine.model import ExponentialMovingAverage from mmengin...
import logging from typing import Any, Optional from llama_index.core.bridge.pydantic import Field, model_serializer, ValidationError from llama_index.core.tools import ToolSelection, ToolOutput from llama_index.core.llms import ChatMessage from llama_index.core.workflow import Event, StartEvent logger = logging.get...
from typing import Any, Optional from llama_index.core.bridge.pydantic import Field, model_serializer from llama_index.core.tools import ToolSelection, ToolOutput from llama_index.core.llms import ChatMessage from llama_index.core.workflow import Event, StartEvent class AgentInput(Event): """LLM input.""" i...
""" This file evaluates CrossEncoder on the TREC 2019 Deep Learning (DL) Track: https://arxiv.org/abs/2003.07820 TREC 2019 DL is based on the corpus of MS Marco. MS Marco provides a sparse annotation, i.e., usually only a single passage is marked as relevant for a given query. Many other highly relevant passages are n...
""" This file evaluates CrossEncoder on the TREC 2019 Deep Learning (DL) Track: https://arxiv.org/abs/2003.07820 TREC 2019 DL is based on the corpus of MS Marco. MS Marco provides a sparse annotation, i.e., usually only a single passage is marked as relevant for a given query. Many other highly relevant passages are n...
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. from typing import Optional import fire from llama import Llama def main( ckpt_dir: str, tokenizer_path: str, temperature: float = 0.6, ...
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. from typing import Optional import fire from llama import Llama def main( ckpt_dir: str, tokenizer_path: str, temperature: float = 0.6, ...
from ._bounding_box import BoundingBox, BoundingBoxFormat from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image from ._mask import Mask from ._video import _TensorVideoType, _TensorVideoTypeJIT, _VideoType, ...
from ._bounding_box import BoundingBox, BoundingBoxFormat from ._datapoint import FillType, FillTypeJIT, InputType, InputTypeJIT from ._image import Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT from ._mask import Mask from ._video import TensorVideoType, TensorVideoTypeJIT, Video, VideoType, Vide...
import os from typing import Dict DEPLOYMENT_FILES = [ 'statefulset-executor', 'deployment-executor', 'deployment-gateway', 'deployment-uses-before', 'deployment-uses-after', 'deployment-uses-before-after', ] cur_dir = os.path.dirname(__file__) DEFAULT_RESOURCE_DIR = os.path.join( cur_dir,...
import os from typing import Dict DEPLOYMENT_FILES = [ 'statefulset-executor', 'deployment-executor', 'deployment-gateway', 'deployment-uses-before', 'deployment-uses-after', 'deployment-uses-before-after', ] cur_dir = os.path.dirname(__file__) DEFAULT_RESOURCE_DIR = os.path.join( cur_dir,...
""" Demo for using data iterator with Quantile DMatrix ================================================== .. versionadded:: 1.2.0 The demo that defines a customized iterator for passing batches of data into :py:class:`xgboost.QuantileDMatrix` and use this ``QuantileDMatrix`` for training. The feature is primaril...
""" Demo for using data iterator with Quantile DMatrix ================================================== .. versionadded:: 1.2.0 The demo that defines a customized iterator for passing batches of data into :py:class:`xgboost.QuantileDMatrix` and use this ``QuantileDMatrix`` for training. The feature is used pri...