Columns: input (string, lengths 33 to 5k) and output (string, lengths 32 to 5k)
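The header above is the column schema of this preview: two string fields, input and output, each between roughly 32 and 5k characters. Assuming the rows below come from a Hugging Face dataset with this schema, here is a minimal sketch of loading and inspecting it; the repository id is hypothetical, since the preview does not name the dataset:

from datasets import load_dataset

# Hypothetical repository id; substitute the actual dataset name.
ds = load_dataset("example-org/code-edit-pairs", split="train")

# Each row pairs an "input" code snippet with its edited "output" version.
row = ds[0]
print(len(row["input"]), len(row["output"]))  # plain strings, between 32 and 5k characters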
import argparse from jina.enums import GatewayProtocolType from jina.helper import parse_host_scheme from jina.logging.predefined import default_logger class NetworkChecker: """Check if a BaseDeployment is running or not.""" def __init__(self, args: 'argparse.Namespace'): """ Create a new :c...
import argparse import urllib from http import HTTPStatus from jina.enums import GatewayProtocolType from jina.helper import parse_host_scheme from jina.logging.predefined import default_logger class NetworkChecker: """Check if a BaseDeployment is running or not.""" def __init__(self, args: 'argparse.Namesp...
_base_ = './freeanchor_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
_base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
# Copyright (c) OpenMMLab. All rights reserved. from collections import OrderedDict from mmcv.utils import print_log from mmdet.core import eval_map, eval_recalls from mmdet.registry import DATASETS from .xml_style import XMLDataset @DATASETS.register_module() class VOCDataset(XMLDataset): CLASSES = ('aeroplan...
# Copyright (c) OpenMMLab. All rights reserved. from collections import OrderedDict from mmcv.utils import print_log from mmdet.core import eval_map, eval_recalls from .builder import DATASETS from .xml_style import XMLDataset @DATASETS.register_module() class VOCDataset(XMLDataset): CLASSES = ('aeroplane', 'b...
# Copyright (c) OpenMMLab. All rights reserved. from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS, AmpOptimWrapper, ApexOptimWrapper, BaseOptimWrapper, DefaultOptimWrapperConstructor, OptimWrapper, OptimWrapperDict, ZeroRedundancyOptim...
# Copyright (c) OpenMMLab. All rights reserved. from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS, AmpOptimWrapper, ApexOptimWrapper, DefaultOptimWrapperConstructor, OptimWrapper, OptimWrapperDict, ZeroRedundancyOptimizer, ...
import warnings from typing import List, Optional, Type from jina.excepts import BadYAMLVersion from jina.jaml import JAMLCompatible from jina.jaml.parsers.base import VersionedYAMLParser from jina.orchestrate.deployments import Deployment from jina.serve.gateway import BaseGateway def _get_all_parser(cls: Type['JAM...
import warnings from typing import List, Optional, Type from jina.excepts import BadYAMLVersion from jina.jaml import JAMLCompatible from jina.jaml.parsers.base import VersionedYAMLParser from jina.serve.gateway import BaseGateway def _get_all_parser(cls: Type['JAMLCompatible']): """Get all parsers and legacy pa...
import pytest @pytest.mark.compile def test_placeholder() -> None: """Used for compiling integration tests without running any real tests."""
import pytest @pytest.mark.compile def test_placeholder() -> None: """Used for compiling integration tests without running any real tests.""" pass
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
from pathlib import Path REPO_ROOT_DIR = Path(__file__).parent.parent.absolute() TOYDATA_DIR = REPO_ROOT_DIR / 'tests' / 'toydata'
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model student_model = SparseEncoder("prithivida/Splade_PP_en_v1") tea...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model student_model = SparseEncoder("prithivida/Splade_PP_en_v1") tea...
from pathlib import Path default_exec_file = Path(__file__).absolute().parents[2] / "lightgbm" def pytest_addoption(parser): parser.addoption("--execfile", action="store", default=str(default_exec_file))
from pathlib import Path default_exec_file = Path(__file__).absolute().parents[2] / 'lightgbm' def pytest_addoption(parser): parser.addoption('--execfile', action='store', default=str(default_exec_file))
import logging from pathlib import Path from typing import Optional, Sequence from llama_index.core.base.llms.types import ImageBlock from llama_index.core.multi_modal_llms.base import ChatMessage, ImageNode DEFAULT_OPENAI_API_TYPE = "open_ai" DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1" GPT4V_MODELS = { ...
import logging from pathlib import Path from typing import Optional, Sequence from llama_index.core.base.llms.types import ImageBlock from llama_index.core.multi_modal_llms.base import ChatMessage, ImageNode DEFAULT_OPENAI_API_TYPE = "open_ai" DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1" GPT4V_MODELS = { ...
_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py'] model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_...
_base_ = [ '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' ] model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN'...
_base_ = './fast-rcnn_r50_fpn_1x_coco.py' model = dict( data_preprocessor=dict( type='DetDataPreprocessor', mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False, pad_size_divisor=32), backbone=dict( norm_cfg=dict(type='BN', requires_grad=False)...
_base_ = './fast-rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(type='BN', requires_grad=False), style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( ...
import glob import os from datetime import datetime import pytest from jina import Document, Flow, __uptime__, __windows__ from jina.enums import LogVerbosity from jina.helper import colored from jina.logging.logger import JinaLogger cur_dir = os.path.dirname(os.path.abspath(__file__)) def log(logger: JinaLogger):...
import glob import os from datetime import datetime import pytest from jina import Document, Flow, __uptime__, __windows__ from jina.enums import LogVerbosity from jina.helper import colored from jina.logging.logger import JinaLogger cur_dir = os.path.dirname(os.path.abspath(__file__)) def log(logger: JinaLogger):...
from __future__ import annotations try: from typing import Self except ImportError: from typing_extensions import Self import torch import transformers from PIL import Image from sentence_transformers.models.Router import InputModule class CLIPModel(InputModule): save_in_root: bool = True def __in...
from __future__ import annotations try: from typing import Self except ImportError: from typing_extensions import Self import torch import transformers from PIL import Image from sentence_transformers.models.Asym import InputModule class CLIPModel(InputModule): save_in_root: bool = True def __init...
from backend.blocks.jina._auth import ( JinaCredentials, JinaCredentialsField, JinaCredentialsInput, ) from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request import Requests class JinaEmbeddingBlock(Block): cla...
from backend.blocks.jina._auth import ( JinaCredentials, JinaCredentialsField, JinaCredentialsInput, ) from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request import requests class JinaEmbeddingBlock(Block): cla...
from typing import TypeVar from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow T = TypeVar('T', bound='AudioTensorFlowTensor') @_register_pr...
from typing import TypeVar from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow T = TypeVar('T', bound='AudioTensorFlowTensor') @_register_pr...
""" =========================================== Sparse coding with a precomputed dictionary =========================================== Transform a signal as a sparse combination of Ricker wavelets. This example visually compares different sparse coding methods using the :class:`~sklearn.decomposition.SparseCoder` est...
""" =========================================== Sparse coding with a precomputed dictionary =========================================== Transform a signal as a sparse combination of Ricker wavelets. This example visually compares different sparse coding methods using the :class:`~sklearn.decomposition.SparseCoder` est...
import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict _TestCommandArgs = namedtuple( "_TestCommandArgs", [ "dataset", "name",...
import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict _TestCommandArgs = namedtuple( "_TestCommandArgs", [ "dataset", "name",...
import os import numpy as np import keras from keras.src import testing from keras.src.saving.file_editor import KerasFileEditor def get_source_model(): inputs = keras.Input((2,)) x = keras.layers.Dense(3, name="mydense")(inputs) outputs = keras.layers.Dense(3, name="output_layer")(x) model = keras....
import os import numpy as np import keras from keras.src import testing from keras.src.saving.file_editor import KerasFileEditor def get_source_model(): inputs = keras.Input((2,)) x = keras.layers.Dense(3, name="mydense")(inputs) outputs = keras.layers.Dense(3, name="output_layer")(x) model = keras....
import pytest from llama_index.core.workflow.decorators import step from llama_index.core.workflow.events import Event, StartEvent, StopEvent from llama_index.core.workflow.workflow import Context, Workflow class DummyEvent(Event): pass class IntermediateEvent1(Event): value: int class IntermediateEvent2(...
import pytest from llama_index.core.workflow.decorators import step from llama_index.core.workflow.events import Event, StartEvent, StopEvent from llama_index.core.workflow.workflow import Context, Workflow class DummyEvent(Event): pass class IntermediateEvent1(Event): value: int class IntermediateEvent2(...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser...
from typing import Dict from jina.helper import TYPE_CHECKING, T, deprecate_by, typename if TYPE_CHECKING: # pragma: no cover from jina.proto import jina_pb2 class ProtoTypeMixin: """The base mixin class of all Jina types. .. note:: - All Jina types should inherit from this class. - All...
from typing import Dict from jina.helper import TYPE_CHECKING, T, deprecate_by, typename if TYPE_CHECKING: from jina.proto import jina_pb2 class ProtoTypeMixin: """The base mixin class of all Jina types. .. note:: - All Jina types should inherit from this class. - All subclass should ha...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from pathlib import Path import numpy as np import pytest import torch import torchvision.models.video as models from jina import Document, DocumentArray, Executor from torchvision import transforms from video_t...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from pathlib import Path import numpy as np import pytest import torch import torchvision.models.video as models from jina import Document, DocumentArray, Executor from torchvision import transforms from ...vid...
""" This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is than pooled, for example with max-pooling (which gives a system like InferSent) or with mean-pooling. Note, you can also pass BERT embeddings to the BiLSTM. """ import logging import traceback from datetime import datetime fr...
""" This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is than pooled, for example with max-pooling (which gives a system like InferSent) or with mean-pooling. Note, you can also pass BERT embeddings to the BiLSTM. """ from torch.utils.data import DataLoader import math from sentence...
import logging import random from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/spl...
import logging import random from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/spl...
from __future__ import annotations from typing import Any, Callable, List, Tuple, Type, Union import PIL.Image from torchvision import datapoints from torchvision._utils import sequence_to_str from torchvision.transforms.v2.functional import get_dimensions, get_size, is_simple_tensor def get_bounding_boxes(flat_in...
from __future__ import annotations from typing import Any, Callable, List, Tuple, Type, Union import PIL.Image from torchvision import datapoints from torchvision._utils import sequence_to_str from torchvision.transforms.v2.functional import get_dimensions, get_size, is_simple_tensor def query_bounding_boxes(flat_...
import os from enum import Enum from typing import Any, Dict, List, Optional from langchain_core.callbacks import CallbackManagerForRetrieverRun from langchain_core.documents import Document from langchain_core.retrievers import BaseRetriever class SearchDepth(Enum): """Search depth as enumerator.""" BASIC ...
import os from enum import Enum from typing import Any, Dict, List, Optional from langchain_core.callbacks import CallbackManagerForRetrieverRun from langchain_core.documents import Document from langchain_core.retrievers import BaseRetriever class SearchDepth(Enum): """Search depth as enumerator.""" BASIC ...
from __future__ import annotations import json import os import torch from safetensors.torch import load_model as load_safetensors_model from safetensors.torch import save_model as save_safetensors_model from torch import Tensor, nn class LayerNorm(nn.Module): def __init__(self, dimension: int): super()...
from __future__ import annotations import json import os import torch from safetensors.torch import load_model as load_safetensors_model from safetensors.torch import save_model as save_safetensors_model from torch import Tensor, nn class LayerNorm(nn.Module): def __init__(self, dimension: int): super(L...
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type if TYPE_CHECKING: from docarray import BaseDocument def _is_access_path_valid(doc_type: Type['BaseDocument'], access_path: str) -> bool: """ Check if a given access path ("__"-separated) is a valid path for a given Document class. """ ...
from typing import TYPE_CHECKING, Any, Dict, List, Type if TYPE_CHECKING: from docarray import BaseDocument def _is_access_path_valid(doc_type: Type['BaseDocument'], access_path: str) -> bool: """ Check if a given access path ("__"-separated) is a valid path for a given Document class. """ from d...
_base_ = [ '../_base_/models/faster_rcnn_r50_caffe_c4.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ ...
_base_ = [ '../_base_/models/faster_rcnn_r50_caffe_c4.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ ...
from __future__ import annotations from collections.abc import Iterable from enum import Enum from typing import Any import torch.nn.functional as F from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer class TripletDistanceMetric(Enum): """The metric for the tr...
from __future__ import annotations from enum import Enum from typing import Any, Iterable import torch.nn.functional as F from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer class TripletDistanceMetric(Enum): """The metric for the triplet loss""" COSINE =...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import numpy as np import torch from mmdet.models.dense_heads import PAAHead, paa_head from mmdet.models.dense_heads.paa_head import levels_to_images def test_paa_head_loss(): """Tests paa head loss when truth is empty and non-empty.""" class mock_...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import numpy as np import torch from mmdet.models.dense_heads import PAAHead, paa_head from mmdet.models.dense_heads.paa_head import levels_to_images def test_paa_head_loss(): """Tests paa head loss when truth is empty and non-empty.""" class mock_...
import re from typing import Dict MISTRALAI_MODELS: Dict[str, int] = { "mistral-tiny": 32000, "mistral-small": 32000, "mistral-medium": 32000, "mistral-large": 131000, "mistral-saba-latest": 32000, "open-mixtral-8x7b": 32000, "open-mistral-7b": 32000, "open-mixtral-8x22b": 64000, "m...
from typing import Dict MISTRALAI_MODELS: Dict[str, int] = { "mistral-tiny": 32000, "mistral-small": 32000, "mistral-medium": 32000, "mistral-large": 131000, "mistral-saba-latest": 32000, "open-mixtral-8x7b": 32000, "open-mistral-7b": 32000, "open-mixtral-8x22b": 64000, "mistral-sma...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine import Config from mmengine.data import InstanceData from mmdet import * # noqa from mmdet.models.dense_heads import AnchorHead class TestAnchorHead(TestCase): def test_anchor_head_loss(self): """T...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine import Config from mmengine.data import InstanceData from mmdet import * # noqa from mmdet.models.dense_heads import AnchorHead class TestAnchorHead(TestCase): def test_anchor_head_loss(self): """T...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.vectorstores import ElasticsearchStore from langchain_community.vectorstores.elasticsearch import ( ApproxRetrievalStrategy, BaseRetrievalStrategy, ExactRetrieval...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.vectorstores import ElasticsearchStore from langchain_community.vectorstores.elasticsearch import ( ApproxRetrievalStrategy, BaseRetrievalStrategy, ExactRetrieval...
from abc import ABC, abstractmethod from typing import Dict, Iterator, List, Optional, Type from typing_extensions import TYPE_CHECKING if TYPE_CHECKING: from docarray import BaseDoc, DocList class AbstractDocStore(ABC): @staticmethod @abstractmethod def list(namespace: str, show_table: bool) -> Lis...
from abc import ABC, abstractmethod from typing import Dict, Iterator, List, Optional, Type from typing_extensions import TYPE_CHECKING if TYPE_CHECKING: from docarray import BaseDoc, DocList class AbstractDocStore(ABC): @staticmethod @abstractmethod def list(namespace: str, show_table: bool) -> Lis...
_base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' train_cfg = dict(max_epochs=36) # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=24, by_epoch=True, ...
_base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' # learning policy lr_config = dict(step=[28, 34]) runner = dict(type='EpochBasedRunner', max_epochs=36)
from typing import TYPE_CHECKING from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available def text_encoder_lora_state_dict(text_encoder): deprecate( "text_encoder_load_state_dict in `models`", ...
from typing import TYPE_CHECKING from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available def text_encoder_lora_state_dict(text_encoder): deprecate( "text_encoder_load_state_dict in `models`", ...
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.runner.hooks import HOOKS from mmcv.runner.hooks.lr_updater import (CosineAnnealingLrUpdaterHook, annealing_cos) @HOOKS.register_module() class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook): """YOLOX learning ra...
from mmcv.runner.hooks import HOOKS from mmcv.runner.hooks.lr_updater import (CosineAnnealingLrUpdaterHook, annealing_cos) @HOOKS.register_module() class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook): """YOLOX learning rate scheme. There are two main differences b...
from __future__ import annotations from collections.abc import Iterable import torch.nn as nn from torch import Tensor from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseCosineSimilarityLoss(Cos...
from __future__ import annotations from collections.abc import Iterable import torch.nn as nn from torch import Tensor from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseCosineSimilarityLoss(Cos...
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) METAINFO = { 'CLASSES': ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'dinin...
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) METAINFO = { 'CLASSES': ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'dinin...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '2.16.0' short_version = __version__ def parse_version_info(version_str): version_info = [] for x in version_str.split('.'): if x.isdigit(): version_info.append(int(x)) elif x.find('rc') != -1: patch_version...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '2.15.1' short_version = __version__ def parse_version_info(version_str): version_info = [] for x in version_str.split('.'): if x.isdigit(): version_info.append(int(x)) elif x.find('rc') != -1: patch_version...
import warnings from sys import platform from typing import Optional import torch import torchaudio dict_format = { torch.uint8: "u8", torch.int16: "s16", torch.int32: "s32", torch.int64: "s64", torch.float32: "flt", torch.float64: "dbl", } def play_audio( waveform: torch.Tensor, sam...
import warnings from sys import platform from typing import Optional import torch import torchaudio dict_format = { torch.uint8: "u8", torch.int16: "s16", torch.int32: "s32", torch.int64: "s64", torch.float32: "flt", torch.float64: "dbl", } @torchaudio._extension.fail_if_no_ffmpeg def play_a...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.saving.file_editor import KerasFileEditor from keras.src.saving.object_registration import CustomObjectScope from keras.src.saving.object_registration import ( CustomObjectScope a...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.saving.object_registration import CustomObjectScope from keras.src.saving.object_registration import ( CustomObjectScope as custom_object_scope, ) from keras.src.saving.object_reg...
"""Interface for tools.""" from typing import Optional from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain_core.tools import BaseTool, tool class InvalidTool(BaseTool): """Tool that is run when invalid tool name is encountered by agent.""" ...
"""Interface for tools.""" from typing import Optional from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain_core.tools import BaseTool, tool class InvalidTool(BaseTool): """Tool that is run when invalid tool name is encountered by agent.""" ...
"""Analytics API""" import logging from typing import Annotated import fastapi import pydantic import backend.data.analytics from backend.server.utils import get_user_id router = fastapi.APIRouter() logger = logging.getLogger(__name__) class LogRawMetricRequest(pydantic.BaseModel): metric_name: str = pydantic...
"""Analytics API""" import logging from typing import Annotated import fastapi import backend.data.analytics from backend.server.utils import get_user_id router = fastapi.APIRouter() logger = logging.getLogger(__name__) @router.post(path="/log_raw_metric") async def log_raw_metric( user_id: Annotated[str, fas...
# Copyright (c) OpenMMLab. All rights reserved. from unittest.mock import Mock from mmengine.hooks import SyncBuffersHook class TestSyncBuffersHook: def test_sync_buffers_hook(self): runner = Mock() runner.model = Mock() hook = SyncBuffersHook() hook._after_epoch(runner)
# Copyright (c) OpenMMLab. All rights reserved. from unittest.mock import Mock from mmengine.hooks import SyncBuffersHook class TestSyncBuffersHook: def test_sync_buffers_hook(self): Runner = Mock() Runner.model = Mock() Hook = SyncBuffersHook() Hook._after_epoch(Runner)
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_doc import BaseDoc from docarray.typing import AnyTensor from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.utils._internal.misc import import_library if TYPE_CHECKING: import ...
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_doc import BaseDoc from docarray.typing import AnyTensor from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.utils._internal.misc import import_library if TYPE_CHECKING: import ...
# Copyright (c) OpenMMLab. All rights reserved. from .misc import (check_prerequisites, concat_list, deprecated_api_warning, has_method, import_modules_from_strings, is_list_of, is_method_overridden, is_seq_of, is_str, is_tuple_of, iter_cast, list_cast, mmcv_full...
# Copyright (c) OpenMMLab. All rights reserved. from .misc import (check_prerequisites, concat_list, deprecated_api_warning, has_method, import_modules_from_strings, is_list_of, is_method_overridden, is_seq_of, is_str, is_tuple_of, iter_cast, list_cast, mmcv_full...
from keras.src import backend from keras.src.api_export import keras_export from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501 BaseImagePreprocessingLayer, ) @keras_export("keras.layers.RandomGrayscale") class RandomGrayscale(BaseImagePreprocessingLayer):...
from keras.src import backend from keras.src.api_export import keras_export from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501 BaseImagePreprocessingLayer, ) @keras_export("keras.layers.RandomGrayscale") class RandomGrayscale(BaseImagePreprocessingLayer):...
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union import numpy as np from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl if TYPE_CHECKING: from pydantic import BaseConfig from pydantic.fie...
import wave from typing import TYPE_CHECKING, Any, Type, TypeVar, Union import numpy as np from pydantic import parse_obj_as from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.audio.audio_ndarray import MAX_INT_16, AudioNdArray from docarray.typing.url.any_url import AnyUrl if TYP...
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If ex...
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If ex...
from typing import Annotated from fastapi import FastAPI, Query app = FastAPI() @app.get("/items/") async def read_items(q: Annotated[str | None, Query(min_length=3)]): results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]} if q: results.update({"q": q}) return results
from typing import Annotated from fastapi import FastAPI, Query app = FastAPI() @app.get("/items/") async def read_items(q: Annotated[str | None, Query(min_length=3)] = ...): results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]} if q: results.update({"q": q}) return results
import random import pytest from jina import Document, DocumentArray @pytest.fixture def documents_chunk(): document_array = DocumentArray() document = Document(tags={'query_size': 35, 'query_price': 31, 'query_brand': 1}) for i in range(0, 10): chunk = Document() for j in range(0, 10): ...
import random import pytest from jina import DocumentArray, Document @pytest.fixture def documents_chunk(): document_array = DocumentArray() document = Document(tags={'query_size': 35, 'query_price': 31, 'query_brand': 1}) for i in range(0, 10): chunk = Document() for j in range(0, 10): ...
# Copyright (c) OpenMMLab. All rights reserved. from .utils import (get_device, get_max_cuda_memory, is_cuda_available, is_mlu_available, is_mps_available) __all__ = [ 'get_max_cuda_memory', 'get_device', 'is_cuda_available', 'is_mlu_available', 'is_mps_available' ]
# Copyright (c) OpenMMLab. All rights reserved. from .utils import (get_device, get_max_cuda_memory, is_cuda_available, is_mlu_available) __all__ = [ 'get_max_cuda_memory', 'get_device', 'is_cuda_available', 'is_mlu_available' ]
"""Class for a VectorStore-backed memory object.""" from collections.abc import Sequence from typing import Any, Optional, Union from langchain_core._api import deprecated from langchain_core.documents import Document from langchain_core.vectorstores import VectorStoreRetriever from pydantic import Field from langch...
"""Class for a VectorStore-backed memory object.""" from typing import Any, Dict, List, Optional, Sequence, Union from langchain_core._api import deprecated from langchain_core.documents import Document from langchain_core.vectorstores import VectorStoreRetriever from pydantic import Field from langchain.memory.chat...
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py' # dataset settings train_pipeline = [ dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomChoiceResize', scale=[(1333, 640), (1333, 800)], keep_ratio=...
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py' # dataset settings train_pipeline = [ dict( type='LoadImageFromFile', file_client_args={{_base_.file_client_args}}), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomChoiceResize', scale=[(1333, 640), (1333, 8...
import torch from torch import nn, Tensor from typing import Any, Iterable, Dict from sentence_transformers.util import fullname from ..SentenceTransformer import SentenceTransformer class CosineSimilarityLoss(nn.Module): def __init__(self, model: SentenceTransformer, loss_fct=nn.MSELoss(), cos_score_transformat...
import torch from torch import nn, Tensor from typing import Any, Iterable, Dict from sentence_transformers.util import fullname from ..SentenceTransformer import SentenceTransformer class CosineSimilarityLoss(nn.Module): def __init__(self, model: SentenceTransformer, loss_fct=nn.MSELoss(), cos_score_transformat...
from __future__ import annotations import logging import numpy as np from torch.utils.data import IterableDataset from sentence_transformers.readers import InputExample logger = logging.getLogger(__name__) class SentenceLabelDataset(IterableDataset): """ This dataset can be used for some specific Triplet ...
from __future__ import annotations import logging import numpy as np from torch.utils.data import IterableDataset from sentence_transformers.readers import InputExample logger = logging.getLogger(__name__) class SentenceLabelDataset(IterableDataset): """ This dataset can be used for some specific Triplet ...
# mypy: allow-untyped-defs import warnings import torch import torch.distributed.algorithms.model_averaging.averagers as averagers class PostLocalSGDOptimizer(torch.optim.Optimizer): r""" Wraps an arbitrary :class:`torch.optim.Optimizer` and runs `post-local SGD <https://arxiv.org/abs/1808.07217>`_, This...
# mypy: allow-untyped-defs import warnings import torch import torch.distributed.algorithms.model_averaging.averagers as averagers class PostLocalSGDOptimizer(torch.optim.Optimizer): r""" Wraps an arbitrary :class:`torch.optim.Optimizer` and runs `post-local SGD <https://arxiv.org/abs/1808.07217>`_, This...
import numpy as np from docarray import BaseDoc from docarray.array import DocArrayStacked from docarray.array.stacked.column_storage import ColumnStorageView from docarray.typing import AnyTensor def test_document_view(): class MyDoc(BaseDoc): tensor: AnyTensor name: str docs = [MyDoc(tenso...
import numpy as np from docarray import BaseDocument from docarray.array import DocumentArrayStacked from docarray.array.stacked.column_storage import ColumnStorageView from docarray.typing import AnyTensor def test_document_view(): class MyDoc(BaseDocument): tensor: AnyTensor name: str docs...
from .document import DocumentArray from .storage.qdrant import StorageMixins, QdrantConfig __all__ = ['DocumentArrayQdrant', 'QdrantConfig'] class DocumentArrayQdrant(StorageMixins, DocumentArray): """ DocumentArray that stores Documents in a `Qdrant <https://weaviate.io/>`_ vector search engine. .. no...
from .document import DocumentArray from .storage.qdrant import StorageMixins, QdrantConfig __all__ = ['DocumentArrayQdrant', 'QdrantConfig'] class DocumentArrayQdrant(StorageMixins, DocumentArray): """This is a :class:`DocumentArray` that uses Qdrant as vector search engine and storage. """ def __n...
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applica...
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applica...
# Copyright (c) OpenMMLab. All rights reserved. import copy import unittest from unittest import TestCase import torch from mmdet.registry import MODELS from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg from mmdet.utils import register_all_modules class TestTridentRoIHead(TestCase): ...
# Copyright (c) OpenMMLab. All rights reserved. import copy import unittest from unittest import TestCase import torch from mmdet.registry import MODELS from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg from mmdet.utils import register_all_modules class TestTridentRoIHead(TestCase): ...
"""Gaussian process based regression and classification.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from . import kernels from ._gpc import GaussianProcessClassifier from ._gpr import GaussianProcessRegressor __all__ = ["GaussianProcessClassifier", "GaussianProcessRegressor", "...
"""Gaussian process based regression and classification.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from . import kernels from ._gpc import GaussianProcessClassifier from ._gpr import GaussianProcessRegressor __all__ = ["GaussianProcessRegressor", "GaussianProcessClassifier", "...
# Copyright (c) OpenMMLab. All rights reserved. # flake8: noqa from .config import * from .dataset import * from .fileio import * from .registry import * from .utils import *
# Copyright (c) OpenMMLab. All rights reserved. # flake8: noqa from .config import * from .fileio import * from .registry import * from .utils import *
# Copyright (c) OpenMMLab. All rights reserved. from torch import Tensor from mmdet.core import SampleList from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .faster_rcnn import FasterRCNN @MODELS.register_module() class TridentFasterRCNN(FasterRCNN): ""...
# Copyright (c) OpenMMLab. All rights reserved. from torch import Tensor from mmdet.core import SampleList from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .faster_rcnn import FasterRCNN @MODELS.register_module() class TridentFasterRCNN(FasterRCNN): ""...
from dataclasses import dataclass from typing import Callable, Optional import datasets @dataclass class GeneratorConfig(datasets.BuilderConfig): generator: Optional[Callable] = None gen_kwargs: Optional[dict] = None features: Optional[datasets.Features] = None def __post_init__(self): asser...
from dataclasses import dataclass from typing import Callable, Optional import datasets @dataclass class GeneratorConfig(datasets.BuilderConfig): generator: Optional[Callable] = None gen_kwargs: Optional[dict] = None features: Optional[datasets.Features] = None def __post_init__(self): asser...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.registry import HOOKS from .hook import Hook @HOOKS.register_module() class DistSamplerSeedHook(Hook): """Data-loading sampler for distributed training. When distributed training, it is only useful in conjunction with :obj:`EpochBasedRunner`, ...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.registry import HOOKS from .hook import Hook @HOOKS.register_module() class DistSamplerSeedHook(Hook): """Data-loading sampler for distributed training. When distributed training, it is only useful in conjunction with :obj:`EpochBasedRunner`, ...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
""" In this example we train a semantic search model to search through Wikipedia articles about programming articles & technologies. We use the text paragraphs from the following Wikipedia articles: Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natura...
""" In this example we train a semantic search model to search through Wikipedia articles about programming articles & technologies. We use the text paragraphs from the following Wikipedia articles: Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natura...
tta_model = dict( type='DetTTAModel', tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) img_scales = [(640, 640), (320, 320), (960, 960)] tta_pipeline = [ dict(type='LoadImageFromFile', backend_args=None), dict( type='TestTimeAug', transforms=[ [ ...
tta_model = dict( type='DetTTAModel', tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) img_scales = [(640, 640), (320, 320), (960, 960)] tta_pipeline = [ dict(type='LoadImageFromFile', backend_args=None), dict( type='TestTimeAug', transforms=[ [ ...
from __future__ import annotations from .BinaryClassificationEvaluator import BinaryClassificationEvaluator from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator from .InformationRetrievalEvaluator import InformationRetrievalEvaluator from .LabelAccuracyEvaluator import LabelAccuracyEvaluator from .MS...
from .BinaryClassificationEvaluator import BinaryClassificationEvaluator from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator from .InformationRetrievalEvaluator import InformationRetrievalEvaluator from .LabelAccuracyEvaluator import LabelAccuracyEvaluator from .MSEEvaluator import MSEEvaluator from ...
AMI_ID = { # Managed by XGBoost team "linux-amd64-gpu": { "us-west-2": "ami-0b4079c15bbbd0faf", }, "linux-amd64-mgpu": { "us-west-2": "ami-0b4079c15bbbd0faf", }, "windows-gpu": { "us-west-2": "ami-0123456bcf4cdfb82", }, "windows-cpu": { "us-west-2": "ami-0...
AMI_ID = { # Managed by XGBoost team "linux-amd64-gpu": { "us-west-2": "ami-070080d04e81c5e39", }, "linux-amd64-mgpu": { "us-west-2": "ami-070080d04e81c5e39", }, "windows-gpu": { "us-west-2": "ami-07c14abcf529d816a", }, "windows-cpu": { "us-west-2": "ami-0...
from keras.src import backend from keras.src.layers.input_spec import InputSpec from keras.src.layers.layer import Layer class BaseGlobalPooling(Layer): """Base global pooling layer.""" def __init__( self, pool_dimensions, data_format=None, keepdims=False, **kwargs ): super().__init__(**k...
from keras.src import backend from keras.src.layers.input_spec import InputSpec from keras.src.layers.layer import Layer class BaseGlobalPooling(Layer): """Base global pooling layer.""" def __init__( self, pool_dimensions, data_format=None, keepdims=False, **kwargs ): super().__init__(**k...
#!/usr/bin/env python3 """Evaluate the lightning module by loading the checkpoint, the SentencePiece model, and the global_stats.json. Example: python eval.py --model-type tedlium3 --checkpoint-path ./experiments/checkpoints/epoch=119-step=254999.ckpt --dataset-path ./datasets/tedlium --sp-model-path ./spm_bpe_500...
#!/usr/bin/env python3 """Evaluate the lightning module by loading the checkpoint, the SentencePiece model, and the global_stats.json. Example: python eval.py --model-type tedlium3 --checkpoint-path ./experiments/checkpoints/epoch=119-step=254999.ckpt --dataset-path ./datasets/tedlium --sp-model-path ./spm_bpe_500...
_base_ = [ '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' ] # model settings model = dict( type='CornerNet', backbone=dict( type='HourglassNet', downsample_times=5, num_stacks=2, stage_channels=[256, 256, 384, 384, 384, 512], stage_blocks=[2, ...
_base_ = [ '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' ] # model settings model = dict( type='CornerNet', backbone=dict( type='HourglassNet', downsample_times=5, num_stacks=2, stage_channels=[256, 256, 384, 384, 384, 512], stage_blocks=[2, ...
import gzip import logging import os from datetime import datetime from torch.utils.data import DataLoader from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, evaluation, losses, models, util #### Just some code to print debug information to stdout logging.basicConfig( format="%(...
from sentence_transformers import SentenceTransformer, LoggingHandler, InputExample from sentence_transformers import models, util, datasets, evaluation, losses import logging import os import gzip from torch.utils.data import DataLoader from datetime import datetime #### Just some code to print debug information to...
from typing import Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_document import BaseDocument from docarray.typing import AnyEmbedding, AudioUrl from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.t...
from typing import Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_document import BaseDocument from docarray.typing import AnyEmbedding, AudioUrl from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.t...
from __future__ import annotations from collections.abc import Iterable import torch.nn as nn from torch import Tensor from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseCosineSimilarityLoss(Cos...
from __future__ import annotations from collections.abc import Iterable import torch.nn as nn from torch import Tensor from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseCosineSimilarityLoss(Cos...
from . import ( # noqa: F401 _extension, compliance, datasets, functional, io, kaldi_io, models, pipelines, sox_effects, transforms, utils, ) from .backend.common import AudioMetaData try: from .version import __version__, git_version # noqa: F401 except ImportError: ...
from torchaudio import ( # noqa: F401 _extension, compliance, datasets, functional, io, kaldi_io, models, pipelines, sox_effects, transforms, utils, ) try: from .version import __version__, git_version # noqa: F401 except ImportError: pass def _is_backend_dispatc...
from typing import Iterator, Dict class Offset2ID: def __init__(self, ids=None, list_like=True): self.ids = ids or [] self._list_like = list_like def get_id(self, idx): if not self._list_like: raise ValueError( "The offset2id is not enabled for list-like in...
from typing import Iterator, Dict class Offset2ID: def __init__(self, ids=None): self.ids = ids or [] def get_id(self, idx): return self.ids[idx] def append(self, data): self.ids.append(data) def extend(self, data): self.ids.extend(data) def update(self, positio...
import os from argparse import ArgumentParser import mmcv import requests import torch from mmengine.structures import InstanceData from mmdet.apis import inference_detector, init_detector from mmdet.registry import VISUALIZERS from mmdet.structures import DetDataSample from mmdet.utils import register_all_modules ...
from argparse import ArgumentParser import numpy as np import requests from mmdet.apis import inference_detector, init_detector, show_result_pyplot from mmdet.core import bbox2result def parse_args(): parser = ArgumentParser() parser.add_argument('img', help='Image file') parser.add_argument('config', h...
from typing import Optional import numpy as np import pytest from pydantic import BaseModel from typing_extensions import TypedDict from docarray import BaseDocument, DocumentArray from docarray.documents import AudioDoc, ImageDoc, TextDoc from docarray.documents.helper import create_doc, create_from_typeddict from d...
from typing import Optional import numpy as np import pytest from pydantic import BaseModel from typing_extensions import TypedDict from docarray import BaseDocument, DocumentArray from docarray.documents import Audio, Image, Text from docarray.documents.helper import create_doc, create_from_typeddict from docarray.t...
import os # When using jax.experimental.enable_x64 in unit test, we want to keep the # default dtype with 32 bits, aligning it with Keras's default. os.environ["JAX_DEFAULT_DTYPE_BITS"] = "32" try: # When using torch and tensorflow, torch needs to be imported first, # otherwise it will segfault upon import. T...
import os # When using jax.experimental.enable_x64 in unit test, we want to keep the # default dtype with 32 bits, aligning it with Keras's default. os.environ["JAX_DEFAULT_DTYPE_BITS"] = "32" try: # When using torch and tensorflow, torch needs to be imported first, # otherwise it will segfault upon import. T...
import importlib import threading from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem _has_s3fs = importlib.util.find_spec("s3fs") is not None if _has_s3fs: from .s3filesystem import S3FileSystem # noqa: F401 COMPRESSION_FILESYSTEMS: List[...
import importlib from typing import List import fsspec from . import compression from .hffilesystem import HfFileSystem _has_s3fs = importlib.util.find_spec("s3fs") is not None if _has_s3fs: from .s3filesystem import S3FileSystem # noqa: F401 COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSy...
""" ================================================== Principal Component Analysis (PCA) on Iris Dataset ================================================== This example shows a well known decomposition technique known as Principal Component Analysis (PCA) on the `Iris dataset <https://en.wikipedia.org/wiki/Iris_flowe...
""" ========================================================= PCA example with Iris Data-set ========================================================= Principal Component Analysis applied to the Iris dataset. See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more information on this dataset. """ ...
import collections import json import logging import os import string from typing import Iterable, List from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer logger = logging.getLogger(__name__) class PhraseTokenizer(WordTokeni...
from typing import Union, Tuple, List, Iterable, Dict import collections import string import os import json import logging from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS import nltk logger = logging.getLogger(__name__) class PhraseTokenizer(WordTokenizer): """Tokenizes the text with respect to exi...
# Copyright (c) OpenMMLab. All rights reserved. import argparse from collections import OrderedDict import torch def moco_convert(src, dst): """Convert keys in pycls pretrained moco models to mmdet style.""" # load caffe model moco_model = torch.load(src) blobs = moco_model['state_dict'] # conver...
import argparse from collections import OrderedDict import torch def moco_convert(src, dst): """Convert keys in pycls pretrained moco models to mmdet style.""" # load caffe model moco_model = torch.load(src) blobs = moco_model['state_dict'] # convert to pytorch style state_dict = OrderedDict(...
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from numbers import Real import numpy as np from ..base import BaseEstimator, _fit_context from ..utils._param_validation import Interval from ..utils.sparsefuncs import mean_variance_axis, min_max_axis from ..utils.validation import chec...
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from numbers import Real import numpy as np from ..base import BaseEstimator, _fit_context from ..utils._param_validation import Interval from ..utils.sparsefuncs import mean_variance_axis, min_max_axis from ..utils.validation import chec...
"""This module checks if the given python files can be imported without error.""" import sys import traceback from importlib.machinery import SourceFileLoader if __name__ == "__main__": files = sys.argv[1:] has_failure = False for file in files: try: SourceFileLoader("x", file).load_mo...
"""This module checks if the given python files can be imported without error.""" import sys import traceback from importlib.machinery import SourceFileLoader if __name__ == "__main__": files = sys.argv[1:] has_failure = False for file in files: try: SourceFileLoader("x", file).load_mo...
from __future__ import annotations from typing import Iterable import torch from torch import Tensor, nn from sentence_transformers import SentenceTransformer class MSELoss(nn.Module): def __init__(self, model: SentenceTransformer) -> None: """ Computes the MSE loss between the computed sentenc...
from typing import Dict, Iterable import torch from torch import Tensor, nn from sentence_transformers import SentenceTransformer class MSELoss(nn.Module): def __init__(self, model: SentenceTransformer) -> None: """ Computes the MSE loss between the computed sentence embedding and a target sente...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( type='VFNet', data_preprocessor=dict( type='DetDataPreprocessor', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], ...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( type='VFNet', data_preprocessor=dict( type='DetDataPreprocessor', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], ...
from .document import DocumentArray from .storage.weaviate import StorageMixins, WeaviateConfig __all__ = ['DocumentArrayWeaviate', 'WeaviateConfig'] class DocumentArrayWeaviate(StorageMixins, DocumentArray): """ DocumentArray that stores Documents in a `Weaviate <https://weaviate.io/>`_ vector search engine...
from .document import DocumentArray from .storage.weaviate import StorageMixins, WeaviateConfig __all__ = ['DocumentArrayWeaviate', 'WeaviateConfig'] class DocumentArrayWeaviate(StorageMixins, DocumentArray): """This is a :class:`DocumentArray` that uses Weaviate as vector search engine and storage. """ ...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str] arg3: Optional[str] class ProcessedResponseModel(BaseModel): ...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py' ] # optimizer model = dict( pretrained='open-mmlab://resnext101_64x4d', backbone=dict(type='ResNeXt', depth=101, groups=64, base_width=4)) optim_wrapper = dict(optimizer=dict(type='SGD', lr=0.01))
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py' ] # optimizer model = dict( pretrained='open-mmlab://resnext101_64x4d', backbone=dict(type='ResNeXt', depth=101, groups=64, base_width=4)) optimizer = dict(type='SGD', lr=0.01)
import itertools from typing import ( TYPE_CHECKING, Union, Sequence, overload, Any, List, ) import numpy as np from docarray import Document from docarray.helper import typename if TYPE_CHECKING: # pragma: no cover from docarray.typing import ( DocumentArrayIndexType, Do...
import itertools from typing import ( TYPE_CHECKING, Union, Sequence, overload, Any, List, ) import numpy as np from docarray import Document from docarray.helper import typename if TYPE_CHECKING: from docarray.typing import ( DocumentArrayIndexType, DocumentArraySingleton...
import os import pytest from jina import Document, Flow from jinahub.indexers.searcher.compound.FaissPostgresIndexer import FaissPostgresIndexer cur_dir = os.path.dirname(os.path.abspath(__file__)) compose_yml = os.path.join(cur_dir, 'docker-compose.yml') # fixes issue #208 https://github.com/jina-ai/executors/issu...
import os import pytest from jina import Document, Flow from jinahub.indexers.searcher.compound.FaissPostgresSearcher import ( FaissPostgresSearcher, ) cur_dir = os.path.dirname(os.path.abspath(__file__)) compose_yml = os.path.join(cur_dir, 'docker-compose.yml') # fixes issue #208 https://github.com/jina-ai/exe...
import sys import traceback from importlib.machinery import SourceFileLoader if __name__ == "__main__": files = sys.argv[1:] has_failure = False for file in files: try: SourceFileLoader("x", file).load_module() except Exception: has_failure = True print(f...
import sys import traceback from importlib.machinery import SourceFileLoader if __name__ == "__main__": files = sys.argv[1:] has_failure = False for file in files: try: SourceFileLoader("x", file).load_module() except Exception: has_faillure = True print(...
# mypy: ignore-errors """ This module provides the TorchInductor backend integration for TorchDynamo. TorchInductor is a compiler backend that generates optimized code for both CPU and GPU. This module lazily imports and registers the TorchInductor compiler to avoid loading it into memory when it is not being used. T...
# mypy: ignore-errors """ This module provides the TorchInductor backend integration for TorchDynamo. TorchInductor is a compiler backend that generates optimized code for both CPU and GPU. This module lazily imports and registers the TorchInductor compiler to avoid loading it into memory when it is not being used. T...