Dataset preview: two string columns, input (33 to 5k characters) and output (32 to 5k characters); each row's input and output cells appear as consecutive lines below.
import os import urllib import numpy as np import PIL import pytest from PIL import Image from pydantic.tools import parse_obj_as, schema_json_of from docarray.base_doc.io.json import orjson_dumps from docarray.typing import ImageUrl CUR_DIR = os.path.dirname(os.path.abspath(__file__)) PATH_TO_IMAGE_DATA = os.path.j...
import os import urllib import numpy as np import pytest from PIL import Image from pydantic.tools import parse_obj_as, schema_json_of from docarray.base_doc.io.json import orjson_dumps from docarray.typing import ImageUrl CUR_DIR = os.path.dirname(os.path.abspath(__file__)) PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR...
import warnings from abc import ABC from typing import Any, Optional from langchain_core._api import deprecated from langchain_core.chat_history import ( BaseChatMessageHistory, InMemoryChatMessageHistory, ) from langchain_core.memory import BaseMemory from langchain_core.messages import AIMessage, HumanMessag...
import warnings from abc import ABC from typing import Any, Optional from langchain_core._api import deprecated from langchain_core.chat_history import ( BaseChatMessageHistory, InMemoryChatMessageHistory, ) from langchain_core.memory import BaseMemory from langchain_core.messages import AIMessage, HumanMessag...
from typing import Any, Optional, Type, TypeVar, Union from docarray.base_document import BaseDocument from docarray.typing import TextUrl from docarray.typing.tensor.embedding import AnyEmbedding T = TypeVar('T', bound='TextDoc') class TextDoc(BaseDocument): """ Document for handling text. It can conta...
from typing import Any, Optional, Type, TypeVar, Union from docarray.base_document import BaseDocument from docarray.typing import TextUrl from docarray.typing.tensor.embedding import AnyEmbedding T = TypeVar('T', bound='Text') class Text(BaseDocument): """ Document for handling text. It can contain a T...
_base_ = '../htc/htc_x101-64x4d_fpn_16xb1-20e_coco.py' # learning policy max_epochs = 28 train_cfg = dict(max_epochs=max_epochs) param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=max_epochs, ...
_base_ = '../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py' # learning policy max_epochs = 28 train_cfg = dict(max_epochs=max_epochs) param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=max_epochs, ...
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py' preprocess_cfg = dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], to_rgb=False, pad_size_divisor=32) model = dict( # ResNeXt-101-32x8d model trained with Caffe2 at FB, # so the mean and std need to be changed. p...
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=8, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), ...
from pathlib import Path import pytest from langchain_community.document_loaders import CSVLoader, DirectoryLoader, TextLoader from langchain_community.document_loaders.helpers import detect_file_encodings @pytest.mark.requires("chardet") def test_loader_detect_encoding_text() -> None: """Test text loader.""" ...
from pathlib import Path import pytest from langchain_community.document_loaders import CSVLoader, DirectoryLoader, TextLoader from langchain_community.document_loaders.helpers import detect_file_encodings @pytest.mark.requires("chardet") def test_loader_detect_encoding_text() -> None: """Test text loader.""" ...
# dataset settings dataset_type = 'CocoPanopticDataset' data_root = 'data/coco/' # Example to use different file client # Method 1: simply set the data root and let the file I/O module # automatically infer from prefix (LMDB and Memcached are not supported yet) # data_root = 's3://openmmlab/datasets/detection/coco/' # Meth...
# dataset settings dataset_type = 'CocoPanopticDataset' # data_root = 'data/coco/' # Example to use different file client # Method 1: simply set the data root and let the file I/O module # automatically infer from prefix (LMDB and Memcached are not supported yet) data_root = 's3://openmmlab/datasets/detection/coco/' # Meth...
_base_ = './mask_rcnn_r50_fpn_1x_coco.py' # Enable automatic-mixed-precision training with AmpOptimWrapper. optim_wrapper = dict(type='AmpOptimWrapper')
_base_ = './mask_rcnn_r50_fpn_1x_coco.py' # fp16 settings fp16 = dict(loss_scale=512.)
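The pair above shows MMDetection's mixed-precision setting moving from the legacy fp16 = dict(loss_scale=512.) entry to MMEngine's AmpOptimWrapper. A minimal sketch of a full new-style wrapper; the optimizer dict is an assumption, since in practice it is inherited from the _base_ config:

# Hedged sketch: loss_scale accepts a float or 'dynamic'; the optimizer
# settings here are illustrative, normally supplied by the base config.
optim_wrapper = dict(
    type='AmpOptimWrapper',
    loss_scale=512.,
    optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))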
"""Google Calendar reader.""" import datetime import os from typing import Any, List, Optional, Union from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document SCOPES = ["https://www.googleapis.com/auth/calendar.readonly"] # Copyright 2018 Google LLC # # Licensed under the Ap...
"""Google Calendar reader.""" import datetime import os from typing import Any, List, Optional, Union from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document SCOPES = ["https://www.googleapis.com/auth/calendar.readonly"] # Copyright 2018 Google LLC # # Licensed under the Ap...
from keras.src import backend from keras.src.api_export import keras_export from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501 BaseImagePreprocessingLayer, ) @keras_export("keras.layers.RandomGrayscale") class RandomGrayscale(BaseImagePreprocessingLayer):...
from keras.src import backend from keras.src.api_export import keras_export from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501 BaseImagePreprocessingLayer, ) @keras_export("keras.layers.RandomGrayscale") class RandomGrayscale(BaseImagePreprocessingLayer):...
import json from typing import Union, Sequence, Dict, Any, Callable from tenacity import ( retry, stop_after_attempt, wait_exponential, retry_if_exception_type, ) from asyncio import iscoroutinefunction from requests.exceptions import Timeout, ConnectionError from llama_index.core.base.llms.types impo...
import json from typing import Union, Sequence, Dict, Any, Callable from tenacity import ( retry, stop_after_attempt, wait_exponential, retry_if_exception_type, ) from asyncio import iscoroutinefunction from requests.exceptions import Timeout, ConnectionError from llama_index.core.base.llms.types impo...
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class HfFileSystem(AbstractFileSystem): """Interfa...
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url, hf_hub_url class HfFileSystem(AbstractFileSystem): """Interface to files in a Huggin...
from datasets import load_dataset from sentence_transformers import SentenceTransformer from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch # 1. Load the quora corpus with questions dataset = load_dataset("quora", split="train").map( lambda batch: {"text": [text for sample i...
from sentence_transformers import SentenceTransformer from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch from datasets import load_dataset # 1. Load the quora corpus with questions dataset = load_dataset("quora", split="train").map( lambda batch: {"text": [text for sample i...
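Both rows share these imports and differ only in their ordering within the visible text. As a hedged sketch of the flow the script builds toward (the model name, toy corpus, and the two-value return are assumptions based on the sentence-transformers quantization examples, not recovered from the truncated rows):

from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch

# Encode a corpus, quantize the embeddings to int8, then search via usearch.
model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed model
corpus = ["How do I learn Python?", "What is the capital of France?"]
queries = ["Best way to study Python?"]

corpus_embeddings = model.encode(corpus, normalize_embeddings=True)
query_embeddings = model.encode(queries, normalize_embeddings=True)
corpus_int8 = quantize_embeddings(corpus_embeddings, precision="int8")

results, search_time = semantic_search_usearch(
    query_embeddings,
    corpus_embeddings=corpus_int8,
    corpus_precision="int8",
    top_k=2,
)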
# pylint: disable=invalid-name,unused-import """For compatibility and optional dependencies.""" import importlib.util import logging import sys import types from typing import Any, Sequence, cast import numpy as np from ._typing import _T assert sys.version_info[0] == 3, "Python 2 is no longer supported." def py_s...
# pylint: disable= invalid-name, unused-import """For compatibility and optional dependencies.""" import importlib.util import logging import sys import types from typing import Any, Dict, List, Optional, Sequence, cast import numpy as np from ._typing import _T assert sys.version_info[0] == 3, "Python 2 is no long...
from __future__ import annotations import sys from .BoW import BoW from .CLIPModel import CLIPModel from .CNN import CNN from .Dense import Dense from .Dropout import Dropout from .InputModule import InputModule from .LayerNorm import LayerNorm from .LSTM import LSTM from .Module import Module from .Normalize import ...
from __future__ import annotations from .Asym import Asym from .BoW import BoW from .CLIPModel import CLIPModel from .CNN import CNN from .Dense import Dense from .Dropout import Dropout from .InputModule import InputModule from .LayerNorm import LayerNorm from .LSTM import LSTM from .Module import Module from .Normal...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig from .single_stage_instance_seg import SingleStageInstanceSegmentor @MODELS.register_module() class CondInst(SingleStageInstanceSegmentor): """Implementation of `Cond...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from mmdet.utils.typing import ConfigType, OptConfigType, OptMultiConfig from .single_stage_instance_seg import SingleStageInstanceSegmentor @MODELS.register_module() class CondInst(SingleStageInstanceSegmentor): """Implementation ...
import torch from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda from .torchscript_consistency_impl import Transforms, TransformsFloat32Only @skipIfNoCuda class TestTransformsFloat32(Transforms, TransformsFloat32Only, PytorchTestCase): dtype = torch.float32 device = torch.device("cuda"...
import torch from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase from .torchscript_consistency_impl import Transforms, TransformsFloat32Only @skipIfNoCuda class TestTransformsFloat32(Transforms, TransformsFloat32Only, PytorchTestCase): dtype = torch.float32 device = torch.device("cuda"...
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar('T', bound='VideoNdArray')...
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar('T', bound='VideoNdArray')...
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' model = dict( backbone=dict( embed_dims=64, num_layers=[3, 8, 27, 3], init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' 'releases/download/v2/pvt_v2_b4.pth')), neck=dict(in_channels=[64, 128, 320, 512])) # optimi...
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' model = dict( backbone=dict( embed_dims=64, num_layers=[3, 8, 27, 3], init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' 'releases/download/v2/pvt_v2_b4.pth')), neck=dict(in_channels=[64, 128, 320, 512])) # optimi...
"""News article reader using Newspaper.""" import logging from importlib.util import find_spec from typing import Any, Generator, List from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document logger = logging.getLogger(__name__) class NewsArticleReader(BaseReader): """ ...
"""News article reader using Newspaper.""" import logging from importlib.util import find_spec from typing import Any, Generator, List from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document logger = logging.getLogger(__name__) class NewsArticleReader(BaseReader): """ ...
from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor from docarray.typing.tensor.ndarray import NdArray MAX_INT_16 = 2**15 @_register_proto(proto_type_name='image_ndarray') class ImageNdArray(AbstractImageTensor, NdArray): "...
from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor from docarray.typing.tensor.ndarray import NdArray MAX_INT_16 = 2**15 @_register_proto(proto_type_name='image_ndarray') class ImageNdArray(AbstractImageTensor, NdArray): "...
import logging import sentry_sdk from sentry_sdk.integrations.anthropic import AnthropicIntegration from sentry_sdk.integrations.logging import LoggingIntegration from backend.util.settings import Settings def sentry_init(): sentry_dsn = Settings().secrets.sentry_dsn sentry_sdk.init( dsn=sentry_dsn,...
import logging import sentry_sdk from sentry_sdk.integrations.anthropic import AnthropicIntegration from sentry_sdk.integrations.logging import LoggingIntegration from backend.util.settings import Settings def sentry_init(): sentry_dsn = Settings().secrets.sentry_dsn sentry_sdk.init( dsn=sentry_dsn,...
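Both rows truncate inside the sentry_sdk.init(...) call. A hedged sketch of how such a call typically completes; the DSN placeholder, sample rate, and integration options are assumptions, not recovered from the rows:

import logging
import sentry_sdk
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.logging import LoggingIntegration

sentry_dsn = "https://public@example.ingest.sentry.io/1"  # placeholder DSN
sentry_sdk.init(
    dsn=sentry_dsn,
    traces_sample_rate=1.0,  # assumed value
    integrations=[
        # Forward log records: INFO as breadcrumbs, ERROR as Sentry events.
        LoggingIntegration(level=logging.INFO, event_level=logging.ERROR),
        AnthropicIntegration(),
    ],
)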
""" Experimental Object Oriented Distributed API - torch.distributed._dist2 ======================================================================= This is an experimental new API for PyTorch Distributed. This is actively in development and subject to change or deletion entirely. This is intended as a proving ground ...
""" Experimental Object Oriented Distributed API - torch.distributed._dist2 ======================================================================= This is an experimental new API for PyTorch Distributed. This is actively in development and subject to change or deletion entirely. This is intended as a proving ground ...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser...
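As a hedged continuation sketch (the model and dataset names below are assumptions, not the truncated values in the rows; the column names follow the standard sentence-transformers STSb layout), the evaluator pairs two sentence lists with gold similarity scores:

from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator

model = SparseEncoder("naver/splade-v3")  # assumed model name
# Evaluate the sparse model on the STS benchmark validation split.
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
evaluator = SparseEmbeddingSimilarityEvaluator(
    sentences1=eval_dataset["sentence1"],
    sentences2=eval_dataset["sentence2"],
    scores=eval_dataset["score"],
    name="sts_dev",
)
results = evaluator(model)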
import logging from autogpt_libs.auth.middleware import auth_middleware from fastapi import APIRouter, Depends, HTTPException from backend.server.utils import get_user_id from .models import ApiResponse, ChatRequest from .service import OttoService logger = logging.getLogger(__name__) router = APIRouter() @route...
import logging from autogpt_libs.auth.middleware import auth_middleware from fastapi import APIRouter, Depends, HTTPException from backend.server.utils import get_user_id from .models import ApiResponse, ChatRequest from .service import OttoService logger = logging.getLogger(__name__) router = APIRouter() @route...
"""Tool for the Google search API.""" from typing import Optional from langchain_core._api.deprecation import deprecated from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from langchain_community.utilities.google_search import GoogleSearchAPIWrapper @deprecate...
"""Tool for the Google search API.""" from typing import Optional from langchain_core._api.deprecation import deprecated from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from langchain_community.utilities.google_search import GoogleSearchAPIWrapper @deprecate...
import glob import os import pytest from jina import Document, Flow from jina.constants import __uptime__, __windows__ from jina.enums import LogVerbosity from jina.helper import colored from jina.logging.logger import JinaLogger cur_dir = os.path.dirname(os.path.abspath(__file__)) def log(logger: JinaLogger): ...
import glob import os import pytest from jina import Document, Flow from jina.constants import __uptime__, __windows__ from jina.enums import LogVerbosity from jina.helper import colored from jina.logging.logger import JinaLogger cur_dir = os.path.dirname(os.path.abspath(__file__)) def log(logger: JinaLogger): ...
from typing import Optional, Union, Callable, Tuple, TYPE_CHECKING, Dict if TYPE_CHECKING: import numpy as np from docarray.typing import ArrayType from docarray import DocumentArray class MatchMixin: """A mixin that provides match functionality to DocumentArrays""" def match( self, ...
from typing import Optional, Union, Callable, Tuple, TYPE_CHECKING, Dict if TYPE_CHECKING: import numpy as np from docarray.typing import ArrayType from docarray import DocumentArray class MatchMixin: """A mixin that provides match functionality to DocumentArrays""" def match( self, ...
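A hedged usage sketch for the match mixin above, using the commonly documented docarray v1 parameters (metric and limit):

import numpy as np
from docarray import Document, DocumentArray

# Query docs get their .matches filled from the index by embedding similarity.
queries = DocumentArray([Document(embedding=np.random.rand(5)) for _ in range(3)])
index = DocumentArray([Document(embedding=np.random.rand(5)) for _ in range(10)])
queries.match(index, metric='cosine', limit=2)
print(queries[0].matches)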
import numpy as np import pytest import torch from docarray.base_document import BaseDocument from docarray.base_document.io.json import orjson_dumps from docarray.typing import AnyUrl, NdArray, TorchTensor @pytest.fixture() def doc_and_class(): class Mmdoc(BaseDocument): img: NdArray url: AnyUrl...
import numpy as np import pytest import torch from docarray.base_document import BaseDocument from docarray.base_document.io.json import orjson_dumps from docarray.typing import AnyUrl, NdArray, TorchTensor @pytest.fixture() def doc_and_class(): class Mmdoc(BaseDocument): img: NdArray url: AnyUrl...
import pathlib from collections.abc import Iterator from typing import Any, BinaryIO, Union from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource from torchvision.prototype.datasets.utils._internal ...
import pathlib from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource from torchvision.prototype.datasets.utils._internal import (...
import numpy as np import pytest from absl.testing import parameterized from tensorflow import data as tf_data from keras.src import backend from keras.src import layers from keras.src import ops from keras.src import testing class RandomGrayscaleTest(testing.TestCase): @pytest.mark.requires_trainable_backend ...
import numpy as np import pytest from absl.testing import parameterized from tensorflow import data as tf_data from keras.src import backend from keras.src import layers from keras.src import ops from keras.src import testing class RandomGrayscaleTest(testing.TestCase): @pytest.mark.requires_trainable_backend ...
from ._source_separation_pipeline import CONVTASNET_BASE_LIBRI2MIX, SourceSeparationBundle from ._tts import ( TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH, TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH, TACOTRON2_WAVERNN_CHAR_LJSPEECH, TACOTRON2_WAVERNN_PHONE_LJSPEECH, Tacotron2TTSBundle, ) from ._wav2vec2.impl import...
from ._tts import ( TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH, TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH, TACOTRON2_WAVERNN_CHAR_LJSPEECH, TACOTRON2_WAVERNN_PHONE_LJSPEECH, Tacotron2TTSBundle, ) from ._wav2vec2.impl import ( HUBERT_ASR_LARGE, HUBERT_ASR_XLARGE, HUBERT_BASE, HUBERT_LARGE, HUBE...
# Copyright (c) OpenMMLab. All rights reserved. from .atss import ATSS from .autoassign import AutoAssign from .base import BaseDetector from .boxinst import BoxInst from .cascade_rcnn import CascadeRCNN from .centernet import CenterNet from .condinst import CondInst from .cornernet import CornerNet from .crowddet impo...
# Copyright (c) OpenMMLab. All rights reserved. from .atss import ATSS from .autoassign import AutoAssign from .base import BaseDetector from .cascade_rcnn import CascadeRCNN from .centernet import CenterNet from .condinst import CondInst from .cornernet import CornerNet from .crowddet import CrowdDet from .d2_wrapper ...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
# mypy: allow-untyped-defs import functools from collections.abc import Hashable from dataclasses import dataclass, fields from typing import TypeVar from typing_extensions import dataclass_transform T = TypeVar("T", bound="_Union") class _UnionTag(str): __slots__ = ("_cls",) _cls: Hashable @staticmeth...
# mypy: allow-untyped-defs import functools from collections.abc import Hashable from dataclasses import dataclass, fields from typing import TypeVar from typing_extensions import dataclass_transform T = TypeVar("T", bound="_Union") class _UnionTag(str): __slots__ = ("_cls",) _cls: Hashable @staticmeth...
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import pickle from inspect import signature import pytest from sklearn.utils.deprecation import _is_deprecated, deprecated @deprecated("qwerty") class MockClass1: pass class MockClass2: @deprecated("mockclass2_method") de...
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import pickle from inspect import signature import pytest from sklearn.utils.deprecation import _is_deprecated, deprecated @deprecated("qwerty") class MockClass1: pass class MockClass2: @deprecated("mockclass2_method") de...
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' model = dict( neck=dict( type='PAFPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5))
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( neck=dict( type='PAFPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5))
""" The system trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the SNLI + MultiNLI (AllNLI) dataset with softmax loss function. At every 1000 training steps, the model is evaluated on the STS benchmark dataset Usage: python training_nli.py OR python training_nli.py pretrained_transformer...
""" The system trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the SNLI + MultiNLI (AllNLI) dataset with softmax loss function. At every 1000 training steps, the model is evaluated on the STS benchmark dataset Usage: python training_nli.py OR python training_nli.py pretrained_transformer...
from __future__ import annotations import pytest from sentence_transformers.cross_encoder import CrossEncoder @pytest.mark.parametrize( "model_name, expected_score", [ ("cross-encoder/ms-marco-MiniLM-L-6-v2", [8.12545108795166, -3.045016050338745, -3.1524128913879395]), ("cross-encoder/ms-ma...
from __future__ import annotations import pytest from sentence_transformers.cross_encoder import CrossEncoder @pytest.mark.parametrize( "model_name, expected_score", [ ("cross-encoder/ms-marco-MiniLM-L-6-v2", [8.12545108795166, -3.045016050338745, -3.1524128913879395]), ("cross-encoder/ms-ma...
r"""Utility classes & functions for data loading. Code in this folder is mostly used by ../dataloder.py. A lot of multiprocessing is used in data loading, which only supports running functions defined in global environment (py2 can't serialize static methods). Therefore, for code tidiness we put these functions into d...
# mypy: allow-untyped-defs r"""Utility classes & functions for data loading. Code in this folder is mostly used by ../dataloder.py. A lot of multiprocessing is used in data loading, which only supports running functions defined in global environment (py2 can't serialize static methods). Therefore, for code tidiness we...
from typing import TYPE_CHECKING, Dict, Type from docarray.array.abstract_array import AbstractDocumentArray from docarray.typing.tensor.abstract_tensor import AbstractTensor if TYPE_CHECKING: from docarray.proto import DocumentArrayProto, NodeProto class ProtoArrayMixin(AbstractDocumentArray): @classmethod...
from typing import TYPE_CHECKING, Type from docarray.array.abstract_array import AbstractDocumentArray if TYPE_CHECKING: from docarray.proto import DocumentArrayProto, NodeProto class ProtoArrayMixin(AbstractDocumentArray): @classmethod def from_protobuf( cls: Type[AbstractDocumentArray], pb_msg...
from logging import Logger from backend.util.settings import AppEnvironment, BehaveAs, Settings settings = Settings() def configure_logging(): import logging import autogpt_libs.logging.config if ( settings.config.behave_as == BehaveAs.LOCAL or settings.config.app_env == AppEnvironment...
from backend.util.settings import AppEnvironment, BehaveAs, Settings settings = Settings() def configure_logging(): import logging import autogpt_libs.logging.config if ( settings.config.behave_as == BehaveAs.LOCAL or settings.config.app_env == AppEnvironment.LOCAL ): autogp...
from datetime import datetime, timedelta from backend.blocks.hubspot._auth import ( HubSpotCredentials, HubSpotCredentialsField, HubSpotCredentialsInput, ) from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request impo...
from datetime import datetime, timedelta from backend.blocks.hubspot._auth import ( HubSpotCredentials, HubSpotCredentialsField, HubSpotCredentialsInput, ) from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request impo...
__version__ = '0.13.6' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install()
__version__ = '0.13.5' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install()
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools import AIPluginTool from langchain_community.tools.plugin import AIPlugin, AIPluginToolSchema, ApiConfig # Create a way to dynamically look up deprecated imports. # Used to consol...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools import AIPluginTool from langchain_community.tools.plugin import AIPlugin, AIPluginToolSchema, ApiConfig # Create a way to dynamically look up deprecated imports. # Used to consol...
import pytest from jina import Executor, Flow, requests from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet from jina.clients.request.helper import _new_data_request from jina.excepts import BadServer from jina.logging.logger import JinaLogger from jina.types.request.data import DataRequest logger ...
import aiohttp import pytest from jina import Executor, Flow, requests from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet from jina.clients.request.helper import _new_data_request from jina.excepts import BadServer from jina.logging.logger import JinaLogger from jina.types.request.data import DataR...
import sys import tempfile from unittest.mock import patch from keras.src.testing import test_case from keras.src.utils import io_utils class TestIoUtils(test_case.TestCase): def test_enable_interactive_logging(self): io_utils.enable_interactive_logging() self.assertTrue(io_utils.is_interactive_l...
from unittest.mock import patch from keras.src.testing import test_case from keras.src.utils import io_utils class TestIoUtils(test_case.TestCase): def test_enable_interactive_logging(self): io_utils.enable_interactive_logging() self.assertTrue(io_utils.is_interactive_logging_enabled()) def ...
import json import re from typing import TypeVar import yaml from langchain_core.exceptions import OutputParserException from langchain_core.output_parsers import BaseOutputParser from pydantic import BaseModel, ValidationError from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS T = Typ...
import json import re from typing import TypeVar import yaml from langchain_core.exceptions import OutputParserException from langchain_core.output_parsers import BaseOutputParser from pydantic import BaseModel, ValidationError from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS T = Typ...
"""Init file of LlamaIndex.""" __version__ = "0.12.33.post1" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_index...
"""Init file of LlamaIndex.""" __version__ = "0.12.32" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_index.core....
# Copyright (c) OpenMMLab. All rights reserved. from .collect_env import collect_env from .compat_config import compat_cfg from .logger import get_caller_name, get_root_logger, log_img_scale from .misc import find_latest_checkpoint, update_data_root from .setup_env import setup_multi_processes from .split_batch import ...
# Copyright (c) OpenMMLab. All rights reserved. from .collect_env import collect_env from .compat_config import compat_cfg from .logger import get_caller_name, get_root_logger, log_img_scale from .misc import find_latest_checkpoint, update_data_root from .setup_env import setup_multi_processes from .split_batch import ...
import logging from typing import List import numpy as np from torch.utils.data import IterableDataset from sentence_transformers.readers import InputExample logger = logging.getLogger(__name__) class SentenceLabelDataset(IterableDataset): """ This dataset can be used for some specific Triplet Losses like ...
""" """ from torch.utils.data import IterableDataset import numpy as np from typing import List from ..readers import InputExample import logging logger = logging.getLogger(__name__) class SentenceLabelDataset(IterableDataset): """ This dataset can be used for some specific Triplet Losses like BATCH_HARD_TR...
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray __all__ = ['AudioNdArray'] from docarray.utils.misc import is_tf_available, is_torch_available torch_available = is_torch_available() if torch_available: from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa _...
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray __all__ = ['AudioNdArray'] try: import torch # noqa: F401 except ImportError: pass else: from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa __all__.extend(['AudioTorchTensor'])
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
from datetime import datetime, timezone from unittest.mock import AsyncMock import pytest from fastapi import WebSocket from backend.data.execution import ExecutionResult, ExecutionStatus from backend.server.conn_manager import ConnectionManager from backend.server.model import Methods, WsMessage @pytest.fixture de...
from datetime import datetime, timezone from unittest.mock import AsyncMock import pytest from fastapi import WebSocket from backend.data.execution import ExecutionResult, ExecutionStatus from backend.server.conn_manager import ConnectionManager from backend.server.model import Methods, WsMessage @pytest.fixture de...
"""**Chat Models** are a variation on language models. While Chat Models use language models under the hood, the interface they expose is a bit different. Rather than expose a "text in, text out" API, they expose an interface where "chat messages" are the inputs and outputs. **Class hierarchy:** .. code-block:: ...
"""**Chat Models** are a variation on language models. While Chat Models use language models under the hood, the interface they expose is a bit different. Rather than expose a "text in, text out" API, they expose an interface where "chat messages" are the inputs and outputs. **Class hierarchy:** .. code-block:: ...
from __future__ import annotations from sentence_transformers.sparse_encoder.losses.CSRLoss import CSRLoss from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import ( CSRReconstructionLoss, ) from sentence_transformers.sparse_encoder.losses.FlopsLoss import FlopsLoss from sentence_transformers....
from __future__ import annotations from sentence_transformers.sparse_encoder.losses.CSRLoss import CSRLoss from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import ( CSRReconstructionLoss, ) from sentence_transformers.sparse_encoder.losses.SparseAnglELoss import SparseAnglELoss from sentence_t...
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp from typing import Optional, Sequence, Tuple import cv2 import numpy as np from mmengine.data import BaseDataElement from mmengine.hooks import Hook from mmengine.registry import HOOKS from mmengine.utils.misc import tensor2imgs @HOOKS.register_m...
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp from typing import Any, Optional, Sequence, Tuple import cv2 import numpy as np from mmengine.data import BaseDataElement from mmengine.hooks import Hook from mmengine.registry import HOOKS from mmengine.utils.misc import tensor2imgs @HOOKS.regis...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine.structures import InstanceData from mmengine.testing import assert_allclose from mmdet.models.task_modules.assigners import GridAssigner class TestGridAssigner(TestCase): def test_assign(self): assi...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine.data import InstanceData from mmengine.testing import assert_allclose from mmdet.models.task_modules.assigners import GridAssigner class TestGridAssigner(TestCase): def test_assign(self): assigner =...
from typing import Any from llama_index.core.agent import ReActAgentWorker, StructuredPlannerAgent from llama_index.core.agent.runner.planner import Plan, SubTask from llama_index.core.llms.custom import CustomLLM from llama_index.core.llms import LLMMetadata, CompletionResponse, CompletionResponseGen from llama_index...
from typing import Any from llama_index.core.agent import ReActAgentWorker, StructuredPlannerAgent from llama_index.core.agent.runner.planner import Plan, SubTask from llama_index.core.llms.custom import CustomLLM from llama_index.core.llms import LLMMetadata, CompletionResponse, CompletionResponseGen from llama_index...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet import * # noqa from mmdet.structures import DetDataSample from mmdet.testing import demo_mm_inputs, get_detector_cfg from mmdet.utils import register_all_modu...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet import * # noqa from mmdet.structures import DetDataSample from mmdet.testing import demo_mm_inputs, get_detector_cfg from mmdet.utils import register_all_modu...
_base_ = './yolox_s_8xb8-300e_coco.py' # model settings model = dict( data_preprocessor=dict(batch_augments=[ dict( type='BatchSyncRandomResize', random_size_range=(320, 640), size_divisor=32, interval=10) ]), backbone=dict(deepen_factor=0.33, widen_f...
_base_ = './yolox_s_8xb8-300e_coco.py' # model settings model = dict( data_preprocessor=dict(batch_augments=[ dict( type='BatchSyncRandomResize', random_size_range=(320, 640), size_divisor=32, interval=10) ]), backbone=dict(deepen_factor=0.33, widen_f...
import pytest from llama_index.core.extractors import DocumentContextExtractor from llama_index.core.llms import ChatMessage, ChatResponse, MockLLM from llama_index.core.schema import Document, NodeRelationship, TextNode from llama_index.core.storage.docstore.simple_docstore import SimpleDocumentStore @pytest.fixtur...
import pytest from llama_index.core.extractors import DocumentContextExtractor from llama_index.core.llms import ChatMessage, ChatResponse, MockLLM from llama_index.core.schema import Document, NodeRelationship, TextNode from llama_index.core.storage.docstore.simple_docstore import SimpleDocumentStore @pytest.fixtur...
import json from typing import Any, Dict, Optional, Tuple from llama_index.core.schema import ( BaseNode, ImageNode, IndexNode, NodeRelationship, RelatedNodeInfo, TextNode, ) DEFAULT_TEXT_KEY = "text" DEFAULT_EMBEDDING_KEY = "embedding" DEFAULT_DOC_ID_KEY = "doc_id" def _validate_is_flat_dic...
import json from typing import Any, Dict, Optional, Tuple from llama_index.core.schema import ( BaseNode, ImageNode, IndexNode, NodeRelationship, RelatedNodeInfo, TextNode, ) DEFAULT_TEXT_KEY = "text" DEFAULT_EMBEDDING_KEY = "embedding" DEFAULT_DOC_ID_KEY = "doc_id" def _validate_is_flat_dic...
""" ================================================ Kernel Density Estimate of Species Distributions ================================================ This shows an example of a neighbors-based query (in particular a kernel density estimate) on geospatial data, using a Ball Tree built upon the Haversine distance metric...
""" ================================================ Kernel Density Estimate of Species Distributions ================================================ This shows an example of a neighbors-based query (in particular a kernel density estimate) on geospatial data, using a Ball Tree built upon the Haversine distance metric...
from __future__ import annotations import json from typing import Optional, Type import requests import yaml from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain_core.tools import BaseTool from pydantic import BaseModel class ApiConfig(BaseModel)...
from __future__ import annotations import json from typing import Optional, Type import requests import yaml from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain_core.tools import BaseTool from pydantic import BaseModel class ApiConfig(BaseModel)...
from langchain_core.agents import AgentAction from langchain.agents.format_scratchpad.log import format_log_to_str def test_single_agent_action_observation() -> None: intermediate_steps = [ (AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1"), ] expected_result = "Log1\nOb...
from langchain_core.agents import AgentAction from langchain.agents.format_scratchpad.log import format_log_to_str def test_single_agent_action_observation() -> None: intermediate_steps = [ (AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1") ] expected_result = "Log1\nObs...
"""Azure Translate tool spec.""" import requests from llama_index.core.tools.tool_spec.base import BaseToolSpec ENDPOINT_BASE_URL = "https://api.cognitive.microsofttranslator.com/translate" class AzureTranslateToolSpec(BaseToolSpec): """Azure Translate tool spec.""" spec_functions = ["translate"] def ...
"""Azure Translate tool spec.""" import requests from llama_index.core.tools.tool_spec.base import BaseToolSpec ENDPOINT_BASE_URL = "https://api.cognitive.microsofttranslator.com/translate" class AzureTranslateToolSpec(BaseToolSpec): """Azure Translate tool spec.""" spec_functions = ["translate"] def ...
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.models.utils.misc import get_box_tensor from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import HorizontalBoxes from .base_bbox_coder import BaseBBoxCoder @TASK_UTILS.register_module() class YOLOBBoxCoder(BaseBBoxCoder): """Y...
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.registry import TASK_UTILS from .base_bbox_coder import BaseBBoxCoder @TASK_UTILS.register_module() class YOLOBBoxCoder(BaseBBoxCoder): """YOLO BBox coder. Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divide imag...
import logging import sentry_sdk from sentry_sdk.integrations.anthropic import AnthropicIntegration from sentry_sdk.integrations.logging import LoggingIntegration from backend.util.settings import Settings def sentry_init(): sentry_dsn = Settings().secrets.sentry_dsn sentry_sdk.init( dsn=sentry_dsn,...
import logging import sentry_sdk from sentry_sdk.integrations.anthropic import AnthropicIntegration from sentry_sdk.integrations.logging import LoggingIntegration from backend.util.settings import Settings def sentry_init(): sentry_dsn = Settings().secrets.sentry_dsn sentry_sdk.init( dsn=sentry_dsn,...
# Copyright (c) OpenMMLab. All rights reserved. import logging import os.path as osp from argparse import ArgumentParser from mmcv import Config from mmdet.apis import inference_detector, init_detector, show_result_pyplot from mmdet.utils import get_root_logger def parse_args(): parser = ArgumentParser() pa...
import logging import os.path as osp from argparse import ArgumentParser from mmcv import Config from mmdet.apis import inference_detector, init_detector, show_result_pyplot from mmdet.utils import get_root_logger def parse_args(): parser = ArgumentParser() parser.add_argument('config', help='test config fi...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet.registry import MODELS from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg from mmdet.utils import register_all_modules class TestPI...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet.registry import MODELS from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg from mmdet.utils import register_all_modules class TestPI...
from pathlib import Path from typing import Any, Callable, Optional, Tuple, Union from PIL import Image from .folder import find_classes, make_dataset from .utils import download_and_extract_archive, verify_str_arg from .vision import VisionDataset class Imagenette(VisionDataset): """`Imagenette <https://github...
from pathlib import Path from typing import Any, Callable, Optional, Tuple, Union from PIL import Image from .folder import find_classes, make_dataset from .utils import download_and_extract_archive, verify_str_arg from .vision import VisionDataset class Imagenette(VisionDataset): """`Imagenette <https://github...
from pathlib import Path from typing import Any, List, Union from langchain_community.document_loaders.unstructured import ( UnstructuredFileLoader, validate_unstructured_version, ) class UnstructuredTSVLoader(UnstructuredFileLoader): """Load `TSV` files using `Unstructured`. Like other Unstruct...
from pathlib import Path from typing import Any, List, Union from langchain_community.document_loaders.unstructured import ( UnstructuredFileLoader, validate_unstructured_version, ) class UnstructuredTSVLoader(UnstructuredFileLoader): """Load `TSV` files using `Unstructured`. Like other Unstruct...
"""Chat generation output classes.""" from __future__ import annotations from typing import TYPE_CHECKING, Literal, Union from pydantic import model_validator from langchain_core.messages import BaseMessage, BaseMessageChunk from langchain_core.outputs.generation import Generation from langchain_core.utils._merge i...
"""Chat generation output classes.""" from __future__ import annotations from typing import TYPE_CHECKING, Literal, Union from pydantic import model_validator from langchain_core.messages import BaseMessage, BaseMessageChunk from langchain_core.outputs.generation import Generation from langchain_core.utils._merge i...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.llms import GradientLLM from langchain_community.llms.gradient_ai import TrainResult # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising de...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.llms import GradientLLM from langchain_community.llms.gradient_ai import TrainResult # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising de...
import os import warnings import torch from torchvision import datasets, io, models, ops, transforms, utils from .extension import _HAS_OPS try: from .version import __version__ # noqa: F401 except ImportError: pass # Check if torchvision is being imported within the root folder if not _HAS_OPS and os.path...
import os import warnings from modulefinder import Module import torch from torchvision import datasets, io, models, ops, transforms, utils from .extension import _HAS_OPS, _load_library try: from .version import __version__ # noqa: F401 except ImportError: pass try: _load_library("Decoder") _HAS_G...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( depth=101, init_c...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( depth=101, init_c...
from typing import Optional import numpy as np from docarray import BaseDoc, DocList from docarray.documents import ImageDoc from docarray.typing import AnyTensor, ImageUrl from jina import Deployment, Executor, Flow, requests def test_different_document_schema(): class Image(BaseDoc): tensor: Optional[...
from typing import Optional import numpy as np from docarray import BaseDoc from docarray import DocArray as DocumentArray from docarray.documents import ImageDoc from docarray.typing import AnyTensor, ImageUrl from jina import Deployment, Executor, Flow, requests def test_different_document_schema(): class Ima...
from typing import Optional import numpy as np import pytest import torch from pydantic.tools import parse_obj_as, schema_json_of from docarray import BaseDocument from docarray.base_document.io.json import orjson_dumps from docarray.typing import AudioTorchTensor, AudioUrl from tests import TOYDATA_DIR AUDIO_FILES ...
from typing import Optional import numpy as np import pytest import torch from pydantic.tools import parse_obj_as, schema_json_of from docarray import BaseDocument from docarray.base_document.io.json import orjson_dumps from docarray.typing import AudioTorchTensor, AudioUrl from tests import TOYDATA_DIR AUDIO_FILES ...
__all__ = ["LoggingCallbackHandler"] import logging from typing import Any, Optional from uuid import UUID from langchain_core.exceptions import TracerException from langchain_core.tracers.stdout import FunctionCallbackHandler from langchain_core.utils.input import get_bolded_text, get_colored_text class LoggingCal...
__all__ = ["LoggingCallbackHandler"] import logging from typing import Any, Optional from uuid import UUID from langchain_core.exceptions import TracerException from langchain_core.tracers.stdout import FunctionCallbackHandler from langchain_core.utils.input import get_bolded_text, get_colored_text class LoggingCal...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # Example to use different file client # Method 1: simply set the data root and let the file I/O module # automatically infer from prefix (LMDB and Memcached are not supported yet) # data_root = 's3://openmmlab/...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) fil...
""" Demo for accessing the xgboost eval metrics by using sklearn interface ====================================================================== """ import numpy as np from sklearn.datasets import make_hastie_10_2 import xgboost as xgb X, y = make_hastie_10_2(n_samples=2000, random_state=42) # Map labels from {-1,...
""" Demo for accessing the xgboost eval metrics by using sklearn interface ====================================================================== """ import numpy as np from sklearn.datasets import make_hastie_10_2 import xgboost as xgb X, y = make_hastie_10_2(n_samples=2000, random_state=42) # Map labels from {-1,...
from abc import abstractmethod from typing import Any, Type, TypeVar from pydantic import BaseConfig from pydantic.fields import ModelField from docarray.base_doc.base_node import BaseNode T = TypeVar('T') class AbstractType(BaseNode): @classmethod def __get_validators__(cls): yield cls.validate ...
from abc import abstractmethod from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar from pydantic import BaseConfig from pydantic.fields import ModelField from docarray.base_doc.base_node import BaseNode if TYPE_CHECKING: from docarray.proto import NodeProto T = TypeVar('T') class AbstractType(BaseN...
import pytest import torch from torchvision.prototype import datapoints @pytest.mark.parametrize( ("data", "input_requires_grad", "expected_requires_grad"), [ ([0.0], None, False), ([0.0], False, False), ([0.0], True, True), (torch.tensor([0.0], requires_grad=False), None, Fals...
import pytest import torch from torchvision.prototype import datapoints def test_isinstance(): assert isinstance( datapoints.Label([0, 1, 0], categories=["foo", "bar"]), torch.Tensor, ) def test_wrapping_no_copy(): tensor = torch.tensor([0, 1, 0], dtype=torch.int64) label = datapoint...
"""Standard LangChain interface tests""" from langchain_core.language_models import BaseChatModel from langchain_tests.unit_tests import ChatModelUnitTests from langchain_anthropic import ChatAnthropic class TestAnthropicStandard(ChatModelUnitTests): @property def chat_model_class(self) -> type[BaseChatMode...
"""Standard LangChain interface tests""" from typing import Type from langchain_core.language_models import BaseChatModel from langchain_tests.unit_tests import ChatModelUnitTests from langchain_anthropic import ChatAnthropic class TestAnthropicStandard(ChatModelUnitTests): @property def chat_model_class(s...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool as average_pool from keras.src.ops.nn import batch_normalization as batch_normalization from keras.src.ops.nn import binary_crossentropy as binary_crossentr...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool as average_pool from keras.src.ops.nn import batch_normalization as batch_normalization from keras.src.ops.nn import binary_crossentropy as binary_crossentr...
""" =================================== How to write your own v2 transforms =================================== .. note:: Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_ or :ref:`go to the end <sphx_glr_downlo...
""" =================================== How to write your own v2 transforms =================================== .. note:: Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_ or :ref:`go to the end <sphx_glr_downlo...
import logging import requests from fastapi import Request from backend.data import integrations from backend.data.model import APIKeyCredentials, Credentials from backend.integrations.providers import ProviderName from backend.integrations.webhooks.base import BaseWebhooksManager logger = logging.getLogger(__name__...
import logging from typing import ClassVar import requests from fastapi import Request from backend.data import integrations from backend.data.model import APIKeyCredentials, Credentials from backend.integrations.webhooks.base import BaseWebhooksManager logger = logging.getLogger(__name__) class Slant3DWebhooksMan...
from keras.src.api_export import keras_export from keras.src.layers.pooling.base_pooling import BasePooling @keras_export(["keras.layers.MaxPooling3D", "keras.layers.MaxPool3D"]) class MaxPooling3D(BasePooling): """Max pooling operation for 3D data (spatial or spatio-temporal). Downsamples the input along it...
from keras.src.api_export import keras_export from keras.src.layers.pooling.base_pooling import BasePooling @keras_export(["keras.layers.MaxPooling3D", "keras.layers.MaxPool3D"]) class MaxPooling3D(BasePooling): """Max pooling operation for 3D data (spatial or spatio-temporal). Downsamples the input along it...
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class FCOS(SingleStageDetector): """Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_""" def __init__(self, backbone, ...
from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class FCOS(SingleStageDetector): """Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_""" def __init__(self, backbone, neck, bbox_head, ...
_base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py' # learning policy max_epochs = 36 train_cfg = dict(max_epochs=max_epochs) # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=ma...
_base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py' # learning policy lr_config = dict(step=[28, 34]) runner = dict(type='EpochBasedRunner', max_epochs=36)
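This pair maps the legacy lr_config/runner settings onto the MMEngine-style schedule shown truncated in the first row. A sketch of the full new-style equivalent, with the milestones and epoch count taken from the legacy row and the warmup values assumed from the similar configs earlier in this listing:

max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
    # Linear warmup over the first 500 iterations.
    dict(type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    # Step decay at the legacy lr_config milestones.
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[28, 34],
        gamma=0.1),
]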
import subprocess import pytest from jina import Document, DocumentArray, Flow from ...clip_text import CLIPTextEncoder _EMBEDDING_DIM = 512 @pytest.mark.parametrize('request_size', [1, 10, 50, 100]) def test_integration(request_size: int): docs = DocumentArray( [Document(text='just some random text he...
import subprocess import pytest from jina import Document, DocumentArray, Flow from ...clip_text import CLIPTextEncoder _EMBEDDING_DIM = 512 @pytest.mark.parametrize('request_size', [1, 10, 50, 100]) def test_integration(request_size: int): docs = DocumentArray( [Document(text='just some random text he...
"""Fake Embedding class for testing purposes.""" import math from langchain_core.embeddings import Embeddings fake_texts = ["foo", "bar", "baz"] class FakeEmbeddings(Embeddings): """Fake embeddings functionality for testing.""" def embed_documents(self, texts: list[str]) -> list[list[float]]: """R...
"""Fake Embedding class for testing purposes.""" import math from typing import List from langchain_core.embeddings import Embeddings fake_texts = ["foo", "bar", "baz"] class FakeEmbeddings(Embeddings): """Fake embeddings functionality for testing.""" def embed_documents(self, texts: List[str]) -> List[Li...
import types from typing_extensions import TYPE_CHECKING from docarray.typing.tensor.audio import AudioNdArray from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding from docarray.typing.tensor.image import ImageNdArray, ImageTensor from docarray.typing.tensor.ndarray import NdArray from docarray...
import types from typing_extensions import TYPE_CHECKING from docarray.typing.tensor.audio import AudioNdArray from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding from docarray.typing.tensor.image import ImageNdArray, ImageTensor from docarray.typing.tensor.ndarray import NdArray from docarray...
# Copyright (c) OpenMMLab. All rights reserved. from .backbones import * # noqa: F401,F403 from .data_preprocessors import * # noqa: F401,F403 from .dense_heads import * # noqa: F401,F403 from .detectors import * # noqa: F401,F403 from .language_models import * # noqa: F401,F403 from .layers import * # noqa: F401...
# Copyright (c) OpenMMLab. All rights reserved. from .backbones import * # noqa: F401,F403 from .data_preprocessors import * # noqa: F401,F403 from .dense_heads import * # noqa: F401,F403 from .detectors import * # noqa: F401,F403 from .layers import * # noqa: F401,F403 from .losses import * # noqa: F401,F403 fro...
import types from typing import TYPE_CHECKING from docarray.store.file import FileDocStore from docarray.utils._internal.misc import ( _get_path_from_docarray_root_level, import_library, ) if TYPE_CHECKING: from docarray.store.jac import JACDocStore # noqa: F401 from docarray.store.s3 import S3DocSto...
from docarray.store.file import FileDocStore from docarray.store.jac import JACDocStore from docarray.store.s3 import S3DocStore __all__ = ['JACDocStore', 'FileDocStore', 'S3DocStore']
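The first row's TYPE_CHECKING block plus the import_library helper points at lazy loading of the optional stores, replacing the second row's eager imports. A generic sketch of that pattern using a PEP 562 module-level __getattr__; the mapping below is an illustration, not docarray's exact code:

import importlib

_LAZY_STORES = {
    'JACDocStore': 'docarray.store.jac',
    'S3DocStore': 'docarray.store.s3',
}

def __getattr__(name: str):
    # Resolve optional stores on first attribute access instead of at import.
    if name in _LAZY_STORES:
        module = importlib.import_module(_LAZY_STORES[name])
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')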
import enum from typing import Any, Optional import pydantic from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash from backend.data.graph import Graph class WSMethod(enum.Enum): SUBSCRIBE_GRAPH_EXEC = "subscribe_graph_execution" UNSUBSCRIBE = "unsubscribe" GRAPH_EXECUTION_EVENT = "graph_...
import enum from typing import Any, List, Optional, Union import pydantic import backend.data.graph from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash class Methods(enum.Enum): SUBSCRIBE = "subscribe" UNSUBSCRIBE = "unsubscribe" EXECUTION_EVENT = "execution_event" ERROR = "error" ...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.api.utils import bounding_boxes from keras.api.utils import legacy from keras.src.backend.common.global_state import clear_session from keras.src.backend.common.keras_tensor import is_ker...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.api.utils import legacy from keras.src.backend.common.global_state import clear_session from keras.src.backend.common.keras_tensor import is_keras_tensor from keras.src.backend.common.var...
import json import os from typing import Dict import torch from torch import Tensor, nn from sentence_transformers.util import fullname, import_from_string class Dense(nn.Module): """ Feed-forward function with activation function. This layer takes a fixed-sized sentence embedding and passes it throu...
import torch from torch import Tensor from torch import nn from typing import Dict import os import json from ..util import fullname, import_from_string class Dense(nn.Module): """ Feed-forward function with activation function. This layer takes a fixed-sized sentence embedding and passes it through a ...