Columns: input (string, lengths 33 to 5k), output (string, lengths 32 to 5k).
Each row below pairs an input code snippet with its corresponding output snippet; cell contents are truncated for preview.
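For context, a minimal sketch of how a string-pair table like this could be loaded and its length metadata reproduced, assuming the rows are published as a Hugging Face dataset; the repo id and split below are hypothetical placeholders, not the real dataset name.

from datasets import load_dataset

# Hypothetical repo id; substitute the actual dataset path.
pairs = load_dataset("user/code-edit-pairs", split="train")

# Each record holds two string fields: {"input": ..., "output": ...}.
# Recompute the per-column character-length ranges shown in the header.
for column in ("input", "output"):
    lengths = [len(text) for text in pairs[column]]
    print(column, "min:", min(lengths), "max:", max(lengths))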
# Copyright 2022 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applica...
# Copyright 2022 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applica...
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import ContributorDetails, SchemaField class ReadCsvBlock(Block): class Input(BlockSchema): contents: str = SchemaField( description="The contents of the CSV file to read", placeho...
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import ContributorDetails, SchemaField class ReadCsvBlock(Block): class Input(BlockSchema): contents: str = SchemaField( description="The contents of the CSV file to read", placeho...
import torch from torchaudio_unittest.common_utils import PytorchTestCase from .tacotron2_loss_impl import ( Tacotron2LossGradcheckTests, Tacotron2LossShapeTests, Tacotron2LossTorchscriptTests, ) class TestTacotron2LossShapeFloat32CPU(Tacotron2LossShapeTests, PytorchTestCase): dtype = torch.float32 ...
import torch from torchaudio_unittest.common_utils import PytorchTestCase from .tacotron2_loss_impl import ( Tacotron2LossShapeTests, Tacotron2LossTorchscriptTests, Tacotron2LossGradcheckTests, ) class TestTacotron2LossShapeFloat32CPU(Tacotron2LossShapeTests, PytorchTestCase): dtype = torch.float32 ...
_base_ = './faster-rcnn_r50_fpn_1x_coco.py' model = dict( data_preprocessor=dict( type='DetDataPreprocessor', mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False, pad_size_divisor=32), backbone=dict( norm_cfg=dict(requires_grad=False), ...
_base_ = './faster-rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_c...
import numpy as np from docarray.proto import NodeProto from docarray.typing.url.any_url import AnyUrl class ImageUrl(AnyUrl): def _to_node_protobuf(self) -> NodeProto: """Convert Document into a NodeProto protobuf message. This function should be called when the Document is nested into another D...
import numpy as np from .any_url import AnyUrl class ImageUrl(AnyUrl): def load(self) -> np.ndarray: """ Transform the URL into an image tensor. This is just a patch; we will move the function from the old docarray. :return: tensor image """ return np.zeros((3, 224, 224))
import sys from typing import Callable import pytest from langchain_core.runnables.base import RunnableLambda from langchain_core.runnables.utils import ( get_function_nonlocals, get_lambda_source, indent_lines_after_first, ) @pytest.mark.skipif( sys.version_info < (3, 9), reason="Requires python ve...
import sys from typing import Callable import pytest from langchain_core.runnables.base import RunnableLambda from langchain_core.runnables.utils import ( get_function_nonlocals, get_lambda_source, indent_lines_after_first, ) @pytest.mark.skipif( sys.version_info < (3, 9), reason="Requires python ve...
import gzip import logging import os from datetime import datetime from torch.utils.data import DataLoader from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, evaluation, losses, models, util #### Just some code to print debug information to stdout logging.basicConfig( format="%(...
from sentence_transformers import SentenceTransformer, LoggingHandler, InputExample from sentence_transformers import models, util, evaluation, losses import logging import os import gzip from torch.utils.data import DataLoader from datetime import datetime #### Just some code to print debug information to stdout log...
from typing import Any, Optional import pytest from langchain_core.callbacks import CallbackManagerForChainRun from langchain.callbacks import StdOutCallbackHandler from langchain.chains.base import Chain class FakeChain(Chain): """Fake chain class for testing purposes.""" be_correct: bool = True the_i...
from typing import Any, Optional import pytest from langchain.callbacks import StdOutCallbackHandler from langchain.chains.base import CallbackManagerForChainRun, Chain class FakeChain(Chain): """Fake chain class for testing purposes.""" be_correct: bool = True the_input_keys: list[str] = ["foo"] t...
from ._dsp import ( adsr_envelope, exp_sigmoid, extend_pitch, filter_waveform, frequency_impulse_response, oscillator_bank, sinc_impulse_response, ) from ._rir import ray_tracing, simulate_rir_ism from .functional import barkscale_fbanks, chroma_filterbank __all__ = [ "adsr_envelope", ...
from ._dsp import ( adsr_envelope, exp_sigmoid, extend_pitch, filter_waveform, frequency_impulse_response, oscillator_bank, sinc_impulse_response, ) from ._rir import simulate_rir_ism from .functional import barkscale_fbanks, chroma_filterbank __all__ = [ "adsr_envelope", "exp_sigm...
from __future__ import annotations from dataclasses import dataclass from sentence_transformers.training_args import SentenceTransformerTrainingArguments @dataclass class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments): r""" SparseEncoderTrainingArguments extends :class:`~SentenceTransf...
from __future__ import annotations from dataclasses import dataclass from sentence_transformers.training_args import SentenceTransformerTrainingArguments @dataclass class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments): r""" SparseEncoderTrainingArguments extends :class:`~SentenceTransf...
from typing import Any, Optional, Union from torchvision import tv_tensors from torchvision.transforms.v2 import functional as F, Transform from torchvision.tv_tensors._bounding_boxes import CLAMPING_MODE_TYPE class ConvertBoundingBoxFormat(Transform): """Convert bounding box coordinates to the given ``format``,...
from typing import Any, Union from torchvision import tv_tensors from torchvision.transforms.v2 import functional as F, Transform class ConvertBoundingBoxFormat(Transform): """Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY". Args: format (str or tv_tensors.Boundi...
import warnings from typing import Optional, Tuple, TypeVar from docarray.typing import AudioNdArray from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl from docarray.utils._internal.misc import is_notebook ...
import warnings from typing import Optional, Tuple, TypeVar from docarray.typing import AudioNdArray from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl from docarray.utils._internal.misc import is_notebook ...
import asyncio import logging from typing import Optional import aiohttp from fastapi import HTTPException from backend.data import graph as graph_db from backend.data.block import get_block from backend.util.settings import Settings from .models import ApiResponse, ChatRequest, GraphData logger = logging.getLogger...
import logging from typing import Optional import aiohttp from fastapi import HTTPException from backend.data import graph as graph_db from backend.data.block import get_block from backend.util.settings import Settings from .models import ApiResponse, ChatRequest, GraphData logger = logging.getLogger(__name__) sett...
from . import utils from .model import ( hubert_base, hubert_large, hubert_pretrain_base, hubert_pretrain_large, hubert_pretrain_model, hubert_pretrain_xlarge, hubert_xlarge, HuBERTPretrainModel, wav2vec2_base, wav2vec2_large, wav2vec2_large_lv60k, wav2vec2_model, Wav...
from . import utils from .model import ( hubert_base, hubert_large, hubert_pretrain_base, hubert_pretrain_large, hubert_pretrain_model, hubert_pretrain_xlarge, hubert_xlarge, HuBERTPretrainModel, wav2vec2_base, wav2vec2_large, wav2vec2_large_lv60k, wav2vec2_model, Wav...
# Copyright (c) OpenMMLab. All rights reserved. from .builder import build_match_cost from .match_cost import (BBoxL1Cost, ClassificationCost, DiceCost, FocalLossCost, IoUCost) __all__ = [ 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost', 'FocalLossCost', 'DiceCost' ]
# Copyright (c) OpenMMLab. All rights reserved. from .builder import build_match_cost from .match_cost import BBoxL1Cost, ClassificationCost, FocalLossCost, IoUCost __all__ = [ 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost', 'FocalLossCost' ]
"""Azure Cognitive Vision tool spec.""" from typing import List, Optional import requests from llama_index.core.tools.tool_spec.base import BaseToolSpec CV_URL_TMPL = "https://{resource}.cognitiveservices.azure.com/computervision/imageanalysis:analyze" class AzureCVToolSpec(BaseToolSpec): """Azure Cognitive Vi...
"""Azure Cognitive Vision tool spec.""" from typing import List, Optional import requests from llama_index.core.tools.tool_spec.base import BaseToolSpec CV_URL_TMPL = "https://{resource}.cognitiveservices.azure.com/computervision/imageanalysis:analyze" class AzureCVToolSpec(BaseToolSpec): """Azure Cognitive Vi...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase from unittest.mock import MagicMock, Mock, patch from mmengine.hooks import IterTimerHook from mmengine.logging import MessageHub def time_patch(): if not hasattr(time_patch, 'time'): time_patch.time = 0 else: time_...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase from unittest.mock import MagicMock, Mock, patch from mmengine.hooks import IterTimerHook from mmengine.logging import MessageHub def time_patch(): if not hasattr(time_patch, 'time'): time_patch.time = 0 else: time_...
# Copyright (c) OpenMMLab. All rights reserved. import asyncio from argparse import ArgumentParser from mmdet.apis import (async_inference_detector, inference_detector, init_detector, show_result_pyplot) def parse_args(): parser = ArgumentParser() parser.add_argument('img', help='Imag...
# Copyright (c) OpenMMLab. All rights reserved. import asyncio from argparse import ArgumentParser from mmdet.apis import (async_inference_detector, inference_detector, init_detector, show_result_pyplot) def parse_args(): parser = ArgumentParser() parser.add_argument('img', help='Imag...
from typing import List from llama_index.core.instrumentation.events.base import BaseEvent from llama_index.core.base.response.schema import RESPONSE_TYPE from llama_index.core.schema import QueryType class SynthesizeStartEvent(BaseEvent): """ SynthesizeStartEvent. Args: query (QueryType): Query...
from typing import List from llama_index.core.instrumentation.events.base import BaseEvent from llama_index.core.base.response.schema import RESPONSE_TYPE from llama_index.core.schema import QueryType class SynthesizeStartEvent(BaseEvent): """SynthesizeStartEvent. Args: query (QueryType): Query as a...
"""All minimum dependencies for scikit-learn.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import argparse from collections import defaultdict # scipy and cython should by in sync with pyproject.toml NUMPY_MIN_VERSION = "1.22.0" SCIPY_MIN_VERSION = "1.8.0" JOBLIB_MIN_VERSION = "1...
"""All minimum dependencies for scikit-learn.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import argparse from collections import defaultdict # scipy and cython should by in sync with pyproject.toml NUMPY_MIN_VERSION = "1.22.0" SCIPY_MIN_VERSION = "1.8.0" JOBLIB_MIN_VERSION = "1...
from typing import cast import pytest from langchain_core.language_models import BaseChatModel from langchain_core.messages import AIMessage from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnableConfig from langchain_tests.integration_tests import ChatModelIntegrationTests ...
from typing import cast import pytest from langchain_core.language_models import BaseChatModel from langchain_core.messages import AIMessage from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnableConfig from langchain_tests.integration_tests import ChatModelIntegrationTests ...
# Copyright (c) OpenMMLab. All rights reserved. import copy import itertools import logging from typing import Dict, Optional from mmengine.logging import print_log from mmengine.model import is_model_wrapper from mmengine.registry import HOOKS, MODELS from .hook import DATA_BATCH, Hook @HOOKS.register_module() clas...
# Copyright (c) OpenMMLab. All rights reserved. import itertools from typing import Dict, Optional from mmengine.model import is_model_wrapper from mmengine.registry import HOOKS, MODELS from .hook import DATA_BATCH, Hook @HOOKS.register_module() class EMAHook(Hook): """A Hook to apply Exponential Moving Average...
from typing import Any, Literal from langchain_core.messages import AIMessage from langchain_core.outputs import ChatGeneration from pydantic import BaseModel from langchain_anthropic.output_parsers import ToolsOutputParser _CONTENT: list = [ { "type": "text", "text": "thought", }, {"type...
from typing import Any, List, Literal from langchain_core.messages import AIMessage from langchain_core.outputs import ChatGeneration from pydantic import BaseModel from langchain_anthropic.output_parsers import ToolsOutputParser _CONTENT: List = [ { "type": "text", "text": "thought", }, ...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
from typing import Any, Dict, Optional, Union import numpy as np import PIL.Image import torch from torchvision import datapoints from torchvision.transforms.v2 import functional as F, Transform from torchvision.transforms.v2.utils import is_simple_tensor class PILToTensor(Transform): """[BETA] Convert a PIL I...
from typing import Any, Dict, Optional, Union import numpy as np import PIL.Image import torch from torchvision import datapoints from torchvision.transforms.v2 import functional as F, Transform from torchvision.transforms.v2.utils import is_simple_tensor class PILToTensor(Transform): """[BETA] Convert a PIL I...
_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
import warnings from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl from docarray.typing.url.filetypes import AUDIO_FILE_...
import warnings from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl from docarray.typing.url.filetypes import AUDIO_FILE_...
# Copyright (c) OpenMMLab. All rights reserved. import copy import os.path as osp import unittest import numpy as np from mmengine.data import BaseDataElement as PixelData from mmengine.data import InstanceData from mmdet.data_elements import DetDataSample from mmdet.data_elements.mask import BitmapMasks from mmdet.d...
# Copyright (c) OpenMMLab. All rights reserved. import copy import os.path as osp import unittest import numpy as np from mmengine.data import BaseDataElement as PixelData from mmengine.data import InstanceData from mmdet.data_elements import DetDataSample from mmdet.data_elements.mask import BitmapMasks from mmdet.d...
from typing import Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper class IncomeStatementsSchema(BaseModel): """In...
from typing import Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper class IncomeStatementsSchema(BaseModel): """In...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Tuple import torch from mmcv.ops import batched_nms from mmengine.data import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.utils import InstanceList from .standard_roi_head ...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Tuple import torch from mmcv.ops import batched_nms from mmengine.data import InstanceData from torch import Tensor from mmdet.data_elements import SampleList from mmdet.registry import MODELS from mmdet.utils import InstanceList from .standard_roi_he...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np from mmengine.testing import assert_allclose from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes from mmdet.structures.mask import BitmapMasks, PolygonMasks def create_random_bboxes(num_bboxes, img_w, img_h): bboxes_left_top = np.random....
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np from mmengine.testing import assert_allclose from mmdet.structures.mask import BitmapMasks, PolygonMasks def create_random_bboxes(num_bboxes, img_w, img_h): bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2)) bboxes_right_bottom...
import json import re from re import Pattern from typing import Union from langchain_core.agents import AgentAction, AgentFinish from langchain_core.exceptions import OutputParserException from langchain.agents.agent import AgentOutputParser from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS FINAL_ANSWER_A...
import json import re from re import Pattern from typing import Union from langchain_core.agents import AgentAction, AgentFinish from langchain_core.exceptions import OutputParserException from langchain.agents.agent import AgentOutputParser from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS FINAL_ANSWER_A...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
"""Standard LangChain interface tests""" from typing import Type import pytest from langchain_core.language_models import BaseChatModel from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_community.chat_models import ChatPerplexity class TestPerplexityStandard(ChatModelIntegratio...
"""Standard LangChain interface tests""" from typing import Type import pytest from langchain_core.language_models import BaseChatModel from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_community.chat_models import ChatPerplexity class TestPerplexityStandard(ChatModelIntegratio...
"""Run smoke tests""" import torchaudio # noqa: F401 import torchaudio.compliance.kaldi # noqa: F401 import torchaudio.datasets # noqa: F401 import torchaudio.functional # noqa: F401 import torchaudio.models # noqa: F401 import torchaudio.pipelines # noqa: F401 import torchaudio.sox_effects # noqa: F401 import ...
"""Run smoke tests""" import torchaudio # noqa: F401 import torchaudio.compliance.kaldi # noqa: F401 import torchaudio.datasets # noqa: F401 import torchaudio.functional # noqa: F401 import torchaudio.models # noqa: F401 import torchaudio.pipelines # noqa: F401 import torchaudio.sox_effects # noqa: F401 import ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.agent_toolkits.sql.prompt import ( SQL_FUNCTIONS_SUFFIX, SQL_PREFIX, SQL_SUFFIX, ) # Create a way to dynamically look up deprecated imports. # Used to consolidat...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.agent_toolkits.sql.prompt import ( SQL_FUNCTIONS_SUFFIX, SQL_PREFIX, SQL_SUFFIX, ) # Create a way to dynamically look up deprecated imports. # Used to consolidat...
# coding: utf-8 from pathlib import Path import numpy as np import pandas as pd from sklearn.metrics import mean_squared_error from sklearn.model_selection import GridSearchCV import lightgbm as lgb print("Loading data...") # load or create your dataset regression_example_dir = Path(__file__).absolute().parents[1] /...
# coding: utf-8 from pathlib import Path import numpy as np import pandas as pd from sklearn.metrics import mean_squared_error from sklearn.model_selection import GridSearchCV import lightgbm as lgb print('Loading data...') # load or create your dataset regression_example_dir = Path(__file__).absolute().parents[1] /...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import subprocess import pytest from jina import Document, Flow from ...video_torch_encoder import VideoTorchEncoder cur_dir = os.path.dirname(os.path.abspath(__file__)) @pytest.fixture() def kinec...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import pytest from jina import Document, Flow from ...video_torch_encoder import VideoTorchEncoder cur_dir = os.path.dirname(os.path.abspath(__file__)) @pytest.fixture() def kinects_videos(): f...
from typing import ( TYPE_CHECKING, Iterable, ) from docarray.array.memory import DocumentArrayInMemory if TYPE_CHECKING: # pragma: no cover from docarray.document import Document class MatchArray(DocumentArrayInMemory): """ :class:`MatchArray` inherits from :class:`DocumentArray`. It's a s...
from typing import ( TYPE_CHECKING, Iterable, ) from docarray.array.memory import DocumentArrayInMemory if TYPE_CHECKING: from docarray.document import Document class MatchArray(DocumentArrayInMemory): """ :class:`MatchArray` inherits from :class:`DocumentArray`. It's a subset of Documents t...
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast import numpy as np from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray from docarray.typing.tensor.tensor import AnyTensor from docarray.utils._internal....
from typing import Union from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray from docarray.utils._internal.misc import is_tf_available, is_torch_available torch_available = is_torch_available() if torch_available: from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor tf_ava...
import random import numpy as np import torch from torchvision import transforms as T from torchvision.transforms import functional as F def pad_if_smaller(img, size, fill=0): min_size = min(img.size) if min_size < size: ow, oh = img.size padh = size - oh if oh < size else 0 padw = si...
import random import numpy as np import torch from torchvision import transforms as T from torchvision.transforms import functional as F def pad_if_smaller(img, size, fill=0): min_size = min(img.size) if min_size < size: ow, oh = img.size padh = size - oh if oh < size else 0 padw = si...
# flake8: noqa from typing import Any from typing_extensions import assert_type from torch import randn, Tensor # See ../pass/arithmetic_ops.py for more information TENSOR, FLOAT = randn(3), 1.5 FLOAT & TENSOR # E: Unsupported operand types for & ("float" and "Tensor") FLOAT | TENSOR # E: Unsupported operand typ...
# flake8: noqa from typing import Any from typing_extensions import assert_type from torch import randn, Tensor # See ../pass/arithmetic_ops.py for more information TENSOR, INT, FLOAT = randn(3), 2, 1.5 FLOAT & TENSOR # E: Unsupported operand types for & ("float" and "Tensor") FLOAT | TENSOR # E: Unsupported ope...
_base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' input_size = 300 train_pipeline = [ dict(type='LoadImageFromFile'), dict(type=...
_base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_p...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.datasets.imdb import get_word_index as get_word_index from keras.src.datasets.imdb import load_data as load_data
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.datasets.imdb import get_word_index from keras.src.datasets.imdb import load_data
from docarray.array.document import DocumentArray from docarray.array.storage.qdrant import StorageMixins, QdrantConfig __all__ = ['DocumentArrayQdrant', 'QdrantConfig'] class DocumentArrayQdrant(StorageMixins, DocumentArray): """ DocumentArray that stores Documents in a `Qdrant <https://qdrant.tech/>`_ vect...
from .document import DocumentArray from .storage.qdrant import StorageMixins, QdrantConfig __all__ = ['DocumentArrayQdrant', 'QdrantConfig'] class DocumentArrayQdrant(StorageMixins, DocumentArray): """ DocumentArray that stores Documents in a `Qdrant <https://qdrant.tech/>`_ vector search engine. .. no...
from datetime import datetime, timedelta from backend.blocks.hubspot._auth import ( HubSpotCredentials, HubSpotCredentialsField, HubSpotCredentialsInput, ) from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request impo...
from datetime import datetime, timedelta from backend.blocks.hubspot._auth import ( HubSpotCredentials, HubSpotCredentialsField, HubSpotCredentialsInput, ) from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request impo...
from typing import TYPE_CHECKING, Type, List if TYPE_CHECKING: # pragma: no cover from docarray.typing import T from docarray.document.strawberry_type import StrawberryDocument class StrawberryMixin: def to_strawberry_type(self) -> List['StrawberryDocument']: """Convert a DocumentArray object in...
from typing import TYPE_CHECKING, Type, List if TYPE_CHECKING: from docarray.typing import T from docarray.document.strawberry_type import StrawberryDocument class StrawberryMixin: def to_strawberry_type(self) -> List['StrawberryDocument']: """Convert a DocumentArray object into a Pydantic model....
import re from io import BytesIO from pathlib import Path from typing import Any, Type import numpy as np import pytest from langchain_core.documents.base import Blob from langchain_core.language_models import FakeMessagesListChatModel from langchain_core.messages import ChatMessage from langchain_community.document_...
import re from pathlib import Path from typing import Any, Type import pytest from langchain_core.documents.base import Blob from langchain_core.language_models import FakeMessagesListChatModel from langchain_core.messages import ChatMessage from langchain_community.document_loaders.parsers.images import ( LLMIma...
from __future__ import annotations import json from typing import Any, Optional from langchain_core._api import deprecated from langchain_core.callbacks import CallbackManagerForChainRun from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import BasePromptTemplate from langchain_t...
from __future__ import annotations import json from typing import Any, Dict, List, Optional from langchain_core._api import deprecated from langchain_core.callbacks import CallbackManagerForChainRun from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import BasePromptTemplate from...
import importlib import pytest from fastapi.testclient import TestClient from ...utils import needs_py39 @pytest.fixture( name="client", params=[ "tutorial012", pytest.param("tutorial012_py39", marks=needs_py39), "tutorial012_an", pytest.param("tutorial012_an_py39", marks=nee...
from fastapi.testclient import TestClient from docs_src.query_params_str_validations.tutorial012 import app client = TestClient(app) def test_default_query_values(): url = "/items/" response = client.get(url) assert response.status_code == 200, response.text assert response.json() == {"q": ["foo", "...
""" This examples trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2. It does NOT produce a sentence embedding and does NOT work for individual sentences. Usage: python trai...
""" This examples trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2. It does NOT produce a sentence embedding and does NOT work for individual sentences. Usage: python trai...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import tempfile from collections import OrderedDict import torch from mmengine import Config from mmengine.utils import digit_version def parse_config(config_strings): temp_file = tempfile.NamedTemporaryFile() config_path = f'{temp_file.name}.py...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import tempfile from collections import OrderedDict import torch from mmengine import Config def parse_config(config_strings): temp_file = tempfile.NamedTemporaryFile() config_path = f'{temp_file.name}.py' with open(config_path, 'w') as f: ...
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable...
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable...
import pytest @pytest.mark.compile def test_placeholder() -> None: """Used for compiling integration tests without running any real tests."""
import pytest @pytest.mark.compile def test_placeholder() -> None: """Used for compiling integration tests without running any real tests.""" pass
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # please install mmcls>=1.0 # import mmcls.models to trigger register_module in mmcls custom_imports = dict(imports=['mmcls.models'], allow_faile...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # please install mmcls>=0.22.0 # import mmcls.models to trigger register_module in mmcls custom_imports = dict(imports=['mmcls.models'], allow_fa...
"""Tests for the Google Cloud DocAI parser.""" from unittest.mock import MagicMock, patch import pytest from langchain_community.document_loaders.parsers import ( AzureAIDocumentIntelligenceParser, ) @pytest.mark.requires("azure", "azure.ai", "azure.ai.documentintelligence") @patch("azure.ai.documentintelligen...
"""Tests for the Google Cloud DocAI parser.""" from unittest.mock import MagicMock, patch import pytest from langchain_community.document_loaders.parsers import ( AzureAIDocumentIntelligenceParser, ) @pytest.mark.requires("azure", "azure.ai", "azure.ai.documentintelligence") @patch("azure.ai.documentintelligen...
# Copyright (c) OpenMMLab. All rights reserved. import warnings from mmdet.registry import TASK_UTILS PRIOR_GENERATORS = TASK_UTILS ANCHOR_GENERATORS = TASK_UTILS def build_prior_generator(cfg, default_args=None): warnings.warn( '``build_prior_generator`` would be deprecated soon, please use ' ...
# Copyright (c) OpenMMLab. All rights reserved. import warnings from mmcv.utils import Registry, build_from_cfg PRIOR_GENERATORS = Registry('Generator for anchors and points') ANCHOR_GENERATORS = PRIOR_GENERATORS def build_prior_generator(cfg, default_args=None): return build_from_cfg(cfg, PRIOR_GENERATORS, de...
from typing import Any, Optional, Union, cast from langchain_core.messages import AIMessage, ToolCall from langchain_core.messages.tool import tool_call from langchain_core.output_parsers import BaseGenerationOutputParser from langchain_core.outputs import ChatGeneration, Generation from pydantic import BaseModel, Con...
from typing import Any, List, Optional, Type, Union, cast from langchain_core.messages import AIMessage, ToolCall from langchain_core.messages.tool import tool_call from langchain_core.output_parsers import BaseGenerationOutputParser from langchain_core.outputs import ChatGeneration, Generation from pydantic import Ba...
from datetime import datetime import pytest from jina import Document, DocumentArray, Flow class MyOwnException(Exception): pass @pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket']) def test_invalid_input_raise(protocol): f = Flow(protocol=protocol).add() try: with f: ...
import pytest from datetime import datetime from jina import Flow, DocumentArray, Document class MyOwnException(Exception): pass @pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket']) def test_invalid_input_raise(protocol): f = Flow(protocol=protocol).add() try: with f: ...
from langchain_core.prompts.prompt import PromptTemplate KG_TRIPLE_DELIMITER = "<|>" _DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE = ( "You are a networked intelligence helping a human track knowledge triples" " about all relevant people, things, concepts, etc. and integrating" " them with your knowledge ...
# flake8: noqa from langchain_core.prompts.prompt import PromptTemplate KG_TRIPLE_DELIMITER = "<|>" _DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE = ( "You are a networked intelligence helping a human track knowledge triples" " about all relevant people, things, concepts, etc. and integrating" " them with...
"""Argparser module for container runtimes""" from jina.parsers.helper import KVAppendAction, add_arg_group def mixin_container_runtime_parser(parser, pod_type: str = 'executor'): """Mixing in arguments required by :class:`ContainerRuntime` into the given parser. :param parser: the parser instance to which we...
"""Argparser module for container runtimes""" from jina.parsers.helper import KVAppendAction, add_arg_group def mixin_container_runtime_parser(parser): """Mixing in arguments required by :class:`ContainerRuntime` into the given parser. :param parser: the parser instance to which we add arguments """ g...
import os from pathlib import Path import pytest from jina.hubble import HubExecutor, hubapi from jina.hubble.hubapi import list_local cur_dir = os.path.dirname(os.path.abspath(__file__)) @pytest.fixture def executor_zip_file(): return Path(__file__).parent / 'dummy_executor.zip' @pytest.fixture def test_exe...
import os from pathlib import Path import pytest from jina.hubble import HubExecutor, hubapi from jina.hubble.hubapi import list_local cur_dir = os.path.dirname(os.path.abspath(__file__)) @pytest.fixture def executor_zip_file(): return Path(__file__).parent / 'dummy_executor.zip' @pytest.fixture def test_exe...
"""Parser for JSON output.""" from __future__ import annotations import json from json import JSONDecodeError from typing import Annotated, Any, Optional, TypeVar, Union import jsonpatch # type: ignore[import] import pydantic from pydantic import SkipValidation from langchain_core.exceptions import OutputParserExc...
from __future__ import annotations import json from json import JSONDecodeError from typing import Annotated, Any, Optional, TypeVar, Union import jsonpatch # type: ignore[import] import pydantic from pydantic import SkipValidation from langchain_core.exceptions import OutputParserException from langchain_core.outp...
# training schedule for 20e train_cfg = dict(by_epoch=True, max_epochs=20) val_cfg = dict(interval=1) test_cfg = dict() # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=20, ...
# training schedule for 20e train_cfg = dict(by_epoch=True, max_epochs=20) val_cfg = dict(interval=1) test_cfg = dict() # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=20, ...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import numpy as np import pytest import torch from executor.models import EmbeddingModelWrapper, _ModelCatalogue @pytest.mark.parametrize( ['model_name', 'is_supported'], [ ('ResNet', False), ...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import numpy as np import pytest import torch from ...models import EmbeddingModelWrapper, _ModelCatalogue @pytest.mark.parametrize( ['model_name', 'is_supported'], [ ('ResNet', False), ('re...
from jina import DocumentArray, Flow from ...clip_text import CLIPTextEncoder def test_no_documents(): test_docs = DocumentArray() f = Flow().add(uses=CLIPTextEncoder) with f: f.search(test_docs, {}) assert len(test_docs) == 0 # SUCCESS
from jina import DocumentArray, Flow from ...clip_text import CLIPTextEncoder def test_no_documents(): test_docs = DocumentArray() f = Flow().add(uses=CLIPTextEncoder) with f: f.search(test_docs, {}) assert len(test_docs) == 0 # SUCCESS
from .BinaryClassificationEvaluator import BinaryClassificationEvaluator from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator from .InformationRetrievalEvaluator import InformationRetrievalEvaluator from .LabelAccuracyEvaluator import LabelAccuracyEvaluator from .MSEEvaluator import MSEEvaluator from ...
from .SentenceEvaluator import SentenceEvaluator from .SimilarityFunction import SimilarityFunction from .BinaryClassificationEvaluator import BinaryClassificationEvaluator from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator from .InformationRetrievalEvaluator import InformationRetrievalEvaluator fro...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( bac...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( bac...
from __future__ import annotations import functools import logging logger = logging.getLogger(__name__) def cross_encoder_init_args_decorator(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): kwargs_renamed_mapping = { "model_name": "model_name_or_path", "auto...
from __future__ import annotations import functools import logging logger = logging.getLogger(__name__) def cross_encoder_init_args_decorator(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): kwargs_renamed_mapping = { "model_name": "model_name_or_path", "auto...
_base_ = ['faster-rcnn_r50_fpn_32xb2-1x_openimages-challenge.py'] # Use ClassAwareSampler train_dataloader = dict( sampler=dict(_delete_=True, type='ClassAwareSampler', num_sample_class=1))
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py'] # Use ClassAwareSampler train_dataloader = dict( sampler=dict(_delete_=True, type='ClassAwareSampler', num_sample_class=1))
# ruff: noqa: F401 # This is the module that test_patching.py uses to test patch_submodule() import os import os as renamed_os from os import path from os import path as renamed_path from os.path import join from os.path import join as renamed_join open = open # we just need to have a builtin inside this module to t...
# isort: skip_file # This is the module that test_patching.py uses to test patch_submodule() import os # noqa: F401 - this is just for tests import os as renamed_os # noqa: F401 - this is just for tests from os import path # noqa: F401 - this is just for tests from os import path as renamed_path # noqa: F401 - th...
""" This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64]. It generates sentence embeddings that can be compared using...
""" This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64]. It generates sentence embeddings that can be compared using...
import numpy as np from docarray import BaseDoc from docarray.array import DocVec from docarray.array.doc_vec.column_storage import ColumnStorageView from docarray.typing import AnyTensor def test_column_storage_init(): class InnerDoc(BaseDoc): price: int class MyDoc(BaseDoc): tensor: AnyTen...
import numpy as np from docarray import BaseDoc from docarray.array import DocVec from docarray.array.doc_vec.column_storage import ColumnStorageView from docarray.typing import AnyTensor def test_column_storage_init(): class InnerDoc(BaseDoc): price: int class MyDoc(BaseDoc): tensor: AnyTen...
"""Faiss reader.""" from typing import Any, Dict, List import numpy as np from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class FaissReader(BaseReader): """ Faiss reader. Retrieves documents through an existing in-memory Faiss index. These documents...
"""Faiss reader.""" from typing import Any, Dict, List import numpy as np from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class FaissReader(BaseReader): """Faiss reader. Retrieves documents through an existing in-memory Faiss index. These documents can ...
import types from typing import TYPE_CHECKING from docarray.index.backends.in_memory import InMemoryExactNNIndex from docarray.utils._internal.misc import ( _get_path_from_docarray_root_level, import_library, ) if TYPE_CHECKING: from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401 ...
import types from typing import TYPE_CHECKING from docarray.index.backends.in_memory import InMemoryDocIndex from docarray.utils._internal.misc import ( _get_path_from_docarray_root_level, import_library, ) if TYPE_CHECKING: from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401 fro...
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast import numpy as np from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray from docarray.typing.tensor.tensor import AnyTensor from docarray.utils._internal....
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast import numpy as np from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray from docarray.typing.tensor.tensor import AnyTensor from docarray.utils._internal....
import pytest DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__" DATASET_LOADING_SCRIPT_CODE = """ import json import os import datasets REPO_URL = "https://huggingface.co/datasets/hf-internal-testing/raw_jsonl/resolve/main/" URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-...
import pytest DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__" DATASET_LOADING_SCRIPT_CODE = """ import json import os import datasets REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/" URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikian...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='ATSS', data_preprocessor=dict( type='DetDataPreprocessor', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], bgr_to_rgb...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='ATSS', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=d...
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appl...
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appl...
import csv import gzip import logging import os from datetime import datetime import torch from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator #### Just some code to print debug information...
import csv import gzip import logging import os from datetime import datetime import torch from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator #### Just some code to print debug information...
"""Test in memory docstore.""" from typing import Any from langchain.output_parsers.combining import CombiningOutputParser from langchain.output_parsers.regex import RegexParser from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser DEF_EXPECTED_RESULT = { "answer": "Paris", "...
"""Test in memory docstore.""" from typing import Any from langchain.output_parsers.combining import CombiningOutputParser from langchain.output_parsers.regex import RegexParser from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser DEF_EXPECTED_RESULT = { "answer": "Paris", "...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
from uuid import UUID import pytest from pydantic import schema_json_of from pydantic.tools import parse_obj_as from docarray.base_doc.io.json import orjson_dumps from docarray.typing import ID @pytest.mark.parametrize( 'id', ['1234', 1234, UUID('cf57432e-809e-4353-adbd-9d5c0d733868')] ) def test_id_validation(...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # training schedule for 2x train_cfg = dict(max_epochs=24) # learning rate policy param_scheduler = [ dict( type='LinearLR', start_...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # training schedule for 2x train_cfg = dict(max_epochs=24) # learning rate policy param_scheduler = [ dict( type='LinearLR', start_...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( data_preprocessor=dict( # The mean and std are used in PyCls when training RegNets mean=[103.53, 116.28, 123.675...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( _delete_=True, type='RegNet', arch='regnetx_3.2gf', out_indices=(0, 1, 2, 3), ...
from ._dsp import adsr_envelope, extend_pitch, oscillator_bank, sinc_impulse_response from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve, speed __all__ = [ "add_noise", "adsr_envelope", "barkscale_fbanks", "convolve", "extend_pitch", "fftconvolve", "oscillator_bank",...
from ._dsp import adsr_envelope, extend_pitch, oscillator_bank, sinc_impulse_response from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve __all__ = [ "add_noise", "adsr_envelope", "barkscale_fbanks", "convolve", "extend_pitch", "fftconvolve", "oscillator_bank", "s...
""" ================== Two-class AdaBoost ================== This example fits an AdaBoosted decision stump on a non-linearly separable classification dataset composed of two "Gaussian quantiles" clusters (see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision boundary and decision scores. The di...
""" ================== Two-class AdaBoost ================== This example fits an AdaBoosted decision stump on a non-linearly separable classification dataset composed of two "Gaussian quantiles" clusters (see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision boundary and decision scores. The di...
from typing import Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar('T', bound='Vi...
from typing import Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar('T', bound='Vi...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess import pytest from jina import Document, DocumentArray, Flow from ...dpr_reader import DPRReaderRanker @pytest.mark.parametrize('request_size', [1, 8, 50]) def test_integration(request_size:...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import pytest from jina import Document, DocumentArray, Flow from ...dpr_reader import DPRReaderRanker @pytest.mark.parametrize('request_size', [1, 8, 50]) def test_integration(request_size: int): docs = D...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.document_loaders import NotebookLoader from langchain_community.document_loaders.notebook import ( concatenate_cells, remove_newlines, ) # Create a way to dynamicall...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.document_loaders import NotebookLoader from langchain_community.document_loaders.notebook import ( concatenate_cells, remove_newlines, ) # Create a way to dynamicall...
from typing import Callable, Dict, Generic, List, Optional, Type, TypeVar from torch.utils.data import Dataset from docarray import BaseDoc, DocList, DocVec from docarray.typing import TorchTensor from docarray.utils._internal._typing import change_cls_name, safe_issubclass T_doc = TypeVar('T_doc', bound=BaseDoc) ...
from typing import Callable, Dict, Generic, List, Optional, Type, TypeVar from torch.utils.data import Dataset from docarray import BaseDoc, DocList, DocVec from docarray.typing import TorchTensor from docarray.utils._internal._typing import change_cls_name T_doc = TypeVar('T_doc', bound=BaseDoc) class MultiModalD...
""" NOTE: This file must be imported like ``import torch.distributed.fsdp._traversal_utils`` and not like ``from torch.distributed.fsdp._traversal_utils import ...`` to avoid circular imports. For brevity, we may import the file as ``traversal_utils``. """ import collections import torch.nn as nn from torch.distribut...
""" NOTE: This file must be imported like ``import torch.distributed.fsdp._traversal_utils`` and not like ``from torch.distirbuted.fsdp._traversal_utils import ...`` to avoid circular imports. For brevity, we may import the file as ``traversal_utils``. """ import collections import torch.nn as nn from torch.distribut...
import sys import traceback from importlib.machinery import SourceFileLoader if __name__ == "__main__": files = sys.argv[1:] has_failure = False for file in files: try: SourceFileLoader("x", file).load_module() except Exception: has_failure = True traceba...
import sys import traceback from importlib.machinery import SourceFileLoader if __name__ == "__main__": files = sys.argv[1:] has_failure = False for file in files: try: SourceFileLoader("x", file).load_module() except Exception: has_failure = True print(f...
"""Format instructions.""" JSON_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below. As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required"...
# flake8: noqa JSON_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below. As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}} ...
import logging from collections import defaultdict from typing import Annotated, Any, Dict, List, Optional, Sequence from autogpt_libs.utils.cache import thread_cached from fastapi import APIRouter, Body, Depends, HTTPException from prisma.enums import AgentExecutionStatus, APIKeyPermission from typing_extensions impo...
import logging from collections import defaultdict from typing import Annotated, Any, Dict, List, Optional, Sequence from autogpt_libs.utils.cache import thread_cached from fastapi import APIRouter, Body, Depends, HTTPException from prisma.enums import AgentExecutionStatus, APIKeyPermission from typing_extensions impo...
import os import torchaudio import torchvision from torch.utils.data import Dataset def _load_list(args, *filenames): output = [] length = [] for filename in filenames: filepath = os.path.join(os.path.dirname(args.dataset_path), filename) for line in open(filepath).read().splitlines(): ...
import os from pathlib import Path from typing import Tuple, Union import torch import torchaudio import torchvision from torch import Tensor from torch.utils.data import Dataset def _load_list(args, *filenames): output = [] length = [] for filename in filenames: filepath = os.path.join(os.path.d...
from __future__ import annotations from dataclasses import dataclass from sentence_transformers.training_args import SentenceTransformerTrainingArguments @dataclass class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments): """ SparseEncoderTrainingArguments extends :class:`~transformers.Tr...
from __future__ import annotations from dataclasses import dataclass from sentence_transformers.training_args import SentenceTransformerTrainingArguments @dataclass class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments): """ SparseEncoderTrainingArguments extends :class:`~transformers.Tr...
_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py' rpn_weight = 0.7 model = dict( rpn_head=dict( _delete_=True, type='CascadeRPNHead', num_stages=2, stages=[ dict( type='StageCascadeRPNHead', in_channels=256, fea...
_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py' rpn_weight = 0.7 model = dict( rpn_head=dict( _delete_=True, type='CascadeRPNHead', num_stages=2, stages=[ dict( type='StageCascadeRPNHead', in_channels=256, fea...
import csv import os from pathlib import Path from typing import Tuple, Union import torchaudio from torch import Tensor from torch.utils.data import Dataset from torchaudio._internal import download_url_to_file from torchaudio.datasets.utils import _extract_tar _RELEASE_CONFIGS = { "release1": { "folder...
import csv import os from pathlib import Path from typing import Tuple, Union import torchaudio from torch import Tensor from torch.hub import download_url_to_file from torch.utils.data import Dataset from torchaudio.datasets.utils import _extract_tar _RELEASE_CONFIGS = { "release1": { "folder_in_archive...