Columns: input (string, lengths 33 to 5k) and output (string, lengths 32 to 5k). Each pair of lines below is one row: the input snippet followed by the corresponding output snippet, truncated by the viewer.
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple, Union import mmcv import numpy as np from mmengine.utils import is_str def palette_val(palette: List[tuple]) -> List[tuple]: """Convert palette to matplotlib palette. Args: palette (List[tuple]): A list of color tuples. ...
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple, Union import mmcv import numpy as np from mmengine.utils import is_str def palette_val(palette: List[tuple]) -> List[tuple]: """Convert palette to matplotlib palette. Args: palette (List[tuple]): A list of color tuples. ...
from __future__ import annotations from collections.abc import Iterable from torch import Tensor from sentence_transformers import util from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder cla...
from __future__ import annotations from collections.abc import Iterable from torch import Tensor from sentence_transformers import util from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder cla...
import os import numpy as np import pytest import torch from pydantic import parse_obj_as from docarray import BaseDocument from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor @pytest.mark.parametrize( 'tensor,cls_audio...
import os import numpy as np import pytest import torch from pydantic import parse_obj_as from docarray import BaseDocument from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor @pytest.mark.parametrize( 'tensor,cls_audio...
from .autoencoder_asym_kl import AsymmetricAutoencoderKL from .autoencoder_dc import AutoencoderDC from .autoencoder_kl import AutoencoderKL from .autoencoder_kl_allegro import AutoencoderKLAllegro from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX from .autoencoder_kl_cosmos import AutoencoderKLCosmos from ....
from .autoencoder_asym_kl import AsymmetricAutoencoderKL from .autoencoder_dc import AutoencoderDC from .autoencoder_kl import AutoencoderKL from .autoencoder_kl_allegro import AutoencoderKLAllegro from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX from .autoencoder_kl_hunyuan_video import AutoencoderKLHunyua...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Union from mmengine.config import ConfigDict from mmdet.registry import MODELS from .two_stage import TwoStageDetector @MODELS.register_module() class FasterRCNN(TwoStageDetector): """Implementation of `Faster R-CNN <https://arxiv.org/...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from .two_stage import TwoStageDetector @MODELS.register_module() class FasterRCNN(TwoStageDetector): """Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_""" def __init__(self, backbone, ...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Union import torch from numpy import ndarray from torch import Tensor from mmdet.core.bbox.assigners import AssignResult from mmdet.registry import TASK_UTILS from .base_sampler import BaseSampler @TASK_UTILS.register_module() class RandomSampler(Ba...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Union import torch from numpy import ndarray from torch import Tensor from mmdet.core.bbox.assigners import AssignResult from mmdet.registry import TASK_UTILS from .base_sampler import BaseSampler @TASK_UTILS.register_module() class RandomSampler(Ba...
"""Pydantic v1 compatibility shim.""" from langchain_core._api import warn_deprecated try: from pydantic.v1.dataclasses import * # noqa: F403 except ImportError: from pydantic.dataclasses import * # type: ignore # noqa: F403 warn_deprecated( "0.3.0", removal="1.0.0", alternative="pydantic.v1 or...
from langchain_core._api import warn_deprecated try: from pydantic.v1.dataclasses import * # noqa: F403 except ImportError: from pydantic.dataclasses import * # type: ignore # noqa: F403 warn_deprecated( "0.3.0", removal="1.0.0", alternative="pydantic.v1 or pydantic", message=( "As o...
from langchain_huggingface.chat_models.huggingface import ( # type: ignore[import-not-found] TGI_MESSAGE, TGI_RESPONSE, ChatHuggingFace, _convert_dict_to_message, ) __all__ = ["TGI_MESSAGE", "TGI_RESPONSE", "ChatHuggingFace", "_convert_dict_to_message"]
from langchain_huggingface.chat_models.huggingface import ( # type: ignore[import-not-found] TGI_MESSAGE, TGI_RESPONSE, ChatHuggingFace, _convert_dict_to_message, ) __all__ = ["ChatHuggingFace", "_convert_dict_to_message", "TGI_MESSAGE", "TGI_RESPONSE"]
import re from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField class CodeExtractionBlock(Block): class Input(BlockSchema): text: str = SchemaField( description="Text containing code blocks to extract (e.g., AI response)", ...
import re from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField class CodeExtractionBlock(Block): class Input(BlockSchema): text: str = SchemaField( description="Text containing code blocks to extract (e.g., AI response)", ...
from argparse import Namespace from copy import deepcopy from typing import TYPE_CHECKING, Type from hubble.executor.helper import is_valid_huburi from hubble.executor.hubio import HubIO from jina.enums import PodRoleType from jina.orchestrate.pods import Pod from jina.orchestrate.pods.container import ContainerPod ...
from argparse import Namespace from copy import deepcopy from typing import TYPE_CHECKING, Type from hubble.executor.helper import is_valid_huburi from hubble.executor.hubio import HubIO from jina.enums import PodRoleType from jina.orchestrate.pods import Pod from jina.orchestrate.pods.container import ContainerPod ...
from datetime import datetime, timedelta, timezone import jwt from fastapi import Depends, FastAPI, HTTPException, status from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm from jwt.exceptions import InvalidTokenError from passlib.context import CryptContext from pydantic import BaseModel # ...
from datetime import datetime, timedelta, timezone import jwt from fastapi import Depends, FastAPI, HTTPException, status from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm from jwt.exceptions import InvalidTokenError from passlib.context import CryptContext from pydantic import BaseModel # ...
"""Joint QA Summary graph.""" from typing import List, Optional, Sequence from llama_index.core.base.embeddings.base import BaseEmbedding from llama_index.core.callbacks.base import CallbackManager from llama_index.core.indices.list.base import SummaryIndex from llama_index.core.indices.vector_store import VectorStor...
"""Joint QA Summary graph.""" from typing import List, Optional, Sequence from llama_index.core.base.embeddings.base import BaseEmbedding from llama_index.core.callbacks.base import CallbackManager from llama_index.core.indices.list.base import SummaryIndex from llama_index.core.indices.vector_store import VectorStor...
import logging import pytest from backend.util.test import SpinTestServer # NOTE: You can run tests like with the --log-cli-level=INFO to see the logs # Set up logging logger = logging.getLogger(__name__) # Create console handler with formatting ch = logging.StreamHandler() ch.setLevel(logging.INFO) formatter = lo...
import pytest from backend.util.test import SpinTestServer @pytest.fixture(scope="session") async def server(): async with SpinTestServer() as server: yield server @pytest.fixture(scope="session", autouse=True) async def graph_cleanup(server): created_graph_ids = [] original_create_graph = serv...
from typing import Any, Dict, Iterable import torch from torch import Tensor, nn from sentence_transformers import util from sentence_transformers.SentenceTransformer import SentenceTransformer class CoSENTLoss(nn.Module): def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.p...
from typing import Dict, Iterable import torch from torch import Tensor, nn from sentence_transformers import util from sentence_transformers.SentenceTransformer import SentenceTransformer class CoSENTLoss(nn.Module): def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwi...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv from .version import __version__, short_version def digit_version(version_str): digit_version = [] for x in version_str.split('.'): if x.isdigit(): digit_version.append(int(x)) elif x.find('rc') != -1: patch_v...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv from .version import __version__, short_version def digit_version(version_str): digit_version = [] for x in version_str.split('.'): if x.isdigit(): digit_version.append(int(x)) elif x.find('rc') != -1: patch_v...
import os from typing import BinaryIO, Optional, Tuple, Union import torch import torchaudio from .backend import Backend from .common import AudioMetaData sox_ext = torchaudio._extension.lazy_import_sox_ext() class SoXBackend(Backend): @staticmethod def info(uri: Union[BinaryIO, str, os.PathLike], format:...
import os from typing import BinaryIO, Optional, Tuple, Union import torch import torchaudio from .backend import Backend from .common import AudioMetaData sox_ext = torchaudio._extension.lazy_import_sox_ext() class SoXBackend(Backend): @staticmethod def info(uri: Union[BinaryIO, str, os.PathLike], format:...
# -*- coding: utf-8 -*- """ Audio Feature Augmentation ========================== **Author**: `Moto Hira <moto@meta.com>`__ """ # When running this tutorial in Google Colab, install the required packages # with the following. # !pip install torchaudio librosa import torch import torchaudio import torchaudio.transfo...
# -*- coding: utf-8 -*- """ Audio Feature Augmentation ========================== **Author**: `Moto Hira <moto@meta.com>`__ """ # When running this tutorial in Google Colab, install the required packages # with the following. # !pip install torchaudio librosa import torch import torchaudio import torchaudio.transfo...
import csv import logging import os from typing import Optional import numpy as np from sklearn.metrics import ndcg_score logger = logging.getLogger(__name__) class CERerankingEvaluator: """ This class evaluates a CrossEncoder model for the task of re-ranking. Given a query and a list of documents, it ...
import logging import numpy as np import os import csv from typing import Optional from sklearn.metrics import ndcg_score logger = logging.getLogger(__name__) class CERerankingEvaluator: """ This class evaluates a CrossEncoder model for the task of re-ranking. Given a query and a list of documents, it c...
""" Outlook local calendar reader for Windows. Created on Sun Apr 16 12:03:19 2023 @author: tevslin """ import datetime import importlib import platform from typing import List, Optional, Union from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document # Copyright 2023 Evslin...
""" Outlook local calendar reader for Windows. Created on Sun Apr 16 12:03:19 2023 @author: tevslin """ import datetime import importlib import platform from typing import List, Optional, Union from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document # Copyright 2023 Evslin...
"""Retriever tool.""" from typing import TYPE_CHECKING, Any, List, Optional from llama_index.core.base.base_retriever import BaseRetriever if TYPE_CHECKING: from llama_index.core.langchain_helpers.agents.tools import LlamaIndexTool from llama_index.core.schema import ( MetadataMode, Node, NodeWithSc...
"""Retriever tool.""" from typing import TYPE_CHECKING, Any, List, Optional from llama_index.core.base.base_retriever import BaseRetriever if TYPE_CHECKING: from llama_index.core.langchain_helpers.agents.tools import LlamaIndexTool from llama_index.core.schema import ( MetadataMode, Node, NodeWithSc...
""" ================================================ Kernel Density Estimate of Species Distributions ================================================ This shows an example of a neighbors-based query (in particular a kernel density estimate) on geospatial data, using a Ball Tree built upon the Haversine distance metric...
""" ================================================ Kernel Density Estimate of Species Distributions ================================================ This shows an example of a neighbors-based query (in particular a kernel density estimate) on geospatial data, using a Ball Tree built upon the Haversine distance metric...
from llama_index.agent.azure_foundry_agent.base import AzureFoundryAgent __all__ = ["AzureFoundryAgent"]
from llama_index.agent.azure_foundry_agent.base import AzureFoundryAgent __all__ = [ "AzureFoundryAgent" ]
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple, Union import torch.nn as nn from mmcv.cnn import ConvModule from mmengine.model import BaseModule from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import OptConfigType, OptMultiConfig @MODELS.register_module()...
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple import torch.nn as nn from mmcv.cnn import ConvModule from mmengine.model import BaseModule from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import OptConfigType, OptMultiConfig @MODELS.register_module() class ...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet.models.roi_heads import PointRendRoIHead # noqa from mmdet.registry import MODELS from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet.models.roi_heads import PointRendRoIHead # noqa from mmdet.registry import MODELS from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg...
# Copyright (c) OpenMMLab. All rights reserved. import glob import os import os.path as osp import warnings from mmengine.config import Config, ConfigDict from mmengine.logging import print_log def find_latest_checkpoint(path, suffix='pth'): """Find the latest checkpoint from the working directory. Args: ...
# Copyright (c) OpenMMLab. All rights reserved. import glob import os import os.path as osp import warnings import mmcv from mmcv.utils import print_log def find_latest_checkpoint(path, suffix='pth'): """Find the latest checkpoint from the working directory. Args: path(str): The path to find checkpo...
# Copyright (c) OpenMMLab. All rights reserved. import pytest import torch import torch.nn.functional as F from mmengine.model.utils import constant_init from mmdet.models.layers import DyReLU, SELayer def test_se_layer(): with pytest.raises(AssertionError): # act_cfg sequence length must equal to 2 ...
# Copyright (c) OpenMMLab. All rights reserved. import pytest import torch import torch.nn.functional as F from mmcv.cnn import constant_init from mmdet.models.layers import DyReLU, SELayer def test_se_layer(): with pytest.raises(AssertionError): # act_cfg sequence length must equal to 2 SELayer(...
"""Standard LangChain interface tests""" import os from langchain_core.language_models import BaseChatModel from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_openai import AzureChatOpenAI OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "") OPENAI_API_BASE = os.en...
"""Standard LangChain interface tests""" import os from typing import Type from langchain_core.language_models import BaseChatModel from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_openai import AzureChatOpenAI OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")...
from .backend_utils import set_audio_backend from .case_utils import ( HttpServerMixin, is_ffmpeg_available, PytorchTestCase, skipIfNoCtcDecoder, skipIfNoCuda, skipIfNoExec, skipIfNoFFmpeg, skipIfNoKaldi, skipIfNoModule, skipIfNoQengine, skipIfNoSox, skipIfPy310, skip...
from .backend_utils import set_audio_backend from .case_utils import ( HttpServerMixin, is_ffmpeg_available, PytorchTestCase, skipIfNoCtcDecoder, skipIfNoCuda, skipIfNoExec, skipIfNoFFmpeg, skipIfNoKaldi, skipIfNoModule, skipIfNoQengine, skipIfNoSox, skipIfPy310, skip...
_base_ = [ 'mmdet::_base_/datasets/coco_instance.py', 'mmdet::_base_/schedules/schedule_1x.py', 'mmdet::_base_/default_runtime.py' ] custom_imports = dict( imports=['projects.SparseInst.sparseinst'], allow_failed_imports=False) model = dict( type='SparseInst', data_preprocessor=dict( t...
_base_ = [ 'mmdet::_base_/datasets/coco_instance.py', 'mmdet::_base_/schedules/schedule_1x.py', 'mmdet::_base_/default_runtime.py' ] custom_imports = dict( imports=['projects.SparseInst.sparseinst'], allow_failed_imports=False) model = dict( type='SparseInst', data_preprocessor=dict( t...
# Copyright (c) OpenMMLab. All rights reserved. from .batch_sampler import (AspectRatioBatchSampler, TrackAspectRatioBatchSampler) from .class_aware_sampler import ClassAwareSampler from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler from .track_img_sampler import T...
# Copyright (c) OpenMMLab. All rights reserved. from .batch_sampler import AspectRatioBatchSampler from .class_aware_sampler import ClassAwareSampler from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler from .track_img_sampler import TrackImgSampler __all__ = [ 'ClassAwareSampler', 'Aspect...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
from contextlib import contextmanager from typing import TYPE_CHECKING, Callable, Iterator from llama_index.core.llms.llm import LLM from llama_index.llms.huggingface import HuggingFaceLLM from llama_index.llms.llama_cpp import LlamaCPP if TYPE_CHECKING: from lmformatenforcer import CharacterLevelParser def bui...
from contextlib import contextmanager from typing import TYPE_CHECKING, Callable, Iterator from llama_index.core.llms.llm import LLM from llama_index.llms.huggingface import HuggingFaceLLM from llama_index.llms.llama_cpp import LlamaCPP if TYPE_CHECKING: from lmformatenforcer import CharacterLevelParser def bui...
"""Standard LangChain interface tests""" from pathlib import Path from typing import Dict, List, Literal, Type, cast from langchain_core.language_models import BaseChatModel from langchain_core.messages import AIMessage from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_openai imp...
"""Standard LangChain interface tests""" from pathlib import Path from typing import Dict, List, Literal, Type, cast from langchain_core.language_models import BaseChatModel from langchain_core.messages import AIMessage from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_openai imp...
"""Util that sends calendar events in Office 365. Free, but setup is required. See link below. https://learn.microsoft.com/en-us/graph/auth/ """ from datetime import datetime as dt from typing import List, Optional, Type from zoneinfo import ZoneInfo from langchain_core.callbacks import CallbackManagerForToolRun fro...
"""Util that sends calendar events in Office 365. Free, but setup is required. See link below. https://learn.microsoft.com/en-us/graph/auth/ """ from datetime import datetime as dt from typing import List, Optional, Type from zoneinfo import ZoneInfo from langchain_core.callbacks import CallbackManagerForToolRun fro...
# Copyright (c) OpenMMLab. All rights reserved. from .default_scope import DefaultScope from .registry import Registry, build_from_cfg from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS, RU...
# Copyright (c) OpenMMLab. All rights reserved. from .default_scope import DefaultScope from .registry import Registry, build_from_cfg from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS, RU...
import asyncio import os from typing import Dict, List import pytest import requests from jina import Flow from jina.logging.logger import JinaLogger from tests.k8s_otel.kind_wrapper import KindClusterWrapperV2 from tests.k8s_otel.util import get_last_health_check_data, parse_string_jaeger_tags @pytest.mark.asyncio...
import asyncio import os from typing import Dict, List import pytest import requests from jina import Flow from jina.logging.logger import JinaLogger from tests.k8s_otel.kind_wrapper import KindClusterWrapperV2 from tests.k8s_otel.util import get_last_health_check_data, parse_string_jaeger_tags @pytest.mark.asyncio...
import math import os import pytest import torch import torchvision from torchvision.io import _HAS_GPU_VIDEO_DECODER, VideoReader try: import av except ImportError: av = None VIDEO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "videos") @pytest.mark.skipif(_HAS_GPU_VIDEO_DECODER...
import math import os import pytest import torch from torchvision.io import _HAS_GPU_VIDEO_DECODER, VideoReader try: import av except ImportError: av = None VIDEO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "videos") @pytest.mark.skipif(_HAS_GPU_VIDEO_DECODER is False, reason="...
# Copyright (c) OpenMMLab. All rights reserved. from .config import Config, ConfigDict, DictAction from .get_config_model import get_config, get_model __all__ = ['Config', 'ConfigDict', 'DictAction', 'get_config', 'get_model']
# Copyright (c) OpenMMLab. All rights reserved. from .config import Config, ConfigDict, DictAction __all__ = ['Config', 'ConfigDict', 'DictAction']
from typing import List import torch from parameterized import parameterized from torchaudio import sox_effects from torchaudio_unittest.common_utils import ( get_sinusoid, save_wav, skipIfNoSox, TempDirMixin, torch_script, TorchaudioTestCase, ) from .common import load_params class SoxEffec...
from typing import List import torch from parameterized import parameterized from torchaudio import sox_effects from torchaudio_unittest.common_utils import ( TempDirMixin, TorchaudioTestCase, skipIfNoSox, get_sinusoid, save_wav, torch_script, ) from .common import ( load_params, ) class...
# Copyright (c) OpenMMLab. All rights reserved. import pytest import torch from mmdet.models.utils import ConvUpsample @pytest.mark.parametrize('num_layers', [0, 1, 2]) def test_conv_upsample(num_layers): num_upsample = num_layers if num_layers > 0 else 0 num_layers = num_layers if num_layers > 0 else 1 ...
import pytest import torch from mmdet.models.utils import ConvUpsample @pytest.mark.parametrize('num_layers', [0, 1, 2]) def test_conv_upsample(num_layers): num_upsample = num_layers if num_layers > 0 else 0 num_layers = num_layers if num_layers > 0 else 1 layer = ConvUpsample( 10, 5, ...
_base_ = './fast-rcnn_r50_fpn_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
_base_ = './fast_rcnn_r50_fpn_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
import math from keras.src import backend from keras.src import layers from keras.src import ops from keras.src.api_export import keras_export @keras_export("keras.layers.GaussianDropout") class GaussianDropout(layers.Layer): """Apply multiplicative 1-centered Gaussian noise. As it is a regularization layer...
import math from keras.src import backend from keras.src import layers from keras.src import ops from keras.src.api_export import keras_export @keras_export("keras.layers.GaussianDropout") class GaussianDropout(layers.Layer): """Apply multiplicative 1-centered Gaussian noise. As it is a regularization layer...
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
# Copyright (c) OpenMMLab. All rights reserved. import os import pytest import torch import torch.nn as nn from torch.distributed import destroy_process_group, init_process_group from torch.nn.parallel import DataParallel, DistributedDataParallel from mmengine.model import (MMDistributedDataParallel, ...
# Copyright (c) OpenMMLab. All rights reserved. import os import pytest import torch import torch.nn as nn from torch.distributed import destroy_process_group, init_process_group from torch.nn.parallel import DataParallel, DistributedDataParallel from mmengine.model import (MMDistributedDataParallel, ...
# Copyright (c) OpenMMLab. All rights reserved. from .base_data_element import BaseDataElement from .instance_data import InstanceData from .sampler import DefaultSampler, InfiniteSampler from .utils import pseudo_collate, worker_init_fn __all__ = [ 'BaseDataElement', 'DefaultSampler', 'InfiniteSampler', 'worker_i...
# Copyright (c) OpenMMLab. All rights reserved. from .base_data_element import BaseDataElement from .sampler import DefaultSampler, InfiniteSampler from .utils import pseudo_collate, worker_init_fn __all__ = [ 'BaseDataElement', 'DefaultSampler', 'InfiniteSampler', 'worker_init_fn', 'pseudo_collate' ]
"""XGBoost: eXtreme Gradient Boosting library. Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md """ from . import tracker # noqa from . import collective from .core import ( Booster, DataIter, DMatrix, ExtMemQuantileDMatrix, QuantileDMatrix, _py_version, build_inf...
"""XGBoost: eXtreme Gradient Boosting library. Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md """ from . import tracker # noqa from . import collective, dask from .core import ( Booster, DataIter, DMatrix, ExtMemQuantileDMatrix, QuantileDMatrix, _py_version, bui...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.image import affine_transform from keras.src.ops.image import crop_images from keras.src.ops.image import extract_patches from keras.src.ops.image import gaussian_blur from keras....
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.image import affine_transform from keras.src.ops.image import crop_images from keras.src.ops.image import extract_patches from keras.src.ops.image import hsv_to_rgb from keras.src...
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.parallel import is_module_wrapper from mmcv.runner.hooks import HOOKS, Hook @HOOKS.register_module() class YOLOXModeSwitchHook(Hook): """Switch the mode of YOLOX during training. This hook turns off the mosaic and mixup data augmentation and switches ...
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.parallel import is_module_wrapper from mmcv.runner.hooks import HOOKS, Hook @HOOKS.register_module() class YOLOXModeSwitchHook(Hook): """Switch the mode of YOLOX during training. This hook turns off the mosaic and mixup data augmentation and switches ...
from typing import Union from langchain_core.agents import AgentAction, AgentFinish from langchain_core.messages import BaseMessage from langchain_core.outputs import ChatGeneration, Generation from langchain.agents.agent import MultiActionAgentOutputParser from langchain.agents.output_parsers.tools import ( Tool...
from typing import Union from langchain_core.agents import AgentAction, AgentFinish from langchain_core.messages import BaseMessage from langchain_core.outputs import ChatGeneration, Generation from langchain.agents.agent import MultiActionAgentOutputParser from langchain.agents.output_parsers.tools import ( Tool...
"""Feature selection algorithms. These include univariate filter selection methods and the recursive feature elimination algorithm. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from ._base import SelectorMixin from ._from_model import SelectFromModel from ._mutual_info import mu...
"""Feature selection algorithms. These include univariate filter selection methods and the recursive feature elimination algorithm. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from ._base import SelectorMixin from ._from_model import SelectFromModel from ._mutual_info import mu...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa model = dict( type='LAD', data_preprocess...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa preprocess_cfg = dict( mean=[123.675, 116.28,...
import numpy as np from absl.testing import parameterized from tensorflow import data as tf_data from keras.src import backend from keras.src import layers from keras.src import testing class RandomRotationTest(testing.TestCase): @parameterized.named_parameters( ("random_rotate_neg4", -0.4), ("ra...
import numpy as np from absl.testing import parameterized from tensorflow import data as tf_data from keras.src import backend from keras.src import layers from keras.src import testing class RandomRotationTest(testing.TestCase): @parameterized.named_parameters( ("random_rotate_neg4", -0.4), ("ra...
"""Test embedding functionalities.""" from collections import defaultdict from typing import Any, Dict, List from unittest.mock import patch import pytest from llama_index.core.indices.tree.base import TreeIndex from llama_index.core.indices.tree.select_leaf_embedding_retriever import ( TreeSelectLeafEmbeddingRet...
"""Test embedding functionalities.""" from collections import defaultdict from typing import Any, Dict, List from unittest.mock import patch import pytest from llama_index.core.indices.tree.base import TreeIndex from llama_index.core.indices.tree.select_leaf_embedding_retriever import ( TreeSelectLeafEmbeddingRet...
from __future__ import annotations import math from pathlib import Path import numpy as np import pytest from packaging.version import Version, parse from tokenizers import Tokenizer from sentence_transformers import SentenceTransformer from sentence_transformers.models.StaticEmbedding import StaticEmbedding try: ...
from __future__ import annotations import math from pathlib import Path import numpy as np import pytest from tokenizers import Tokenizer from sentence_transformers import SentenceTransformer from sentence_transformers.models.StaticEmbedding import StaticEmbedding try: import model2vec except ImportError: m...
from pathlib import Path from typing import Dict import numpy as np import pytest from jina import Document, DocumentArray, Executor from ...paddle_image import ImagePaddlehubEncoder input_dim = 224 target_output_dim = 2048 num_doc = 2 test_data = np.random.rand(num_doc, 3, input_dim, input_dim) tmp_files = [] def...
from pathlib import Path from typing import Dict import numpy as np from jina import DocumentArray, Document, Executor from ...paddle_image import ImagePaddlehubEncoder input_dim = 224 target_output_dim = 2048 num_doc = 2 test_data = np.random.rand(num_doc, 3, input_dim, input_dim) tmp_files = [] def test_config():...
import importlib import os import re import types from typing import Any, Optional import numpy as np try: import torch # noqa: F401 except ImportError: torch_imported = False else: torch_imported = True try: import tensorflow as tf # type: ignore # noqa: F401 except (ImportError, TypeError): ...
import importlib import os import re import types from typing import Any, Optional import numpy as np try: import torch # noqa: F401 except ImportError: torch_imported = False else: torch_imported = True try: import tensorflow as tf # type: ignore # noqa: F401 except (ImportError, TypeError): ...
import zlib from typing import Iterator, TextIO def exact_div(x, y): assert x % y == 0 return x // y def str2bool(string): str2val = {"True": True, "False": False} if string in str2val: return str2val[string] else: raise ValueError(f"Expected one of {set(str2val.keys())}, got {st...
import zlib from typing import Iterator, TextIO def exact_div(x, y): assert x % y == 0 return x // y def str2bool(string): str2val = {"True": True, "False": False} if string in str2val: return str2val[string] else: raise ValueError(f"Expected one of {set(str2val.keys())}, got {st...
from __future__ import annotations from typing import Any, List, Optional, cast from langchain_text_splitters.base import TextSplitter, Tokenizer, split_text_on_tokens class SentenceTransformersTokenTextSplitter(TextSplitter): """Splitting text to tokens using sentence model tokenizer.""" def __init__( ...
from __future__ import annotations from typing import Any, List, Optional, cast from langchain_text_splitters.base import TextSplitter, Tokenizer, split_text_on_tokens class SentenceTransformersTokenTextSplitter(TextSplitter): """Splitting text to tokens using sentence model tokenizer.""" def __init__( ...
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents.""" from __future__ import annotations from collections.abc import Sequence from typing import Any, Callable, Optional, cast from langchain_core.callbacks.manager import Callbacks from langchain_core.documents import Document from la...
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents.""" from __future__ import annotations from typing import Any, Callable, Dict, Optional, Sequence, cast from langchain_core.callbacks.manager import Callbacks from langchain_core.documents import Document from langchain_core.language...
import sys from os import path from setuptools import find_packages from setuptools import setup if sys.version_info < (3, 7, 0): raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}') try: pkg_name = 'docarray' libinfo_py = path.join(pkg_name, '__init__.py') libinfo_content = o...
import sys from os import path from setuptools import find_packages from setuptools import setup if sys.version_info < (3, 7, 0): raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}') try: pkg_name = 'docarray' libinfo_py = path.join(pkg_name, '__init__.py') libinfo_content = o...
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' conv_cfg = dict(type='ConvWS') norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( conv_cfg=conv_cfg, norm_cfg=norm_cfg, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://jhu/resn...
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' conv_cfg = dict(type='ConvWS') norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( conv_cfg=conv_cfg, norm_cfg=norm_cfg, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://jhu/resn...
import asyncio import logging import os import threading import time from functools import wraps from uuid import uuid4 from tenacity import retry, stop_after_attempt, wait_exponential from backend.util.process import get_service_name logger = logging.getLogger(__name__) def _log_prefix(resource_name: str, conn_id...
import asyncio import logging import os import threading from functools import wraps from uuid import uuid4 from tenacity import retry, stop_after_attempt, wait_exponential from backend.util.process import get_service_name logger = logging.getLogger(__name__) def _log_prefix(resource_name: str, conn_id: str): ...
from __future__ import annotations from copy import deepcopy import pytest from sentence_transformers import SparseEncoder @pytest.fixture(scope="session") def _splade_bert_tiny_model() -> SparseEncoder: model = SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq") model.model_card_data.generate_widg...
from __future__ import annotations import pytest from sentence_transformers import SparseEncoder @pytest.fixture() def splade_bert_tiny_model() -> SparseEncoder: return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq") @pytest.fixture(scope="session") def splade_bert_tiny_model_reused() -> SparseEnc...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from typing import List, Tuple, Union from mmcv.runner import BaseModule from torch import Tensor from mmdet.core.utils import (InstanceList, OptInstanceList, OptMultiConfig, OptSamplingResultList, Sa...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from mmcv.runner import BaseModule class BaseMaskHead(BaseModule, metaclass=ABCMeta): """Base class for mask heads used in One-Stage Instance Segmentation.""" def __init__(self, init_cfg): super(BaseMaskHead, sel...
from __future__ import annotations from pathlib import Path from unittest.mock import Mock, PropertyMock import pytest import torch from sentence_transformers import SentenceTransformer from sentence_transformers.evaluation import InformationRetrievalEvaluator from sentence_transformers.util import cos_sim @pytest...
from __future__ import annotations from pathlib import Path from unittest.mock import Mock, PropertyMock import pytest import torch from sentence_transformers import SentenceTransformer from sentence_transformers.evaluation import InformationRetrievalEvaluator from sentence_transformers.util import cos_sim @pytest...
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( norm_cfg=norm_cfg, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://contrib/resnet50_gn')), neck=dict(norm_cfg=norm_cfg), roi_...
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( norm_cfg=norm_cfg, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://contrib/resnet50_gn')), neck=dict(norm_cfg=norm_cfg), roi_...
import os import subprocess import sys import pytest from xgboost import testing as tm DEMO_DIR = tm.demo_dir(__file__) PYTHON_DEMO_DIR = os.path.join(DEMO_DIR, "guide-python") @pytest.mark.skipif(**tm.no_cupy()) def test_data_iterator(): script = os.path.join(PYTHON_DEMO_DIR, "quantile_data_iterator.py") ...
import os import subprocess import sys import pytest from xgboost import testing as tm sys.path.append("tests/python") import test_demos as td # noqa @pytest.mark.skipif(**tm.no_cupy()) def test_data_iterator(): script = os.path.join(td.PYTHON_DEMO_DIR, "quantile_data_iterator.py") cmd = ["python", script...
import os import warnings from pathlib import Path import torch from torchaudio._internal import module_utils as _mod_utils # noqa: F401 _LIB_DIR = Path(__file__).parent / "lib" def _get_lib_path(lib: str): suffix = "pyd" if os.name == "nt" else "so" path = _LIB_DIR / f"{lib}.{suffix}" return path de...
import os import warnings from pathlib import Path import torch from torchaudio._internal import module_utils as _mod_utils # noqa: F401 _LIB_DIR = Path(__file__).parent / "lib" def _get_lib_path(lib: str): suffix = "pyd" if os.name == "nt" else "so" path = _LIB_DIR / f"{lib}.{suffix}" return path de...
from .filtering import ( allpass_biquad, band_biquad, bandpass_biquad, bandreject_biquad, bass_biquad, biquad, contrast, dcshift, deemph_biquad, dither, equalizer_biquad, filtfilt, flanger, gain, highpass_biquad, lfilter, lowpass_biquad, overdrive,...
from .filtering import ( allpass_biquad, band_biquad, bandpass_biquad, bandreject_biquad, bass_biquad, biquad, contrast, dcshift, deemph_biquad, dither, equalizer_biquad, filtfilt, flanger, gain, highpass_biquad, lfilter, lowpass_biquad, overdrive,...
# Copyright (c) OpenMMLab. All rights reserved. from unittest.mock import Mock from mmengine.hooks import IterTimerHook class TestIterTimerHook: def test_before_epoch(self): hook = IterTimerHook() runner = Mock() hook._before_epoch(runner) assert isinstance(hook.t, float) de...
# Copyright (c) OpenMMLab. All rights reserved. from unittest.mock import Mock from mmengine.hooks import IterTimerHook class TestIterTimerHook: def test_before_epoch(self): hook = IterTimerHook() runner = Mock() hook._before_epoch(runner) assert isinstance(hook.t, float) de...
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. import fire from llama import Llama from typing import List def main( ckpt_dir: str, tokenizer_path: str, temperature: float = 0.6, top_p...
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. import fire from llama import Llama def main( ckpt_dir: str, tokenizer_path: str, temperature: float = 0.6, top_p: float = 0.9, max_...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
from typing import TypeVar from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow T = TypeVar('T', bound='ImageTensorFlowTensor') @_register_pr...
from abc import ABC from docarray.array.storage.weaviate.backend import BackendMixin, WeaviateConfig from docarray.array.storage.weaviate.find import FindMixin from docarray.array.storage.weaviate.getsetdel import GetSetDelMixin from docarray.array.storage.weaviate.seqlike import SequenceLikeMixin __all__ = ['Storage...
from abc import ABC from .backend import BackendMixin, WeaviateConfig from .find import FindMixin from .getsetdel import GetSetDelMixin from .seqlike import SequenceLikeMixin __all__ = ['StorageMixins', 'WeaviateConfig'] class StorageMixins(FindMixin, BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC): ...
""" Pandas output parser. DEPRECATED: This class has been moved to `llama-index-experimental`. """ from typing import Any class PandasInstructionParser: """ Pandas instruction parser. DEPRECATED: This class has been moved to `llama-index-experimental`. """ def __init__(self, *args: Any, **kwa...
"""Pandas output parser. DEPRECATED: This class has been moved to `llama-index-experimental`. """ from typing import Any class PandasInstructionParser: """Pandas instruction parser. DEPRECATED: This class has been moved to `llama-index-experimental`. """ def __init__(self, *args: Any, **kwargs: A...
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If ex...
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If ex...
from typing import Any, Dict, List, Optional, Sequence, Type, Union import PIL.Image import torch from torchvision import datapoints from torchvision.prototype.datapoints import Label, OneHotLabel from torchvision.transforms.v2 import functional as F, Transform from torchvision.transforms.v2._utils import _setup_fill...
from typing import Any, Dict, List, Optional, Sequence, Type, Union import PIL.Image import torch from torchvision import datapoints from torchvision.prototype.datapoints import Label, OneHotLabel from torchvision.transforms.v2 import functional as F, Transform from torchvision.transforms.v2._utils import _setup_fill...
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class DETR(SingleStageDetector): r"""Implementation of `DETR: End-to-End Object Detection with Transformers <https://arxiv.o...
import warnings import torch from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class DETR(SingleStageDetector): r"""Implementation of `DETR: End-to-End Object Detection with Transformers <https://arxiv.org/pdf/2005.12872>`_""" def __init__(self, ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.callbacks.streamlit.streamlit_callback_handler import ( LLMThought, LLMThoughtLabeler, LLMThoughtState, StreamlitCallbackHandler, ToolRecord, ) #...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.callbacks.streamlit.streamlit_callback_handler import ( LLMThought, LLMThoughtLabeler, LLMThoughtState, StreamlitCallbackHandler, ToolRecord, ) #...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmdet.models.roi_heads.mask_heads import (DynamicMaskHead, FCNMaskHead, MaskIoUHead) from .utils import _dummy_bbox_sampling def test_mask_head_loss(): """Test mask head loss when mask tar...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmdet.models.roi_heads.mask_heads import (DynamicMaskHead, FCNMaskHead, MaskIoUHead) from .utils import _dummy_bbox_sampling def test_mask_head_loss(): """Test mask head loss when mask tar...
""" This examples trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair as input and outputs a label. Here, it output a continuous labels 0...1 to indicate the similarity between the input pair. It does NOT produce a sentence embedding and does NOT work for individual sentences. Usage:...
""" This examples trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair as input and outputs a label. Here, it output a continuous labels 0...1 to indicate the similarity between the input pair. It does NOT produce a sentence embedding and does NOT work for individual sentences. Usage:...
import base64 from typing import Any, Dict, Union, Optional from vertexai.generative_models._generative_models import SafetySettingsType from google.cloud.aiplatform_v1beta1.types import content as gapic_content_types from llama_index.core.llms import ChatMessage, MessageRole def is_gemini_model(model: str) -> bool: ...
import base64 from typing import Any, Dict, Union, Optional from vertexai.generative_models._generative_models import SafetySettingsType from google.cloud.aiplatform_v1beta1.types import content as gapic_content_types from llama_index.core.llms import ChatMessage, MessageRole def is_gemini_model(model: str) -> bool: ...
""" OPUS (http://opus.nlpl.eu/) is a great collection of different parallel datasets for more than 400 languages. On the website, you can download parallel datasets for many languages in different formats. I found that the format "Bottom-left triangle: download plain text files (MOSES/GIZA++)" requires minimal overhea...
""" OPUS (http://opus.nlpl.eu/) is a great collection of different parallel datasets for more than 400 languages. On the website, you can download parallel datasets for many languages in different formats. I found that the format "Bottom-left triangle: download plain text files (MOSES/GIZA++)" requires minimal overhea...
__version__ = '0.30.0a3' import logging from docarray.array import DocList, DocVec from docarray.base_doc.doc import BaseDoc __all__ = ['BaseDoc', 'DocList', 'DocVec'] logger = logging.getLogger('docarray') handler = logging.StreamHandler() formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s") ha...
__version__ = '0.30.0a3' import logging from docarray.array import DocArray, DocArrayStacked from docarray.base_doc.doc import BaseDoc __all__ = ['BaseDoc', 'DocArray', 'DocArrayStacked'] logger = logging.getLogger('docarray') handler = logging.StreamHandler() formatter = logging.Formatter("%(levelname)s - %(name)...
""" Tatoeba (https://tatoeba.org/) is a collection of sentences and translation, mainly aiming for language learning. It is available for more than 300 languages. This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like """ import gzip import os import tarfile impo...
""" Tatoeba (https://tatoeba.org/) is a collection of sentences and translation, mainly aiming for language learning. It is available for more than 300 languages. This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like """ import os import sentence_transformers imp...
import os import numpy as np import pytest from docarray import BaseDoc, DocList, DocVec from docarray.documents import ImageDoc from docarray.typing import NdArray, TorchTensor class MyDoc(BaseDoc): embedding: NdArray text: str image: ImageDoc @pytest.mark.slow @pytest.mark.parametrize( 'protocol...
import os import numpy as np import pytest from docarray import BaseDoc, DocList, DocVec from docarray.documents import ImageDoc from docarray.typing import NdArray class MyDoc(BaseDoc): embedding: NdArray text: str image: ImageDoc @pytest.mark.slow @pytest.mark.parametrize( 'protocol', ['pickle-a...
import pyarrow as pa import pytest from datasets.builder import InvalidConfigName from datasets.data_files import DataFilesList from datasets.packaged_modules.arrow.arrow import Arrow, ArrowConfig @pytest.fixture def arrow_file_streaming_format(tmp_path): filename = tmp_path / "stream.arrow" testdata = [[1, ...
import pytest from datasets.builder import InvalidConfigName from datasets.data_files import DataFilesList from datasets.packaged_modules.arrow.arrow import ArrowConfig def test_config_raises_when_invalid_name() -> None: with pytest.raises(InvalidConfigName, match="Bad characters"): _ = ArrowConfig(name=...
from __future__ import annotations from typing import Callable try: from typing import Self except ImportError: from typing_extensions import Self from torch import Tensor, nn from sentence_transformers.models.Module import Module from sentence_transformers.util import fullname, import_from_string class D...
from __future__ import annotations import json import os from typing import Callable import torch from safetensors.torch import load_model as load_safetensors_model from safetensors.torch import save_model as save_safetensors_model from torch import Tensor, nn from sentence_transformers.util import fullname, import_...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Callable import numpy as np import pytest from jina import Document, DocumentArray, Flow from ...audioclip_image import AudioCLIPImageEncoder @pytest.mark.parametrize("request_size", [1, 10...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Callable import numpy as np import pytest from jina import Document, DocumentArray, Flow from jinahub.encoder.audioclip_image import AudioCLIPImageEncoder @pytest.mark.parametrize("request_...
from setuptools import find_packages, setup with open("README.md", mode="r", encoding="utf-8") as readme_file: readme = readme_file.read() setup( name="sentence-transformers", version="3.1.0.dev0", author="Nils Reimers, Tom Aarsen", author_email="info@nils-reimers.de", description="Multilingu...
from setuptools import find_packages, setup with open("README.md", mode="r", encoding="utf-8") as readme_file: readme = readme_file.read() setup( name="sentence-transformers", version="3.1.0.dev0", author="Nils Reimers, Tom Aarsen", author_email="info@nils-reimers.de", description="Multilingu...
"""Rss reader.""" from typing import List, Any, Union import logging from llama_index.core.readers.base import BasePydanticReader from llama_index.core.schema import Document logger = logging.getLogger(__name__) class RssReader(BasePydanticReader): """RSS reader. Reads content from an RSS feed. """ ...
"""Rss reader.""" from typing import List from llama_index.core.readers.base import BasePydanticReader from llama_index.core.schema import Document class RssReader(BasePydanticReader): """RSS reader. Reads content from an RSS feed. """ is_remote: bool = True html_to_text: bool = False @c...
"""Tool for the SemanticScholar API.""" from typing import Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.utilities.semanticscholar import SemanticScholarAPIWrapper class Semantsc...
"""Tool for the SemanticScholar API.""" from typing import Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.utilities.semanticscholar import SemanticScholarAPIWrapper class Semantsc...
# Copyright (c) OpenMMLab. All rights reserved. import os import os.path as osp import warnings import numpy as np import onnx import onnxruntime as ort import torch import torch.nn as nn ort_custom_op_path = '' try: from mmcv.ops import get_onnxruntime_op_path ort_custom_op_path = get_onnxruntime_op_path() e...
import os import os.path as osp import warnings import numpy as np import onnx import onnxruntime as ort import torch import torch.nn as nn ort_custom_op_path = '' try: from mmcv.ops import get_onnxruntime_op_path ort_custom_op_path = get_onnxruntime_op_path() except (ImportError, ModuleNotFoundError): wa...
import os from pathlib import Path from jina import Executor def test_config(): ex = Executor.load_config( str(Path(__file__).parents[2] / 'config.yml'), override_with={ 'query_features': ['query'], 'match_features': ['match'], 'relevance_label': 'rel', ...
import os def test_init(ranker): assert not ranker.model.is_fitted() def test_train(ranker, documents_to_train_stub_model): ranker.train(docs=documents_to_train_stub_model) assert ranker.model.is_fitted() def test_train_with_weights(ranker_with_weight, documents_to_train_stub_model): """Weight fie...
"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**. The LangChain Expression Language (LCEL) offers a declarative method to build production-grade programs that harness the power of LLMs. Programs created using LCEL and LangChain Runnables inherently support synchronous, asynchronous, batch, a...
"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**. The LangChain Expression Language (LCEL) offers a declarative method to build production-grade programs that harness the power of LLMs. Programs created using LCEL and LangChain Runnables inherently support synchronous, asynchronous, batch, a...
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar('T', bound='VideoNdArray')...
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar('T', bound='VideoNdArray') if TYPE_CHECKING: from pydantic import BaseConfig ...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet.data_elements import DetDataSample from mmdet.testing import demo_mm_inputs, get_detector_cfg from mmdet.utils import register_all_modules class TestSingleSta...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet.core import DetDataSample from mmdet.testing import demo_mm_inputs, get_detector_cfg from mmdet.utils import register_all_modules class TestSingleStageInstanc...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Callable, List import pytest from jina import DocumentArray, Flow from ...transform_encoder import TransformerTorchEncoder @pytest.mark.parametrize("request_size", [1, 10, 50, 100]) def test_inte...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Callable, List import pytest from jina import DocumentArray, Flow from ...transform_encoder import TransformerTorchEncoder @pytest.mark.parametrize("request_size", [1, 10, 50, 100]) def test_inte...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.hooks import Hook from mmdet.registry import HOOKS @HOOKS.register_module() class FastStopTrainingHook(Hook): """Set runner's epoch information to the model.""" def __init__(self, by_epoch, save_ckpt=False, stop_iter_or_epoch=5): self.by_...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.hooks import Hook from mmdet.registry import HOOKS @HOOKS.register_module() class FastStopTrainingHook(Hook): """Set runner's epoch information to the model.""" def after_train_iter(self, runner, batch_idx: int, data_batch: None, ...
import os from pathlib import Path import pytest from jina import Flow from jina.excepts import RuntimeFailToStart from jina.orchestrate.deployments import Deployment from jina.parsers import set_deployment_parser from jina.serve.executors import BaseExecutor cur_dir = os.path.dirname(os.path.abspath(__file__)) de...
import os from pathlib import Path import pytest from jina import Flow from jina.excepts import RuntimeFailToStart from jina.orchestrate.deployments import Deployment from jina.parsers import set_deployment_parser from jina.serve.executors import BaseExecutor cur_dir = os.path.dirname(os.path.abspath(__file__)) de...
# Copyright (c) OpenMMLab. All rights reserved. from .vis_backend import (BaseVisBackend, ClearMLVisBackend, LocalVisBackend, MLflowVisBackend, NeptuneVisBackend, TensorboardVisBackend, WandbVisBackend) from .visualizer import Visualizer __all__ = [ 'Visualizer',...
# Copyright (c) OpenMMLab. All rights reserved. from .vis_backend import (BaseVisBackend, ClearMLVisBackend, LocalVisBackend, MLflowVisBackend, TensorboardVisBackend, WandbVisBackend) from .visualizer import Visualizer __all__ = [ 'Visualizer', 'BaseVisBackend', ...