input: string, 33-5k characters
output: string, 32-5k characters
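The two columns above, input and output, are strings of roughly 33-5k and 32-5k characters; each row pairs two versions of the same source file. Below is a minimal sketch of iterating such pairs, assuming the rows are served as a Hugging Face dataset with exactly those two string columns; the dataset id "your-org/code-edit-pairs" is a hypothetical placeholder, since no id appears in this dump.

# A hedged sketch, not the canonical loader for this data.
# Assumptions: a Hugging Face dataset with string columns "input" and
# "output"; the dataset id below is hypothetical.
from datasets import load_dataset

ds = load_dataset("your-org/code-edit-pairs", split="train")  # hypothetical id
for row in ds.select(range(3)):  # peek at the first three pairs
    before, after = row["input"], row["output"]
    print(f"input: {len(before)} chars | output: {len(after)} chars")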
import json from typing import List import requests from langchain_core.documents import Document from langchain_core.utils import secret_from_env from pydantic import BaseModel, Field, SecretStr class BraveSearchWrapper(BaseModel): """Wrapper around the Brave search engine.""" api_key: SecretStr = Field( ...
import json from typing import List import requests from langchain_core.documents import Document from pydantic import BaseModel, Field class BraveSearchWrapper(BaseModel): """Wrapper around the Brave search engine.""" api_key: str """The API key to use for the Brave search engine.""" search_kwargs:...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Any, Callable, Optional, Union from torch.testing import assert_allclose as _assert_allclose from mmengine.utils import digit_version from mmengine.utils.dl_utils import TORCH_VERSION def assert_allclose( actual: Any, expected: Any, rtol...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Any, Callable, Optional, Union from torch.testing import assert_allclose as _assert_allclose from mmengine.utils import TORCH_VERSION, digit_version def assert_allclose( actual: Any, expected: Any, rtol: Optional[float] = None, atol:...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Sequence from mmengine.hooks import Hook from mmengine.model import is_model_wrapper from mmdet.registry import HOOKS @HOOKS.register_module() class YOLOXModeSwitchHook(Hook): """Switch the mode of YOLOX during training. This hook turns off...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Sequence from mmengine.hooks import Hook from mmengine.model import is_model_wrapper from mmdet.registry import HOOKS @HOOKS.register_module() class YOLOXModeSwitchHook(Hook): """Switch the mode of YOLOX during training. This hook turns off...
import numpy as np import pytest from tensorflow import data as tf_data from keras.src import backend from keras.src import layers from keras.src import testing class StringLookupTest(testing.TestCase): # TODO: increase coverage. Most features aren't being tested. def test_config(self): layer = laye...
import numpy as np from tensorflow import data as tf_data from keras.src import backend from keras.src import layers from keras.src import testing class StringLookupTest(testing.TestCase): # TODO: increase coverage. Most features aren't being tested. def test_config(self): layer = layers.StringLooku...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmdet.models.dense_heads import VFNetHead def test_vfnet_head_loss(): """Tests vfnet head loss when truth is empty and non-empty.""" s = 256 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, 'pad...
import mmcv import torch from mmdet.models.dense_heads import VFNetHead def test_vfnet_head_loss(): """Tests vfnet head loss when truth is empty and non-empty.""" s = 256 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, 'pad_shape': (s, s, 3) }] train_cfg = mmcv.C...
_base_ = ['./yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py'] data_root = 'data/MOT20/' img_scale = (1600, 896) # width, height # model settings model = dict( data_preprocessor=dict(batch_augments=[ dict(type='BatchSyncRandomResize', random_size_range=(640, 1152)) ])) train_pipelin...
_base_ = ['./yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py'] data_root = 'data/MOT20/' img_scale = (1600, 896) # width, height # model settings model = dict( data_preprocessor=dict(batch_augments=[ dict(type='BatchSyncRandomResize', random_size_range=(640, 1152)) ])) train_pipelin...
""" This is a simple application for sparse encoder: Computing embeddings. we have multiple sentences and we want to compute their embeddings. The embeddings are sparse, meaning that most of the values are zero. The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation. w...
""" This is a simple application for sparse encoder: Computing embeddings. we have multiple sentences and we want to compute their embeddings. The embeddings are sparse, meaning that most of the values are zero. The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation. w...
_base_ = 'tridentnet_r50-caffe_1x_coco.py' train_pipeline = [ dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomChoiceResize', scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (133...
_base_ = 'tridentnet_r50-caffe_1x_coco.py' train_pipeline = [ dict( type='LoadImageFromFile', file_client_args={{_base_.file_client_args}}), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomChoiceResize', scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 7...
"""String output parser.""" from langchain_core.output_parsers.transform import BaseTransformOutputParser class StrOutputParser(BaseTransformOutputParser[str]): """OutputParser that parses LLMResult into the top likely string.""" @classmethod def is_lc_serializable(cls) -> bool: """StrOutputPars...
"""String output parser.""" from langchain_core.output_parsers.transform import BaseTransformOutputParser class StrOutputParser(BaseTransformOutputParser[str]): """OutputParser that parses LLMResult into the top likely string.""" @classmethod def is_lc_serializable(cls) -> bool: """StrOutputPars...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine import Config # isort:skip cfg = Config.fromfile('tests/data/config/py_config/simple_config.py') item5 = cfg.item1[0] + cfg.item2.a
# Copyright (c) OpenMMLab. All rights reserved. from mmcv import Config # isort:skip cfg = Config.fromfile('tests/data/config/py_config/simple_config.py') item5 = cfg.item1[0] + cfg.item2.a
import sys import pytest from fastapi._compat import PYDANTIC_V2 from inline_snapshot import Snapshot needs_py39 = pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9+") needs_py310 = pytest.mark.skipif( sys.version_info < (3, 10), reason="requires python3.10+" ) needs_pydanticv2 = pytest.mar...
import sys import pytest from fastapi._compat import PYDANTIC_V2 needs_py39 = pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9+") needs_py310 = pytest.mark.skipif( sys.version_info < (3, 10), reason="requires python3.10+" ) needs_pydanticv2 = pytest.mark.skipif(not PYDANTIC_V2, reason="req...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmdet.core import bbox2roi from mmdet.models.roi_heads.bbox_heads import SABLHead from .utils import _dummy_bbox_sampling def test_sabl_bbox_head_loss(): """Tests bbox head loss when truth is empty and non-empty.""" self = SABLHead...
import mmcv import torch from mmdet.core import bbox2roi from mmdet.models.roi_heads.bbox_heads import SABLHead from .utils import _dummy_bbox_sampling def test_sabl_bbox_head_loss(): """Tests bbox head loss when truth is empty and non-empty.""" self = SABLHead( num_classes=4, cls_in_channels...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from mmcv.runner import BaseModule from ...builder import build_loss class BasePanopticFusionHead(BaseModule, metaclass=ABCMeta): """Base class for panoptic heads.""" def __init__(self, num_things_class...
from abc import ABCMeta, abstractmethod from mmcv.runner import BaseModule from ...builder import build_loss class BasePanopticFusionHead(BaseModule, metaclass=ABCMeta): """Base class for panoptic heads.""" def __init__(self, num_things_classes=80, num_stuff_classes=53, ...
import os import numpy as np import pytest import torch from pydantic.tools import parse_obj_as from docarray import BaseDocument from docarray.typing import ( AudioNdArray, AudioTorchTensor, VideoNdArray, VideoTorchTensor, ) from docarray.utils.misc import is_tf_available tf_available = is_tf_availa...
import os import numpy as np import pytest import torch from pydantic.tools import parse_obj_as from docarray import BaseDocument from docarray.typing import ( AudioNdArray, AudioTorchTensor, VideoNdArray, VideoTorchTensor, ) from docarray.utils.misc import is_tf_available tf_available = is_tf_availa...
import unittest import torch from transformers import AutoTokenizer, Gemma2Config, Gemma2Model from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, Lumina2Text2ImgPipeline, Lumina2Transformer2DModel, ) from ..test_pipelines_common import PipelineTesterMixin class Lumina2Text2ImgP...
import unittest import numpy as np import torch from transformers import AutoTokenizer, Gemma2Config, Gemma2Model from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, Lumina2Text2ImgPipeline, Lumina2Transformer2DModel, ) from diffusers.utils.testing_utils import torch_device from ....
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp from mmengine.config import Config, DictAction from mmengine.evaluator import DumpResults from mmengine.runner import Runner from mmdet.engine.hooks.utils import trigger_visualization_hook from mmdet.registry import RUNNER...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp from mmengine.config import Config, DictAction from mmengine.runner import Runner from mmdet.engine.hooks.utils import trigger_visualization_hook from mmdet.registry import RUNNERS from mmdet.utils import add_dump_metric, ...
_base_ = '../ssd/ssd512_coco.py' model = dict( bbox_head=dict(type='PISASSDHead'), train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
_base_ = '../ssd/ssd512_coco.py' model = dict( bbox_head=dict(type='PISASSDHead'), train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) default_hooks = dict( optimizer=dict( _delete_=True, type='OptimizerHook', grad_clip=dict(max_norm=35, norm_type=2)))
from __future__ import annotations import math from pathlib import Path import numpy as np import pytest from tokenizers import Tokenizer from sentence_transformers import SentenceTransformer from sentence_transformers.models.StaticEmbedding import StaticEmbedding try: import model2vec except ImportError: m...
from __future__ import annotations import math from pathlib import Path import numpy as np import pytest from tokenizers import Tokenizer from sentence_transformers import SentenceTransformer from sentence_transformers.models.StaticEmbedding import StaticEmbedding try: import model2vec except ImportError: m...
from prisma.models import User from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock from backend.blocks.text import FillTextTemplateBlock from backend.data import graph from backend.data.graph import create_graph from backend.data.user import get_or_create_user from backend.util.test import SpinTestSe...
from prisma.models import User from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock from backend.blocks.text import FillTextTemplateBlock from backend.data import graph from backend.data.graph import create_graph from backend.data.user import get_or_create_user from backend.util.test import SpinTestSe...
# Copyright (c) OpenMMLab. All rights reserved. import logging from typing import Any, List, Optional, Sequence, Tuple import torch from torch.nn.parameter import Parameter from torch.nn.utils import clip_grad from mmengine.data import BaseDataSample from mmengine.registry import HOOKS from .hook import Hook DATA_BA...
# Copyright (c) OpenMMLab. All rights reserved. import logging from typing import Any, List, Optional, Sequence, Tuple import torch from torch.nn.parameter import Parameter from torch.nn.utils import clip_grad from mmengine.data import BaseDataSample from mmengine.registry import HOOKS from .hook import Hook DATA_BA...
from jina.serve.runtimes.gateway.websocket.gateway import WebSocketGateway
import asyncio from jina.serve.runtimes.gateway import GatewayRuntime from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app __all__ = ['WebSocketGatewayRuntime'] from jina.serve.runtimes.gateway.websocket.gateway import WebSocketGateway class WebSocketGatewayRuntime(GatewayRuntime): """Runtime ...
""" =================================== How to write your own v2 transforms =================================== .. note:: Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_ or :ref:`go to the end <sphx_glr_downlo...
""" =================================== How to write your own v2 transforms =================================== .. note:: Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_ or :ref:`go to the end <sphx_glr_downlo...
import unittest import torch import torchaudio.prototype.functional as F from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script class TorchScriptConsistencyTestImpl(TestBaseMixin): def _assert_consistency(self, func, inputs, shape_only=False): inputs_ = [] for i i...
import unittest import torch import torchaudio.prototype.functional as F from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script class TorchScriptConsistencyTestImpl(TestBaseMixin): def _assert_consistency(self, func, inputs, shape_only=False): inputs_ = [] for i i...
from __future__ import annotations from typing import Any import PIL.Image import torch from ._tv_tensor import TVTensor class Image(TVTensor): """:class:`torch.Tensor` subclass for images with shape ``[..., C, H, W]``. .. note:: In the :ref:`transforms <transforms>`, ``Image`` instances are larg...
from __future__ import annotations from typing import Any, Optional, Union import PIL.Image import torch from ._tv_tensor import TVTensor class Image(TVTensor): """:class:`torch.Tensor` subclass for images with shape ``[..., C, H, W]``. .. note:: In the :ref:`transforms <transforms>`, ``Image`` i...
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.utils import Registry, build_from_cfg MATCH_COST = Registry('Match Cost') def build_match_cost(cfg, default_args=None): """Builder of match cost.""" return build_from_cfg(cfg, MATCH_COST, default_args)
from mmcv.utils import Registry, build_from_cfg MATCH_COST = Registry('Match Cost') def build_match_cost(cfg, default_args=None): """Builder of match cost.""" return build_from_cfg(cfg, MATCH_COST, default_args)
__version__ = '0.17.1' import os from docarray.document import Document from docarray.array import DocumentArray from docarray.dataclasses import dataclass, field if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
__version__ = '0.17.0' import os from docarray.document import Document from docarray.array import DocumentArray from docarray.dataclasses import dataclass, field if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
import pytest from jina.enums import GatewayProtocolType from jina.helper import ArgNamespace from jina.parsers import set_gateway_parser, set_pod_parser @pytest.mark.parametrize( 'port,expected_port', [ ('12345', [12345]), ([12345], [12345]), ([12345, 12344], [12345, 12344]), ], ...
import pytest from jina.enums import GatewayProtocolType from jina.helper import ArgNamespace from jina.parsers import set_gateway_parser, set_pod_parser @pytest.mark.parametrize( 'port,expected_port', [ ('12345', [12345]), ([12345], [12345]), ([12345, 12344], [12345, 12344]), ], ...
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch.nn as nn from mmengine.model import BaseModule from mmdet.models.backbones import ResNet from mmdet.models.layers import ResLayer as _ResLayer from mmdet.registry import MODELS @MODELS.register_module() class ResLayer(BaseModule): def...
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch.nn as nn from mmengine.model import BaseModule from mmdet.models.backbones import ResNet from mmdet.models.utils import ResLayer as _ResLayer from mmdet.registry import MODELS @MODELS.register_module() class ResLayer(BaseModule): def ...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmdet.models.dense_heads import TOODHead def test_tood_head_loss(): """Tests paa head loss when truth is empty and non-empty.""" s = 256 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, 'pad_sh...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmdet.models.dense_heads import TOODHead def test_paa_head_loss(): """Tests paa head loss when truth is empty and non-empty.""" s = 256 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, 'pad_sha...
__version__ = '0.13.22' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
__version__ = '0.13.21' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from pathlib import Path import numpy as np import pytest from jina import Document, DocumentArray, Executor from transformer_tf_text_encode import TransformerTFTextEncoder target_dim = 768 @pytest.fixture() ...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from pathlib import Path import numpy as np import pytest from jina import Document, DocumentArray, Executor from transformer_tf_text_encode import TransformerTFTextEncoder target_dim = 768 @pytest.fixture() ...
import time from typing import Callable from pydantic import Field from docarray import BaseDoc from docarray.typing import NdArray N_DIM = 10 class SimpleSchema(BaseDoc): text: str = Field(index_name='text_index') number: int embedding: NdArray[10] = Field(dim=10, index_name="vector_index") class Si...
import time from typing import Callable from pydantic import Field from docarray import BaseDoc from docarray.typing import NdArray N_DIM = 10 class SimpleSchema(BaseDoc): text: str = Field(index_name='text_index') number: int embedding: NdArray[10] = Field(dim=10, index_name="vector_index") class Si...
""" Example of training with Dask on CPU ==================================== """ from dask import array as da from dask.distributed import Client, LocalCluster from xgboost import dask as dxgb from xgboost.dask import DaskDMatrix def main(client): # generate some random data for demonstration m = 100000 ...
""" Example of training with Dask on CPU ==================================== """ from dask import array as da from dask.distributed import Client, LocalCluster from xgboost import dask as dxgb from xgboost.dask import DaskDMatrix def main(client): # generate some random data for demonstration m = 100000 ...
"""langchain-core version information and utilities.""" VERSION = "0.3.59"
"""langchain-core version information and utilities.""" VERSION = "0.3.58"
""" This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well even when removing some layers. It generates sentence embeddings that can be compared using cosine-simi...
""" This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well even when removing some layers. It generates sentence embeddings that can be compared using cosine-simi...
_base_ = './cascade-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py' model = dict( backbone=dict( stem_channels=128, depth=101, init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest101')))
_base_ = './cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py' model = dict( backbone=dict( stem_channels=128, depth=101, init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest101')))
# Copyright (c) OpenMMLab. All rights reserved. from .checkpoint_hook import CheckpointHook from .ema_hook import EMAHook from .empty_cache_hook import EmptyCacheHook from .hook import Hook from .iter_timer_hook import IterTimerHook from .logger_hook import LoggerHook from .naive_visualization_hook import NaiveVisualiz...
# Copyright (c) OpenMMLab. All rights reserved. from .checkpoint_hook import CheckpointHook from .ema_hook import EMAHook from .empty_cache_hook import EmptyCacheHook from .hook import Hook from .iter_timer_hook import IterTimerHook from .logger_hook import LoggerHook from .naive_visualization_hook import NaiveVisualiz...
import numpy as np import pytest import lightgbm @pytest.fixture(scope="function") def missing_module_cffi(monkeypatch): """Mock 'cffi' not being importable""" monkeypatch.setattr(lightgbm.compat, "CFFI_INSTALLED", False) monkeypatch.setattr(lightgbm.basic, "CFFI_INSTALLED", False) @pytest.fixture(scop...
import numpy as np import pytest @pytest.fixture(scope="function") def rng(): return np.random.default_rng() @pytest.fixture(scope="function") def rng_fixed_seed(): return np.random.default_rng(seed=42)
from keras.src.backend.common.tensor_attributes import get_tensor_attr from keras.src.backend.common.tensor_attributes import set_tensor_attr def set_keras_mask(x, mask): return set_tensor_attr(x, "_keras_mask", mask) def get_keras_mask(x): return get_tensor_attr(x, "_keras_mask")
import weakref from keras.src.backend.common import global_state def set_keras_mask(x, mask): try: x._keras_mask = mask except AttributeError: if mask is None: return mask_dict = global_state.get_global_attribute("keras_mask_dict") if mask_dict is None: ...
"""Test Azure AI Search wrapper.""" from langchain_core.documents import Document from langchain_community.retrievers.azure_ai_search import ( AzureAISearchRetriever, AzureCognitiveSearchRetriever, ) def test_azure_ai_search_invoke() -> None: """Test valid call to Azure AI Search. In order to run t...
"""Test Azure AI Search wrapper.""" from langchain_core.documents import Document from langchain_community.retrievers.azure_ai_search import ( AzureAISearchRetriever, AzureCognitiveSearchRetriever, ) def test_azure_ai_search_invoke() -> None: """Test valid call to Azure AI Search. In order to run t...
import logging import time from abc import ABC, abstractmethod from typing import ClassVar from backend.data.model import OAuth2Credentials from backend.integrations.providers import ProviderName logger = logging.getLogger(__name__) class BaseOAuthHandler(ABC): # --8<-- [start:BaseOAuthHandler1] PROVIDER_NA...
import logging import time from abc import ABC, abstractmethod from typing import ClassVar from backend.data.model import OAuth2Credentials logger = logging.getLogger(__name__) class BaseOAuthHandler(ABC): # --8<-- [start:BaseOAuthHandler1] PROVIDER_NAME: ClassVar[str] DEFAULT_SCOPES: ClassVar[list[str]...
from __future__ import annotations from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import ( SparseBinaryClassificationEvaluator, ) from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import ( SparseEmbeddingSimilarityEvaluator, ) from...
from __future__ import annotations from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import ( SparseBinaryClassificationEvaluator, ) from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import ( SparseEmbeddingSimilarityEvaluator, ) from...
_base_ = ['./mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa model = dict( backbone=dict( embed_dims=192, num_heads=[6, 12, 24, 48], init_cfg=dict(...
_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa model = dict( backbone=dict( embed_dims=192, num_heads=[6, 12, 24, 48], init_cfg=dict(t...
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
import pytest from langchain_openai import ChatOpenAI, OpenAI _EXPECTED_NUM_TOKENS = { "ada": 17, "babbage": 17, "curie": 17, "davinci": 17, "gpt-4": 12, "gpt-4-32k": 12, "gpt-3.5-turbo": 12, "o1": 12, "o3": 12, "gpt-4o": 11, } _MODELS = models = ["ada", "babbage", "curie", "d...
import pytest from langchain_openai import ChatOpenAI, OpenAI _EXPECTED_NUM_TOKENS = { "ada": 17, "babbage": 17, "curie": 17, "davinci": 17, "gpt-4": 12, "gpt-4-32k": 12, "gpt-3.5-turbo": 12, } _MODELS = models = ["ada", "babbage", "curie", "davinci"] _CHAT_MODELS = ["gpt-4", "gpt-4-32k",...
import unittest import torch import torchaudio.functional as F from parameterized import parameterized from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoSox, TorchaudioTestCase from .functional_impl import Functional, FunctionalCPUOnly class TestFunctionalFloat32(Functional, FunctionalCPUOnly, P...
import unittest import torch import torchaudio.functional as F from parameterized import parameterized from torchaudio_unittest.common_utils import ( PytorchTestCase, skipIfNoSox, TorchaudioTestCase, ) from .functional_impl import Functional, FunctionalCPUOnly class TestFunctionalFloat32(Functional, Fun...
"""Utility functions for validating Ollama models.""" from httpx import ConnectError from ollama import Client, ResponseError def validate_model(client: Client, model_name: str) -> None: """Validate that a model exists in the Ollama instance. Args: client: The Ollama client. model_name: The ...
"""Utility functions for validating Ollama models.""" from httpx import ConnectError from ollama import Client, ResponseError def validate_model(client: Client, model_name: str) -> None: """Validate that a model exists in the Ollama instance. Args: client: The Ollama client. model_name: The ...
"""Tool for the Wikipedia API.""" from typing import Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.utilities.wikipedia import WikipediaAPIWrapper class WikipediaQueryInput(BaseMo...
"""Tool for the Wikipedia API.""" from typing import Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.utilities.wikipedia import WikipediaAPIWrapper class WikipediaQueryInput(BaseMo...
_base_ = 'mask-rcnn_r50_fpg_crop640-50e_coco.py' model = dict( neck=dict(out_channels=128, inter_channels=128), rpn_head=dict(in_channels=128), roi_head=dict( bbox_roi_extractor=dict(out_channels=128), bbox_head=dict(in_channels=128), mask_roi_extractor=dict(out_channels=128), ...
_base_ = 'mask_rcnn_r50_fpg_crop640_50e_coco.py' model = dict( neck=dict(out_channels=128, inter_channels=128), rpn_head=dict(in_channels=128), roi_head=dict( bbox_roi_extractor=dict(out_channels=128), bbox_head=dict(in_channels=128), mask_roi_extractor=dict(out_channels=128), ...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class TOOD(SingleStageDetector): r"""Implementation of `TOOD: Task-aligned One-stage Object Det...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class TOOD(SingleStageDetector): r"""Implementation of `TOOD: Task-aligned One-stage Object Det...
import os from abc import abstractmethod from unittest import mock import pytest from langchain_core.embeddings import Embeddings from pydantic import SecretStr from langchain_tests.base import BaseStandardTests class EmbeddingsTests(BaseStandardTests): """ :private: """ @property @abstractmeth...
import os from abc import abstractmethod from typing import Tuple, Type from unittest import mock import pytest from langchain_core.embeddings import Embeddings from pydantic import SecretStr from langchain_tests.base import BaseStandardTests class EmbeddingsTests(BaseStandardTests): """ :private: """ ...
_base_ = [ './faster_rcnn_r50_fpn.py', './mot_challenge.py', '../../../configs/_base_/default_runtime.py' ] model = dict( type='Tracktor', pretrains=dict( detector= # noqa: E251 'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth', ...
_base_ = [ './faster_rcnn_r50_fpn.py', './mot_challenge.py', '../../../configs/_base_/default_runtime.py' ] model = dict( type='Tracktor', pretrains=dict( detector= # noqa: E251 'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth', ...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
from docarray import BaseDoc from docarray.typing import Mesh3DUrl def test_set_mesh_url(): class MyDocument(BaseDoc): mesh_url: Mesh3DUrl d = MyDocument(mesh_url="https://jina.ai/mesh.obj") assert isinstance(d.mesh_url, Mesh3DUrl) assert d.mesh_url == "https://jina.ai/mesh.obj"
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Optional, List, Dict import hnswlib import numpy as np from jina import Executor, requests, DocumentArray, Document from jina_commons import get_logger from jina_commons.indexers.dump import import...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Optional, List, Dict import hnswlib import numpy as np from jina import Executor, requests, DocumentArray, Document from jina_commons import get_logger from jina_commons.indexers.dump import import...
_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py'] # optimizer model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) optim_wrapper = dict( optimizer=dict(type='SGD', lr=0.01, momentum=0.9,...
_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py'] # optimizer model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) optim_wrapper = dict( optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='disk') tra...
# dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='disk') tra...
from __future__ import annotations import re from typing import TYPE_CHECKING, Any if TYPE_CHECKING: import numpy as np from torch import Tensor from sentence_transformers.SentenceTransformer import SentenceTransformer class SentenceEvaluator: """ Base class for all evaluators. Notably, this cl...
from __future__ import annotations import re from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from sentence_transformers.SentenceTransformer import SentenceTransformer class SentenceEvaluator: """ Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primar...
from torch import Tensor from torch import nn from typing import Dict import os import json class Dropout(nn.Module): """Dropout layer. Args: dropout: Sets a dropout value for dense layer. """ def __init__(self, dropout: float = 0.2): super(Dropout, self).__init__() self.drop...
from torch import Tensor from torch import nn from typing import Dict import os import json class Dropout(nn.Module): """Dropout layer. :param dropout: Sets a dropout value for dense layer. """ def __init__(self, dropout: float = 0.2): super(Dropout, self).__init__() self.dropout = d...
import subprocess import sys from unittest.mock import patch import fastapi.cli import pytest def test_fastapi_cli(): result = subprocess.run( [ sys.executable, "-m", "coverage", "run", "-m", "fastapi", "dev", ...
import subprocess import sys from unittest.mock import patch import fastapi.cli import pytest def test_fastapi_cli(): result = subprocess.run( [ sys.executable, "-m", "coverage", "run", "-m", "fastapi", "dev", ...
""" Computes embeddings """ import numpy as np from sentence_transformers import SentenceTransformer def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None: """ Test that encode(output_value='token_embeddings') works """ model = paraphrase_distilroberta...
""" Computes embeddings """ import numpy as np from sentence_transformers import SentenceTransformer def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None: """ Test that encode(output_value='token_embeddings') works :return: """ model = paraphrase_...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model, not multilingual, but we hope to see some on the hub soon m...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model, not multilingual, but we hope to see some on the hub soon m...
# Copyright (c) OpenMMLab. All rights reserved. from .backbones import * # noqa: F401,F403 from .data_preprocessors import * # noqa: F401,F403 from .dense_heads import * # noqa: F401,F403 from .detectors import * # noqa: F401,F403 from .layers import * # noqa: F401,F403 from .losses import * # noqa: F401,F403 fro...
# Copyright (c) OpenMMLab. All rights reserved. from .backbones import * # noqa: F401,F403 from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, ROI_EXTRACTORS, SHARED_HEADS, build_backbone, build_detector, build_head, build_loss, build_neck, ...
import numpy as np import pytest import torch from docarray import BaseDocument, DocumentArray from docarray.documents import Image, Text from docarray.typing import ( AnyEmbedding, AnyTensor, AnyUrl, ImageBytes, ImageUrl, Mesh3DUrl, NdArray, PointCloud3DUrl, TextUrl, TorchEmbed...
import numpy as np import torch from docarray import BaseDocument, DocumentArray from docarray.documents import Image, Text from docarray.typing import ( AnyEmbedding, AnyTensor, AnyUrl, ImageBytes, ImageUrl, Mesh3DUrl, NdArray, PointCloud3DUrl, TextUrl, TorchEmbedding, Torc...
# Copyright (c) OpenMMLab. All rights reserved. from .data_preprocessor import (BatchFixedSizePad, BatchResize, BatchSyncRandomResize, BoxInstDataPreprocessor, DetDataPreprocessor, MultiBranchDataPreprocessor) __all__ = [ ...
# Copyright (c) OpenMMLab. All rights reserved. from .data_preprocessor import (BatchFixedSizePad, BatchResize, BatchSyncRandomResize, DetDataPreprocessor, MultiBranchDataPreprocessor) __all__ = [ 'DetDataPreprocessor', 'BatchSyncRandomResize', 'Batch...
import asyncio import os import random import string import tempfile import time import pytest from jina import helper @pytest.fixture(scope='function') def random_workspace_name(): """Generate a random workspace name with digits and letters.""" rand = ''.join(random.choices(string.ascii_uppercase + string....
import asyncio import os import random import string import tempfile import time import pytest from jina import helper @pytest.fixture(scope='function') def random_workspace_name(): """Generate a random workspace name with digits and letters.""" rand = ''.join(random.choices(string.ascii_uppercase + string....
import importlib import pytest from fastapi.testclient import TestClient from ...utils import needs_py310, needs_pydanticv1 @pytest.fixture( name="client", params=[ "tutorial001_pv1", pytest.param("tutorial001_pv1_py310", marks=needs_py310), ], ) def get_client(request: pytest.FixtureReq...
import pytest from fastapi.testclient import TestClient from ...utils import needs_pydanticv1 @pytest.fixture(name="client") def get_client(): from docs_src.schema_extra_example.tutorial001_pv1 import app client = TestClient(app) return client @needs_pydanticv1 def test_post_body_example(client: TestC...
import logging from langchain_core.callbacks import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) from langchain_core.documents import Document from langchain_core.language_models import BaseLLM from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts imp...
import logging from langchain_core.callbacks import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) from langchain_core.documents import Document from langchain_core.language_models import BaseLLM from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts imp...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( bac...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( bac...
from typing import TYPE_CHECKING, List from docarray.typing.tensor.abstract_tensor import AbstractTensor if TYPE_CHECKING: from docarray.array import DocVec from docarray.array.any_array import AnyDocArray class DocArraySummary: def __init__(self, docs: 'AnyDocArray'): self.docs = docs def ...
from typing import TYPE_CHECKING, List from docarray.typing.tensor.abstract_tensor import AbstractTensor if TYPE_CHECKING: from docarray.array import DocVec from docarray.array.any_array import AnyDocArray class DocArraySummary: def __init__(self, docs: 'AnyDocArray'): self.docs = docs def ...
import prisma AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = { "Input": True, "Output": True, "Webhook": True, "AgentBlock": True, } AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = { "AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore } EXECUTION_RESULT_INCLUDE: prisma.types....
import prisma AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = { "Input": True, "Output": True, "Webhook": True, "AgentBlock": True, } AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = { "AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore } EXECUTION_RESULT_INCLUDE: prisma.types....
from __future__ import annotations from typing import Callable try: from typing import Self except ImportError: from typing_extensions import Self from torch import Tensor, nn from sentence_transformers.models.Module import Module from sentence_transformers.util import fullname, import_from_string class D...
from __future__ import annotations import json import os from typing import Callable import torch from safetensors.torch import load_model as load_safetensors_model from safetensors.torch import save_model as save_safetensors_model from torch import Tensor, nn from sentence_transformers.util import fullname, import_...
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple, Union import mmcv import numpy as np from mmengine.utils import is_str def palette_val(palette: List[tuple]) -> List[tuple]: """Convert palette to matplotlib palette. Args: palette (List[tuple]): A list of color tuples. ...
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple, Union import mmcv import numpy as np from mmengine.utils import is_str def palette_val(palette: List[tuple]) -> List[tuple]: """Convert palette to matplotlib palette. Args: palette (List[tuple]): A list of color tuples. ...
# Copyright (c) OpenMMLab. All rights reserved. from .registry import Registry, build_from_cfg from .root import (DATA_SAMPLERS, DATASETS, EVALUATORS, HOOKS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS, ...
# Copyright (c) OpenMMLab. All rights reserved. from .registry import Registry, build_from_cfg from .root import (DATA_SAMPLERS, DATASETS, EVALUATORS, HOOKS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS, ...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .two_stage import TwoStageDetector @MODELS.register_module() class GridRCNN(TwoStageDetector): """Grid R-CNN. This detector is the implementation of: ...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from .two_stage import TwoStageDetector @MODELS.register_module() class GridRCNN(TwoStageDetector): """Grid R-CNN. This detector is the implementation of: - Grid R-CNN (https://arxiv.org/abs/1811.12030) - Grid R-CNN Plu...
import csv import gzip import logging import os from datetime import datetime from torch.utils.data import DataLoader from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator #### Just some code...
import torch from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator from sentence_transformers import SentenceTransformer, LoggingHandler, models, util, InputExample from sentence_transformers import losses import os import gzip import csv from datetime import datetime import logging from torch.utils...
import os import shutil import pytest @pytest.fixture(scope="session", autouse=True) def download_cache(): os.system('scripts/download_full.sh') yield shutil.rmtree('.cache', ignore_errors=True)
import os import shutil import pytest @pytest.fixture(scope="session", autouse=True) def download_cache(): os.system('scripts/download_full.sh') yield shutil.rmtree('.cache')
from keras.src.api_export import keras_export from keras.src.layers.pooling.base_pooling import BasePooling @keras_export(["keras.layers.MaxPooling3D", "keras.layers.MaxPool3D"]) class MaxPooling3D(BasePooling): """Max pooling operation for 3D data (spatial or spatio-temporal). Downsamples the input along it...
from keras.src.api_export import keras_export from keras.src.layers.pooling.base_pooling import BasePooling @keras_export(["keras.layers.MaxPooling3D", "keras.layers.MaxPool3D"]) class MaxPooling3D(BasePooling): """Max pooling operation for 3D data (spatial or spatio-temporal). Downsamples the input along it...
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable...
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable...
from __future__ import annotations from typing import Any, List from langchain_text_splitters.base import TextSplitter class NLTKTextSplitter(TextSplitter): """Splitting text using NLTK package.""" def __init__( self, separator: str = "\n\n", language: str = "english", *, ...
from __future__ import annotations from typing import Any, List from langchain_text_splitters.base import TextSplitter class NLTKTextSplitter(TextSplitter): """Splitting text using NLTK package.""" def __init__( self, separator: str = "\n\n", language: str = "english", *, ...
import io import logging from enum import Enum import replicate import replicate.exceptions import requests from prisma.models import AgentGraph from replicate.helpers import FileOutput from backend.data.graph import Graph from backend.util.settings import Settings logger = logging.getLogger(__name__) class ImageS...
import io import logging from enum import Enum import replicate import replicate.exceptions import requests from replicate.helpers import FileOutput from backend.data.graph import Graph from backend.util.settings import Settings logger = logging.getLogger(__name__) class ImageSize(str, Enum): LANDSCAPE = "1024...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess from typing import Callable import pytest from jina import Flow from ...audioclip_text import AudioCLIPTextEncoder @pytest.mark.parametrize("request_size", [1, 10, 50, 100]) def test_integra...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Callable import pytest from jina import Flow from ...audioclip_text import AudioCLIPTextEncoder @pytest.mark.parametrize("request_size", [1, 10, 50, 100]) def test_integration(data_generator...
_base_ = './fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py' model = dict( data_preprocessor=dict( mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], bgr_to_rgb=False)) train_pipeline = [ dict( type='LoadImageFromFile', file_client_args={{_base_.file_client_args}}),...
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' model = dict( data_preprocessor=dict( mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], bgr_to_rgb=False)) train_pipeline = [ dict( type='LoadImageFromFile', file_client_args={{_base_.file_client_args}}), ...
from typing import Type from docarray.proto import DocumentArrayProto, NodeProto from ..abstract_array import AbstractDocumentArray class ProtoArrayMixin(AbstractDocumentArray): @classmethod def from_protobuf( cls: Type[AbstractDocumentArray], pb_msg: 'DocumentArrayProto' ) -> AbstractDocumentAr...
from typing import Type from docarray.proto import DocumentArrayProto, NodeProto from ..abstract_array import AbstractDocumentArray class ProtoArrayMixin(AbstractDocumentArray): @classmethod def from_protobuf( cls: Type[AbstractDocumentArray], pb_msg: 'DocumentArrayProto' ) -> AbstractDocumentAr...
"""Init file of LlamaIndex.""" __version__ = "0.12.36" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_index.core....
"""Init file of LlamaIndex.""" __version__ = "0.12.35" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_index.core....
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
from __future__ import annotations import json import os import torch from safetensors.torch import load_model as load_safetensors_model from safetensors.torch import save_model as save_safetensors_model from torch import nn class LSTM(nn.Module): """Bidirectional LSTM running over word embeddings.""" def ...
import json import os from typing import List import torch from safetensors.torch import load_model as load_safetensors_model from safetensors.torch import save_model as save_safetensors_model from torch import nn class LSTM(nn.Module): """Bidirectional LSTM running over word embeddings.""" def __init__( ...
from keras.src import initializers from keras.src import ops from keras.src.api_export import keras_export from keras.src.optimizers import optimizer @keras_export(["keras.optimizers.Adagrad"]) class Adagrad(optimizer.Optimizer): """Optimizer that implements the Adagrad algorithm. Adagrad is an optimizer wit...
from keras.src import initializers from keras.src import ops from keras.src.api_export import keras_export from keras.src.optimizers import optimizer @keras_export(["keras.optimizers.Adagrad"]) class Adagrad(optimizer.Optimizer): """Optimizer that implements the Adagrad algorithm. Adagrad is an optimizer wit...
# Copyright (c) OpenMMLab. All rights reserved. from collections import OrderedDict from mmcv.runner import get_dist_info from mmcv.runner.hooks import Hook from torch import nn from mmdet.registry import HOOKS from ..utils.dist_utils import all_reduce_dict def get_norm_states(module): async_norm_states = Order...
# Copyright (c) OpenMMLab. All rights reserved. from collections import OrderedDict from mmcv.runner import get_dist_info from mmcv.runner.hooks import HOOKS, Hook from torch import nn from ..utils.dist_utils import all_reduce_dict def get_norm_states(module): async_norm_states = OrderedDict() for name, chi...
from typing import Any, Type, TypeVar, Union, cast import numpy as np from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor from docarray.typing.tensor.image.image_ndarray import ImageNdArray from docarray.typing.tensor.tensor import AnyTensor from docarray.utils._internal.misc import ( ...
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast import numpy as np from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor from docarray.typing.tensor.image.image_ndarray import ImageNdArray from docarray.typing.tensor.tensor import AnyTensor from docarray.utils._internal....
""" This file runs Masked Language Model. You provide a training file. Each line is interpreted as a sentence / paragraph. Optionally, you can also provide a dev file. The fine-tuned model is stored in the output/model_name folder. Usage: python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt...
""" This file runs Masked Language Model. You provide a training file. Each line is interpreted as a sentence / paragraph. Optionally, you can also provide a dev file. The fine-tuned model is stored in the output/model_name folder. Usage: python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt...
# Copyright (c) OpenMMLab. All rights reserved. import functools import mmcv import torch.nn.functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: ...
# Copyright (c) OpenMMLab. All rights reserved. import functools import mmcv import torch.nn.functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: ...
from __future__ import annotations import pytest from sentence_transformers.cross_encoder import CrossEncoder @pytest.mark.parametrize( "model_name, expected_score", [ ("cross-encoder/ms-marco-MiniLM-L6-v2", [8.12545108795166, -3.045016050338745, -3.1524128913879395]), ("cross-encoder/ms-mar...
from __future__ import annotations import pytest from sentence_transformers.cross_encoder import CrossEncoder @pytest.mark.parametrize( "model_name, expected_score", [ ("cross-encoder/ms-marco-MiniLM-L-6-v2", [8.12545108795166, -3.045016050338745, -3.1524128913879395]), ("cross-encoder/ms-ma...
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. from typing import Optional import fire from llama import Llama def main( ckpt_dir: str, tokenizer_path: str, temperature: float = 0.6, ...
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. from typing import Optional import fire from llama import Llama def main( ckpt_dir: str, tokenizer_path: str, temperature: float = 0.6, ...
# Copyright (c) OpenMMLab. All rights reserved. """MMEngine provides 11 root registries to support using modules across projects. More details can be found at https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. """ from .registry import Registry # manage all kinds of runners like `EpochBasedRunner` an...
# Copyright (c) OpenMMLab. All rights reserved. """MMEngine provides 11 root registries to support using modules across projects. More details can be found at https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. """ from .registry import Registry # manage all kinds of runners like `EpochBasedRunner` an...
from enum import Enum from typing import Dict, Iterable import torch.nn.functional as F from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer class SiameseDistanceMetric(Enum): """The metric for the contrastive loss""" EUCLIDEAN = lambda x, y: F.pairwise_dis...
from enum import Enum from typing import Iterable, Dict import torch.nn.functional as F from torch import nn, Tensor from sentence_transformers.SentenceTransformer import SentenceTransformer class SiameseDistanceMetric(Enum): """The metric for the contrastive loss""" EUCLIDEAN = lambda x, y: F.pairwise_dista...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
class ConcurrentPushException(Exception): """Exception raised when a concurrent push is detected.""" pass
from .conv_emformer import ConvEmformer from .conv_tasnet import conv_tasnet_base from .hdemucs import HDemucs from .rnnt import conformer_rnnt_base, conformer_rnnt_model __all__ = [ "conformer_rnnt_base", "conformer_rnnt_model", "conv_tasnet_base", "ConvEmformer", "HDemucs", ]
from .conv_emformer import ConvEmformer from .conv_tasnet import conv_tasnet_base from .rnnt import conformer_rnnt_base, conformer_rnnt_model __all__ = [ "conformer_rnnt_base", "conformer_rnnt_model", "conv_tasnet_base", "ConvEmformer", ]
from __future__ import annotations import csv import gzip import os from . import InputExample class STSDataReader: """Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx) Default values expect a tab-separated file with the first & second column...
from __future__ import annotations import csv import gzip import os from . import InputExample class STSDataReader: """Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx) Default values expect a tab-separated file with the first & second column...
import os from pathlib import Path from typing import List, Tuple, Union import torchaudio from torch import Tensor from torch.hub import download_url_to_file from torch.utils.data import Dataset from torchaudio.datasets.librispeech import _get_librispeech_metadata from torchaudio.datasets.utils import extract_archive...
import os from pathlib import Path from typing import List, Tuple, Union import torchaudio from torch import Tensor from torch.hub import download_url_to_file from torch.utils.data import Dataset from torchaudio.datasets.librispeech import _get_librispeech_metadata from torchaudio.datasets.utils import extract_archive...
import numpy as np from docarray import BaseDoc from docarray.array.doc_vec.doc_vec import DocVec from docarray.typing import AnyTensor, NdArray def test_da_init(): class MyDoc(BaseDoc): tensor: AnyTensor name: str docs = [MyDoc(tensor=np.zeros(10), name='hello') for _ in range(4)] da =...
import numpy as np from docarray import BaseDoc from docarray.array.doc_vec.doc_vec import DocVec from docarray.typing import AnyTensor, NdArray def test_da_init(): class MyDoc(BaseDoc): tensor: AnyTensor name: str docs = [MyDoc(tensor=np.zeros(10), name='hello') for _ in range(4)] da =...