Dataset schema: two string columns.
  input: string, lengths from 33 to 5k characters
  output: string, lengths from 32 to 5k characters
Each record below is rendered as two consecutive lines, the input string first and the output string second; long cells are truncated with "...".
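The schema is just two free-form string columns holding code file revisions. As a minimal sketch of consuming records with this shape, assuming the data is published as a Hugging Face dataset loadable with the `datasets` library (the repository id "user/code-pairs" below is a hypothetical placeholder, not the real name):

from datasets import load_dataset

# Hypothetical repository id; substitute the actual dataset name.
ds = load_dataset("user/code-pairs", split="train")

# Peek at the first few records.
for record in ds.select(range(3)):
    # Each record carries two strings keyed "input" and "output";
    # per the stated bounds, each is between 33 and ~5k characters.
    print(len(record["input"]), record["input"][:80])
    print(len(record["output"]), record["output"][:80])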
_base_ = './fovea_r50_fpn_4xb4-1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), bbox_head=dict( with_deform=True, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) train_...
_base_ = './fovea_r50_fpn_4xb4-1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), bbox_head=dict( with_deform=True, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) train_...
""" NumPy Array API compatibility library This is a small wrapper around NumPy, CuPy, JAX, sparse and others that are compatible with the Array API standard https://data-apis.org/array-api/latest/. See also NEP 47 https://numpy.org/neps/nep-0047-array-api-standard.html. Unlike array_api_strict, this is not a strict m...
""" NumPy Array API compatibility library This is a small wrapper around NumPy, CuPy, JAX, sparse and others that are compatible with the Array API standard https://data-apis.org/array-api/latest/. See also NEP 47 https://numpy.org/neps/nep-0047-array-api-standard.html. Unlike array_api_strict, this is not a strict m...
_base_ = [ '../common/ms-poly_3x_coco-instance.py', '../_base_/models/mask-rcnn_r50_fpn.py' ] model = dict( backbone=dict( _delete_=True, type='RegNet', arch='regnetx_4.0gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=Tr...
_base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] model = dict( backbone=dict( _delete_=True, type='RegNet', arch='regnetx_4.0gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_gr...
_base_ = './yolov3_d53_mstrain-608_273e_coco.py' # fp16 settings optim_wrapper = dict(type='AmpOptimWrapper', loss_scale='dynamic')
_base_ = './yolov3_d53_mstrain-608_273e_coco.py' # fp16 settings fp16 = dict(loss_scale='dynamic')
# Copyright (c) OpenMMLab. All rights reserved. third_part_libs = [ 'pip install -r ../requirements/albu.txt', 'pip install instaboostfast', 'pip install git+https://github.com/cocodataset/panopticapi.git', 'pip install timm', 'pip install mmcls>=1.0.0rc0', 'pip install git+https://github.com/l...
# Copyright (c) OpenMMLab. All rights reserved. third_part_libs = [ 'pip install -r ../requirements/albu.txt', 'pip install instaboostfast', 'pip install git+https://github.com/cocodataset/panopticapi.git', 'pip install timm', 'pip install mmcls>=1.0.0rc0', 'pip install git+https://github.com/l...
from llama_index.llms.openai import OpenAI from llama_index.multi_modal_llms.openai import OpenAIMultiModal def test_embedding_class(): names_of_base_classes = [b.__name__ for b in OpenAIMultiModal.__mro__] assert OpenAI.__name__ in names_of_base_classes
from llama_index.core.multi_modal_llms.base import MultiModalLLM from llama_index.multi_modal_llms.openai import OpenAIMultiModal def test_embedding_class(): names_of_base_classes = [b.__name__ for b in OpenAIMultiModal.__mro__] assert MultiModalLLM.__name__ in names_of_base_classes
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import subprocess import numpy as np import pytest from jina import Document, DocumentArray, Flow from jina.executors.metas import get_default_metas from jina_commons.indexers.dump import export_dump_stream...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import subprocess import numpy as np import pytest from jina import Document, DocumentArray, Flow from jina.executors.metas import get_default_metas from jina_commons.indexers.dump import export_dump_stream...
import gzip from os import PathLike from pathlib import Path from typing import Union import pytest import yaml from vcr import VCR from vcr.persisters.filesystem import CassetteNotFoundError from vcr.request import Request class CustomSerializer: """Custom serializer for VCR cassettes using YAML and gzip. ...
import gzip from os import PathLike from pathlib import Path from typing import Union import pytest import yaml from vcr import VCR from vcr.persisters.filesystem import CassetteNotFoundError from vcr.request import Request class CustomSerializer: """Custom serializer for VCR cassettes using YAML and gzip. ...
from __future__ import annotations import re import pytest from sentence_transformers import SentenceTransformer from sentence_transformers.evaluation import NanoBEIREvaluator from sentence_transformers.util import is_datasets_available if not is_datasets_available(): pytest.skip( reason="Datasets are n...
from __future__ import annotations import re import pytest from sentence_transformers import SentenceTransformer from sentence_transformers.evaluation import NanoBEIREvaluator def test_nanobeir_evaluator(): """Tests that the NanoBERTEvaluator can be loaded and produces expected metrics""" datasets = ["Quor...
# Copyright (c) OpenMMLab. All rights reserved. import os from typing import Optional import torch def get_max_cuda_memory(device: Optional[torch.device] = None) -> int: """Returns the maximum GPU memory occupied by tensors in megabytes (MB) for a given device. By default, this returns the peak allocated mem...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional import torch def get_max_cuda_memory(device: Optional[torch.device] = None) -> int: """Returns the maximum GPU memory occupied by tensors in megabytes (MB) for a given device. By default, this returns the peak allocated memory since ...
""" Computes embeddings """ from __future__ import annotations import numpy as np import pytest from sentence_transformers import SentenceTransformer @pytest.mark.skip( "This test fails if optimum.intel.openvino is imported, because openvinotoolkit/nncf " "patches torch._C._nn.gelu in a way that breaks pic...
""" Computes embeddings """ from __future__ import annotations import numpy as np import pytest from sentence_transformers import SentenceTransformer @pytest.mark.parametrize("normalize_embeddings", (False, True)) @pytest.mark.parametrize("prompt_name", (None, "retrieval")) def test_encode_multi_process( stsb_...
import numpy as np import pytest from docarray import BaseDoc, DocList from docarray.base_doc import AnyDoc from docarray.documents import ImageDoc, TextDoc from docarray.typing import NdArray @pytest.mark.proto def test_simple_proto(): class CustomDoc(BaseDoc): text: str tensor: NdArray da ...
import numpy as np import pytest from docarray import BaseDoc, DocList from docarray.documents import ImageDoc, TextDoc from docarray.typing import NdArray @pytest.mark.proto def test_simple_proto(): class CustomDoc(BaseDoc): text: str tensor: NdArray da = DocList( [CustomDoc(text='h...
from __future__ import annotations import json import os import torch from safetensors.torch import load_model as load_safetensors_model from safetensors.torch import save_model as save_safetensors_model from torch import Tensor, nn class WeightedLayerPooling(nn.Module): """Token embeddings are weighted mean of...
import json import os from typing import Dict import torch from safetensors.torch import load_model as load_safetensors_model from safetensors.torch import save_model as save_safetensors_model from torch import Tensor, nn class WeightedLayerPooling(nn.Module): """Token embeddings are weighted mean of their diffe...
import pytest from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface @pytest.mark.integration def test_available_models() -> None: models = Interface().available_models assert models assert isinstance(models, list) assert all(isinstance(model.id, str) for model in models)
import pytest from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface @pytest.mark.integration() def test_available_models() -> None: models = Interface().available_models assert models assert isinstance(models, list) assert all(isinstance(model.id, str) for model in models)
_base_ = './retinanet_r50_fpn_1x_coco.py' # MMEngine support the following two ways, users can choose # according to convenience # optim_wrapper = dict(type='AmpOptimWrapper') _base_.optim_wrapper.type = 'AmpOptimWrapper'
_base_ = './retinanet_r50_fpn_1x_coco.py' # fp16 settings fp16 = dict(loss_scale=512.)
import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging logger = logging.get_logger(__name__) class ParallelBackendConfig: backend_name = None @experimental def parallel_map(function, iterable, num_proc, batched, batch_size, types, disab...
import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging logger = logging.get_logger(__name__) class ParallelBackendConfig: backend_name = None @experimental def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single...
# Copyright (c) OpenMMLab. All rights reserved. """MMEngine provides 11 root registries to support using modules across projects. More datails can be found at https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. """ from .registry import Registry # manage all kinds of runners like `EpochBasedRunner` an...
# Copyright (c) OpenMMLab. All rights reserved. """MMEngine provides 11 root registries to support using modules across projects. More datails can be found at https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. """ from .registry import Registry # manage all kinds of runners like `EpochBasedRunner` an...
import unittest import torch from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda from .functional_impl import Functional, FunctionalCUDAOnly @skipIfNoCuda class TestFunctionalFloat32(Functional, PytorchTestCase): dtype = torch.float32 device = torch.device("cuda") @unittest.expec...
import unittest import torch from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda from .functional_impl import Functional @skipIfNoCuda class TestFunctionalFloat32(Functional, PytorchTestCase): dtype = torch.float32 device = torch.device("cuda") @unittest.expectedFailure def t...
import pytest import torch from mmdet.models.backbones.pvt import (PVTEncoderLayer, PyramidVisionTransformer, PyramidVisionTransformerV2) def test_pvt_block(): # test PVT structure and forward block = PVTEncoderLayer( emb...
import pytest import torch from mmdet.models.backbones.pvt import (PVTEncoderLayer, PyramidVisionTransformer, PyramidVisionTransformerV2) def test_pvt_block(): # test PVT structure and forward block = PVTEncoderLayer( emb...
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn import ConvModule from mmcv.cnn.bricks import DropPath from mmcv.runner import BaseModule from .se_layer import SELayer class InvertedResidual(BaseModule): """Inverted Residual Block. Args...
# Copyright (c) OpenMMLab. All rights reserved. import torch.utils.checkpoint as cp from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from .se_layer import SELayer class InvertedResidual(BaseModule): """Inverted Residual Block. Args: in_channels (int): The input channels of this Mod...
from typing import Any, Dict, List, Optional, Sequence, Tuple from llama_index.core.base.llms.types import ChatMessage, MessageRole from llama_index.core.base.llms.generic_utils import get_from_param_or_env DEFAULT_FIREWORKS_API_BASE = "https://api.fireworks.ai/inference/v1" DEFAULT_FIREWORKS_API_VERSION = "" LLAMA_...
from typing import Any, Dict, List, Optional, Sequence, Tuple from llama_index.core.base.llms.types import ChatMessage, MessageRole from llama_index.core.base.llms.generic_utils import get_from_param_or_env DEFAULT_FIREWORKS_API_BASE = "https://api.fireworks.ai/inference/v1" DEFAULT_FIREWORKS_API_VERSION = "" LLAMA_...
""" Sphinx Read the Docs theme. From https://github.com/ryan-roemer/sphinx-bootstrap-theme. """ from os import path import sphinx __version__ = "0.5.0" __version_full__ = __version__ def get_html_theme_path(): """Return list of HTML theme paths.""" cur_dir = path.abspath(path.dirname(path.dirname(__file__...
""" Sphinx Read the Docs theme. From https://github.com/ryan-roemer/sphinx-bootstrap-theme. """ from os import path import sphinx __version__ = "0.5.0" __version_full__ = __version__ def get_html_theme_path(): """Return list of HTML theme paths.""" cur_dir = path.abspath(path.dirname(path.dirname(__file_...
"""Test chat model integration.""" import json from collections.abc import Generator from contextlib import contextmanager from typing import Any from unittest.mock import patch import pytest from httpx import Client, Request, Response from langchain_core.messages import ChatMessage from langchain_tests.unit_tests im...
"""Test chat model integration.""" import json from collections.abc import Generator from contextlib import contextmanager from typing import Any import pytest from httpx import Client, Request, Response from langchain_core.messages import ChatMessage from langchain_tests.unit_tests import ChatModelUnitTests from la...
""" This examples trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2. It does NOT produce a sentence embedding and does NOT work for individual sentences. Usage: python trai...
""" This examples trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2. It does NOT produce a sentence embedding and does NOT work for individual sentences. Usage: python trai...
__version__ = '0.13.8' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install()
__version__ = '0.13.7' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install()
import asyncio import copy from typing import Any, List, TYPE_CHECKING from jina.serve.runtimes.servers import BaseServer if TYPE_CHECKING: from jina.logging.logger import JinaLogger class CompositeBaseServer(BaseServer): """Composite Base Server implementation from which u can inherit a specific custom com...
import asyncio import copy from typing import Any, List, TYPE_CHECKING from jina.serve.runtimes.servers import BaseServer if TYPE_CHECKING: from jina.logging.logger import JinaLogger class CompositeBaseServer(BaseServer): """Composite Base Server implementation from which u can inherit a specific custom com...
from pathlib import Path import numpy as np import scipy from jina import Document, DocumentArray, Executor from ...tfidf_text_executor import TFIDFTextEncoder def test_config(): ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml')) assert ex.path_vectorizer.endswith('tfidf_vectorizer.pic...
import os import numpy as np import scipy from jina import Executor, Document, DocumentArray from ...tfidf_text_executor import TFIDFTextEncoder cur_dir = os.path.dirname(os.path.abspath(__file__)) def test_tfidf(): encoder = Executor.load_config(os.path.join(cur_dir, '../../config.yml')) assert encoder.pat...
import contextlib import logging import typing import fastapi import fastapi.responses import starlette.middleware.cors import uvicorn from autogpt_libs.feature_flag.client import ( initialize_launchdarkly, shutdown_launchdarkly, ) import backend.data.block import backend.data.db import backend.data.graph imp...
import contextlib import logging import typing import fastapi import fastapi.responses import starlette.middleware.cors import uvicorn from autogpt_libs.feature_flag.client import ( initialize_launchdarkly, shutdown_launchdarkly, ) import backend.data.block import backend.data.db import backend.data.graph imp...
import numpy as np from pydantic.tools import parse_obj_as, schema_json_of from docarray.document.io.json import orjson_dumps from docarray.typing import AnyEmbedding def test_proto_embedding(): embedding = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224))) embedding._to_node_protobuf() def test_json_sc...
import numpy as np from pydantic.tools import parse_obj_as, schema_json_of from docarray.document.io.json import orjson_dumps from docarray.typing import Embedding def test_proto_embedding(): embedding = parse_obj_as(Embedding, np.zeros((3, 224, 224))) embedding._to_node_protobuf() def test_json_schema()...
from abc import ABC, abstractmethod from typing import Callable, List, Sequence, Optional, Union, Any from llama_index.core.agent.workflow.workflow_events import ( AgentOutput, ToolCallResult, ) from llama_index.core.bridge.pydantic import ( BaseModel, Field, ConfigDict, field_validator, ) from...
from abc import ABC, abstractmethod from typing import Callable, List, Sequence, Optional, Union, Any from llama_index.core.agent.workflow.workflow_events import ( AgentOutput, ToolCallResult, ) from llama_index.core.bridge.pydantic import ( BaseModel, Field, ConfigDict, field_validator, ) from...
# Copyright (c) OpenMMLab. All rights reserved. from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean, sync_random_seed) from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor, generate_coordinate, mask2ndarray, multi_apply,...
# Copyright (c) OpenMMLab. All rights reserved. from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean, sync_random_seed) from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor, generate_coordinate, mask2ndarray, multi_apply,...
# Copyright (c) OpenMMLab. All rights reserved. from .base_data_element import BaseDataElement from .instance_data import InstanceData from .sampler import DefaultSampler, InfiniteSampler from .utils import pseudo_collate, worker_init_fn __all__ = [ 'BaseDataElement', 'DefaultSampler', 'InfiniteSampler', 'worker_i...
# Copyright (c) OpenMMLab. All rights reserved. from .base_data_element import BaseDataElement from .sampler import DefaultSampler, InfiniteSampler from .utils import pseudo_collate, worker_init_fn __all__ = [ 'BaseDataElement', 'DefaultSampler', 'InfiniteSampler', 'worker_init_fn', 'pseudo_collate' ]
# Copyright (c) OpenMMLab. All rights reserved. from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset, ADE20KSegDataset) from .base_det_dataset import BaseDetDataset from .base_semseg_dataset import BaseSegDataset from .base_video_dataset import BaseVideoDataset from .cityscapes import ...
# Copyright (c) OpenMMLab. All rights reserved. from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset, ADE20KSegDataset) from .base_det_dataset import BaseDetDataset from .base_semseg_dataset import BaseSegDataset from .base_video_dataset import BaseVideoDataset from .cityscapes import ...
from tempfile import NamedTemporaryFile import huggingface_hub import pytest import requests from packaging import version from datasets.utils.file_utils import fsspec_get, fsspec_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline, require_not_windows @pytest.mark.integration...
import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def test_offline_with_timeout(): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT): with pytest.raises(Reques...
""" Computes embeddings """ import numpy as np from sentence_transformers import SentenceTransformer def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None: """ Test that encode(output_value='token_embeddings') works """ model = paraphrase_distilroberta...
""" Computes embeddings """ import numpy as np from sentence_transformers import SentenceTransformer def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None: """ Test that encode(output_value='token_embeddings') works :return: """ model = paraphrase_...
import copy from typing import Dict, Tuple _SPECIFIC_EXECUTOR_SEPARATOR = '__' def _spit_key_and_executor_name(key_name: str) -> Tuple[str]: """Split a specific key into a key, name pair ex: 'key__my_executor' will be split into 'key', 'my_executor' :param key_name: key name of the param :return: r...
import copy from typing import Any, Dict, List, Tuple _SPECIFIC_EXECUTOR_SEPARATOR = '__' def _spit_key_and_executor_name(key_name: str) -> Tuple[str]: """Split a specific key into a key, name pair ex: 'key__my_executor' will be split into 'key', 'my_executor' :param key_name: key name of the param ...
import os import pytest import torch import whisper @pytest.mark.parametrize("model_name", whisper.available_models()) def test_transcribe(model_name: str): device = "cuda" if torch.cuda.is_available() else "cpu" model = whisper.load_model(model_name).to(device) audio_path = os.path.join(os.path.dirname...
import os import pytest import whisper @pytest.mark.parametrize('model_name', whisper.available_models()) def test_transcribe(model_name: str): model = whisper.load_model(model_name).cuda() audio_path = os.path.join(os.path.dirname(__file__), "jfk.flac") language = "en" if model_name.endswith(".en") el...
# Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to...
# Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to...
__version__ = '0.18.0' import os from docarray.document import Document from docarray.array import DocumentArray from docarray.dataclasses import dataclass, field if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
__version__ = '0.17.1' import os from docarray.document import Document from docarray.array import DocumentArray from docarray.dataclasses import dataclass, field if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
from typing import Any, Optional from typing_extensions import get_origin from typing_inspect import get_args, is_typevar, is_union_type from docarray.typing.id import ID from docarray.typing.tensor.abstract_tensor import AbstractTensor def is_type_tensor(type_: Any) -> bool: """Return True if type is a type Te...
from typing import Any, Optional from typing_extensions import get_origin from typing_inspect import get_args, is_typevar, is_union_type from docarray.typing.tensor.abstract_tensor import AbstractTensor def is_type_tensor(type_: Any) -> bool: """Return True if type is a type Tensor or an Optional Tensor type.""...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../common/lsj_100e_coco_instance.py' ] image_size = (1024, 1024) batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] norm_cfg = dict(type='SyncBN', requires_grad=True) # Use MMSyncBN that handles empty tensor in head. It can be changed to # Syn...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../common/lsj_100e_coco_instance.py' ] image_size = (1024, 1024) batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] norm_cfg = dict(type='SyncBN', requires_grad=True) # Use MMSyncBN that handles empty tensor in head. It can be changed to # Syn...
from typing import Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_doc import BaseDoc from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.tensor.image.image_tensor import ImageTensor from docarr...
from typing import Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_document import BaseDocument from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.tensor.image.image_tensor import ImageTensor f...
""" In SecGPT, all messages exchanged among spokes conform to predefined formats, encapsulated within the Message class. """ import json class Message: @staticmethod def function_probe_request(spoke_id, function): """ Create a function probe request message. Args: spoke_id...
""" In SecGPT, all messages exchanged among spokes conform to predefined formats, encapsulated within the Message class. """ import json class Message: @staticmethod def function_probe_request(spoke_id, function): """ Create a function probe request message. Args: spoke_id...
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any from sentence_transformers.evaluation import TranslationEvaluator if TYPE_CHECKING: import numpy as np from torch import Tensor from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder logger = ...
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any from sentence_transformers.evaluation import TranslationEvaluator if TYPE_CHECKING: import numpy as np from torch import Tensor from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder logger = ...
import logging import prisma.types logger = logging.getLogger(__name__) async def log_raw_analytics( user_id: str, type: str, data: dict, data_index: str, ): details = await prisma.models.AnalyticsDetails.prisma().create( data=prisma.types.AnalyticsDetailsCreateInput( userId=...
import logging import prisma.types logger = logging.getLogger(__name__) async def log_raw_analytics( user_id: str, type: str, data: dict, data_index: str, ): details = await prisma.models.AnalyticsDetails.prisma().create( data={ "userId": user_id, "type": type, ...
"""Astra DB.""" from typing import Any, List, Optional import llama_index.core from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class AstraDBReader(BaseReader): """ Astra DB reader. Retrieve documents from an Astra DB Instance. Args: collect...
"""Astra DB.""" from typing import Any, List, Optional import llama_index.core from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class AstraDBReader(BaseReader): """Astra DB reader. Retrieve documents from an Astra DB Instance. Args: collection_n...
# dataset settings dataset_type = 'CocoPanopticDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='dis...
# dataset settings dataset_type = 'CocoPanopticDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='dis...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import numpy as np import torch from mmengine.config import Config, DictAction from mmdet.registry import MODELS from mmdet.utils import register_all_modules try: from mmcv.cnn import get_model_complexity_info except ImportError: raise ImportErr...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import numpy as np import torch from mmengine.config import Config, DictAction from mmdet.models import build_detector try: from mmcv.cnn import get_model_complexity_info except ImportError: raise ImportError('Please upgrade mmcv to >0.6.2') d...
from langchain_core.prompts import PromptTemplate prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. {context} Question: {question} Helpful Answer:""" # noqa: E501 PROMPT = PromptTemp...
# flake8: noqa from langchain_core.prompts import PromptTemplate prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. {context} Question: {question} Helpful Answer:""" PROMPT = PromptTem...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import torch.nn.functional as F import torchvision import torchvision.transforms as transforms from torch.optim import SGD from mmengine.evaluator import BaseMetric from mmengine.model import BaseModel from mmengine.runner import Runner class MMResNet5...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import torch.nn.functional as F import torchvision import torchvision.transforms as transforms from torch.optim import SGD from mmengine.evaluator import BaseMetric from mmengine.model import BaseModel from mmengine.runner import Runner class MMResNet5...
from ._source_separation_pipeline import ( CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS, SourceSeparationBundle, ) from ._squim_pipeline import SQUIM_OBJECTIVE, SQUIM_SUBJECTIVE, SquimObjectiveBundle, SquimSubjectiveBundle from ._tts import ( TACOTRON2_GRIFFINLIM_CHAR_LJSPE...
from ._source_separation_pipeline import ( CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS, SourceSeparationBundle, ) from ._squim_pipeline import SQUIM_OBJECTIVE, SQUIM_SUBJECTIVE, SquimObjectiveBundle, SquimSubjectiveBundle from ._tts import ( TACOTRON2_GRIFFINLIM_CHAR_LJSPE...
"""Argparser module for WorkerRuntime""" from jina.parsers.helper import KVAppendAction, add_arg_group from jina.parsers.orchestrate.runtimes.grpc_channel import ( mixin_grpc_channel_options_parser, ) from jina.parsers.orchestrate.runtimes.runtime import ( mixin_base_runtime_parser, mixin_raft_parser, ) ...
"""Argparser module for WorkerRuntime""" from jina.parsers.helper import KVAppendAction, add_arg_group from jina.parsers.orchestrate.runtimes.grpc_channel import ( mixin_grpc_channel_options_parser, ) from jina.parsers.orchestrate.runtimes.runtime import ( mixin_base_runtime_parser, mixin_raft_parser, ) ...
""" Official evaluation script for ReCoRD v1.0. (Some functions are adopted from the SQuAD evaluation script.) """ import argparse import json import re import string import sys from collections import Counter def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" d...
""" Official evaluation script for ReCoRD v1.0. (Some functions are adopted from the SQuAD evaluation script.) """ import argparse import json import re import string import sys from collections import Counter def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" ...
import re import unicodedata import regex # non-ASCII letters that are not separated by "NFKD" normalization ADDITIONAL_DIACRITICS = { "œ": "oe", "Œ": "OE", "ø": "o", "Ø": "O", "æ": "ae", "Æ": "AE", "ß": "ss", "ẞ": "SS", "đ": "d", "Đ": "D", "ð": "d", "Ð": "D", "þ": ...
import re import unicodedata import regex # non-ASCII letters that are not separated by "NFKD" normalization ADDITIONAL_DIACRITICS = { "œ": "oe", "Œ": "OE", "ø": "o", "Ø": "O", "æ": "ae", "Æ": "AE", "ß": "ss", "ẞ": "SS", "đ": "d", "Đ": "D", "ð": "d", "Ð": "D", "þ": ...
# Copyright (c) OpenMMLab. All rights reserved. """Collecting some commonly used type hint in mmdetection.""" from typing import List, Optional, Sequence, Tuple, Union from mmengine.config import ConfigDict from mmengine.structures import InstanceData, PixelData # TODO: Need to avoid circular import with assigner and...
# Copyright (c) OpenMMLab. All rights reserved. """Collecting some commonly used type hint in mmdetection.""" from typing import List, Optional, Sequence, Tuple, Union from mmengine.config import ConfigDict from mmengine.data import InstanceData, PixelData # TODO: Need to avoid circular import with assigner and sampl...
""" ===================================== How to write your own Datapoint class ===================================== This guide is intended for downstream library maintainers. We explain how to write your own datapoint class, and how to make it compatible with the built-in Torchvision v2 transforms. Before continuing...
""" ===================================== How to write your own Datapoint class ===================================== This guide is intended for downstream library maintainers. We explain how to write your own datapoint class, and how to make it compatible with the built-in Torchvision v2 transforms. Before continuing...
import os from source_separation.utils.dataset import wsj0mix from torchaudio_unittest.common_utils import ( get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase, ) _FILENAMES = [ "012c0207_1.9952_01cc0202_-1.9952.wav", "01co0302_1.63_014c020q_-1.63.wav", "01do031...
import os from source_separation.utils.dataset import wsj0mix from torchaudio_unittest.common_utils import ( TempDirMixin, TorchaudioTestCase, get_whitenoise, save_wav, normalize_wav, ) _FILENAMES = [ "012c0207_1.9952_01cc0202_-1.9952.wav", "01co0302_1.63_014c020q_-1.63.wav", "01do031...
from langchain_core.agents import AgentActionMessageLog from langchain_core.messages import AIMessage, FunctionMessage from langchain.agents.format_scratchpad.openai_functions import ( format_to_openai_function_messages, ) def test_calls_convert_agent_action_to_messages() -> None: additional_kwargs1 = { ...
from langchain_core.agents import AgentActionMessageLog from langchain_core.messages import AIMessage, FunctionMessage from langchain.agents.format_scratchpad.openai_functions import ( format_to_openai_function_messages, ) def test_calls_convert_agent_action_to_messages() -> None: additional_kwargs1 = { ...
from __future__ import annotations import re import pytest from sentence_transformers import SentenceTransformer from sentence_transformers.evaluation import NanoBEIREvaluator from sentence_transformers.util import is_datasets_available if not is_datasets_available(): pytest.skip( reason="Datasets are n...
from __future__ import annotations import re import pytest from sentence_transformers import SentenceTransformer from sentence_transformers.evaluation import NanoBEIREvaluator def test_nanobeir_evaluator(): """Tests that the NanoBERTEvaluator can be loaded and produces expected metrics""" datasets = ["Quor...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Optional import torch from jina import DocumentArray, Executor, requests from .audio_clip.model import AudioCLIP class AudioCLIPTextEncoder(Executor): """ Encode text data with the ...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Iterable, Optional import torch from jina import DocumentArray, Executor, requests from .audio_clip.model import AudioCLIP class AudioCLIPTextEncoder(Executor): """ Encode text data...
from typing import Any, Optional, Sequence from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType from tonic_validate.metrics.augmentation_precision_metric import ( AugmentationPrecisionMetric, ) from tonic_validate.s...
from typing import Any, Optional, Sequence from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType from tonic_validate.metrics.augmentation_precision_metric import ( AugmentationPrecisionMetric, ) from tonic_validate.s...
"""Module containing the base parser for arguments of Jina.""" import argparse from jina.parsers.helper import _chf def set_base_parser(): """Set the base parser :return: the parser """ from jina import __version__ from jina.helper import colored, format_full_version_info, get_full_version ...
"""Module containing the base parser for arguments of Jina.""" import argparse from jina.parsers.helper import _chf def set_base_parser(): """Set the base parser :return: the parser """ from jina import __version__ from jina.helper import colored, format_full_version_info, get_full_version ...
import PIL.Image import pytest import torch import torchvision.transforms.v2.utils from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_mask, make_image from torchvision import datapoints from torchvision.transforms.v2.functional import to_pil_image from torchvision.transforms.v2.utils import h...
import PIL.Image import pytest import torch import torchvision.transforms.v2.utils from common_utils import DEFAULT_SIZE, make_bounding_box, make_detection_mask, make_image from torchvision import datapoints from torchvision.transforms.v2.functional import to_pil_image from torchvision.transforms.v2.utils import has...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
import numpy as np import pytest import torch from pydantic import parse_obj_as from docarray import BaseDoc from docarray.documents import ImageDoc from docarray.typing import ImageBytes from docarray.utils._internal.misc import is_tf_available tf_available = is_tf_available() if tf_available: import tensorflow ...
import pytest @pytest.mark.compile def test_placeholder() -> None: """Used for compiling integration tests without running any real tests."""
import pytest @pytest.mark.compile def test_placeholder() -> None: """Used for compiling integration tests without running any real tests.""" pass
from __future__ import annotations try: from typing import Self except ImportError: from typing_extensions import Self import torch import transformers from PIL import Image from sentence_transformers.models.Asym import InputModule class CLIPModel(InputModule): save_in_root: bool = True def __init...
from __future__ import annotations import torch import transformers from PIL import Image from torch import nn class CLIPModel(nn.Module): save_in_root: bool = True def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None: super().__init__() if proce...
import importlib.util import warnings from functools import wraps from typing import Optional import torch def is_module_available(*modules: str) -> bool: r"""Returns if a top-level module with :attr:`name` exists *without** importing it. This is generally safer than try-catch block around a `import X`. ...
import importlib.util import warnings from functools import wraps from typing import Optional import torch def is_module_available(*modules: str) -> bool: r"""Returns if a top-level module with :attr:`name` exists *without** importing it. This is generally safer than try-catch block around a `import X`. ...
"""Init file of LlamaIndex.""" __version__ = "0.12.19" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_index.core....
"""Init file of LlamaIndex.""" __version__ = "0.12.18" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_index.core....
from __future__ import annotations import random import pytest import torch from datasets import Dataset from torch.utils.data import ConcatDataset from sentence_transformers.sampler import NoDuplicatesBatchSampler, ProportionalBatchSampler @pytest.fixture def dummy_dataset() -> Dataset: """ Dummy dataset ...
from __future__ import annotations import random import pytest from datasets import Dataset from sentence_transformers.sampler import NoDuplicatesBatchSampler @pytest.fixture def dummy_dataset(): """ Dummy dataset for testing purposes. The dataset looks as follows: { "data": [0, 47, 3, 30, 3, ....
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmdet.models.dense_heads import FSAFHead def test_fsaf_head_loss(): """Tests anchor head loss when truth is empty and non-empty.""" s = 256 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, 'pad_...
import mmcv import torch from mmdet.models.dense_heads import FSAFHead def test_fsaf_head_loss(): """Tests anchor head loss when truth is empty and non-empty.""" s = 256 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, 'pad_shape': (s, s, 3) }] cfg = dict( ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.utilities import ArceeWrapper from langchain_community.utilities.arcee import ( ArceeDocument, ArceeDocumentAdapter, ArceeDocumentSource, ArceeRoute, ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.utilities import ArceeWrapper from langchain_community.utilities.arcee import ( ArceeDocument, ArceeDocumentAdapter, ArceeDocumentSource, ArceeRoute, ...
"""Snowflake Query Engine Pack.""" import os from typing import Any, Dict, List from llama_index.core import SQLDatabase from llama_index.core.indices.struct_store.sql_query import NLSQLTableQueryEngine from llama_index.core.llama_pack.base import BaseLlamaPack from sqlalchemy import create_engine class SnowflakeQu...
"""Snowflake Query Engine Pack.""" import os from typing import Any, Dict, List from llama_index.core import SQLDatabase from llama_index.core.indices.struct_store.sql_query import NLSQLTableQueryEngine from llama_index.core.llama_pack.base import BaseLlamaPack from sqlalchemy import create_engine class SnowflakeQu...
import wave from typing import TYPE_CHECKING, Any, Type, TypeVar, Union import numpy as np from pydantic import parse_obj_as from docarray.typing.tensor.audio.audio_ndarray import MAX_INT_16, AudioNdArray from docarray.typing.url.any_url import AnyUrl if TYPE_CHECKING: from pydantic import BaseConfig from py...
import wave from typing import TYPE_CHECKING, Any, Type, TypeVar, Union import numpy as np from pydantic import parse_obj_as from docarray.typing.tensor.audio.audio_ndarray import MAX_INT_16, AudioNdArray from docarray.typing.url.any_url import AnyUrl if TYPE_CHECKING: from pydantic import BaseConfig from py...
""" This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file. TSDAE will be training using these sentences. Checkpoints are stored every 500 steps to the output folder. Usage: python train_tsdae_from_file.py path/to/sentences.txt """ import gzip ...
""" This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file. TSDAE will be training using these sentences. Checkpoints are stored every 500 steps to the output folder. Usage: python train_tsdae_from_file.py path/to/sentences.txt """ from sentenc...
from typing import Any, Dict, List, Optional, Tuple from copy import deepcopy from presidio_anonymizer.operators import Operator, OperatorType from llama_index.core.postprocessor.types import BaseNodePostprocessor from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle from presidio_analyzer impo...
from typing import Any, Dict, List, Optional, Tuple from copy import deepcopy from presidio_anonymizer.operators import Operator, OperatorType from llama_index.core.postprocessor.types import BaseNodePostprocessor from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle from presidio_analyzer impo...
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any from sentence_transformers.evaluation import TranslationEvaluator if TYPE_CHECKING: import numpy as np from torch import Tensor from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder logger = ...
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any from sentence_transformers.evaluation import TranslationEvaluator if TYPE_CHECKING: import numpy as np from torch import Tensor from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder logger = ...
import logging from langchain_core.callbacks import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) from langchain_core.documents import Document from langchain_core.language_models import BaseLLM from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts imp...
import logging from langchain_core.callbacks import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) from langchain_core.documents import Document from langchain_core.language_models import BaseLLM from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts imp...
from dataclasses import dataclass from functools import partial from typing import Callable import torch import torchaudio from torchaudio.models import conv_tasnet_base, hdemucs_high @dataclass class SourceSeparationBundle: """torchaudio.pipelines.SourceSeparationBundle() Dataclass that bundles components...
from dataclasses import dataclass from functools import partial from typing import Callable import torch import torchaudio from torchaudio.models import conv_tasnet_base, hdemucs_high @dataclass class SourceSeparationBundle: """torchaudio.pipelines.SourceSeparationBundle() Dataclass that bundles components...
"""MistralAI embeddings file.""" from typing import Any, List, Optional from llama_index.core.base.embeddings.base import ( DEFAULT_EMBED_BATCH_SIZE, BaseEmbedding, ) from llama_index.core.bridge.pydantic import PrivateAttr from llama_index.core.callbacks.base import CallbackManager from llama_index.core.base...
"""MistralAI embeddings file.""" from typing import Any, List, Optional from llama_index.core.base.embeddings.base import ( DEFAULT_EMBED_BATCH_SIZE, BaseEmbedding, ) from llama_index.core.bridge.pydantic import PrivateAttr from llama_index.core.callbacks.base import CallbackManager from llama_index.core.base...
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.utils import collect_env as collect_base_env from mmcv.utils import get_git_hash import mmdet def collect_env(): """Collect the information of the running environments.""" env_info = collect_base_env() env_info['MMDetection'] = mmdet.__version__ +...
from mmcv.utils import collect_env as collect_base_env from mmcv.utils import get_git_hash import mmdet def collect_env(): """Collect the information of the running environments.""" env_info = collect_base_env() env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7] return env_info ...
from docarray import Document, DocumentArray import numpy as np def find_random(da, target_certainty): return da.find( DocumentArray([Document(embedding=np.random.randint(10, size=10))]), query_params={"certainty": target_certainty}, additional=['certainty'], )[0] def test_certainty_...
from docarray import Document, DocumentArray import numpy as np def find_random(da, target_certainty): return da.find( DocumentArray([Document(embedding=np.random.randint(10, size=10))]), query_params={"certainty": target_certainty}, )[0] def test_certainty_filter(start_storage): nrof_do...
""" Computes embeddings """ import numpy as np import pytest from typing import Optional from sentence_transformers import SentenceTransformer @pytest.mark.parametrize("normalize_embeddings", (False, True)) @pytest.mark.parametrize("prompt_name", (None, "retrieval")) def test_encode_multi_process( stsb_bert_tin...
""" Computes embeddings """ import numpy as np import pytest from typing import Optional from sentence_transformers import SentenceTransformer @pytest.mark.parametrize("normalize_embeddings", (False, True)) @pytest.mark.parametrize("prompt_name", (None, "retrieval")) def test_encode_multi_process( stsb_bert_ti...
from docarray.typing.id import ID from docarray.typing.tensor.audio import AudioNdArray from docarray.typing.tensor.embedding.embedding import AnyEmbedding from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.tensor import AnyTensor from docarray.typing.url import ( AnyUrl, AudioUrl, ...
from docarray.typing.id import ID from docarray.typing.tensor.audio import AudioNdArray from docarray.typing.tensor.embedding.embedding import Embedding from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.tensor import AnyTensor from docarray.typing.url import ( AnyUrl, AudioUrl, ...
import json import os from typing import Optional, Type from llama_index.core.download.integration import download_integration from llama_index.core.download.pack import ( LLAMA_PACKS_CONTENTS_URL, download_llama_pack_template, track_download, ) from llama_index.core.llama_pack.base import BaseLlamaPack ...
import json import os from typing import Optional, Type from llama_index.core.download.integration import download_integration from llama_index.core.download.pack import ( LLAMA_PACKS_CONTENTS_URL, download_llama_pack_template, track_download, ) from llama_index.core.llama_pack.base import BaseLlamaPack ...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
from typing import List, Optional, TypeVar from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl from docarray.typing.url.mimetypes import TEXT_EXTRA_EXTENSIONS, TEXT_MIMETYPE T = TypeVar('T', bound='TextUrl') @_register_proto(proto_type_name='text_url') class Tex...
import numpy as np from docarray import BaseDoc from docarray.array.stacked.array_stacked import DocArrayStacked from docarray.typing import AnyTensor, NdArray def test_da_init(): class MyDoc(BaseDoc): tensor: AnyTensor name: str docs = [MyDoc(tensor=np.zeros(10), name='hello') for _ in rang...
import numpy as np from docarray import BaseDocument from docarray.array.stacked.array_stacked import DocumentArrayStacked from docarray.typing import AnyTensor, NdArray def test_da_init(): class MyDoc(BaseDocument): tensor: AnyTensor name: str docs = [MyDoc(tensor=np.zeros(10), name='hello'...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import numpy as np import pytest from jina import Document, DocumentArray from jina.executors.metas import get_default_metas from jina_commons.indexers.dump import import_vectors from .. import AnnoySearch...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import numpy as np import pytest from jina import Document, DocumentArray from jina.executors.metas import get_default_metas from jina_commons.indexers.dump import import_vectors from .. import AnnoySearch...
from llama_index.llms.huggingface.base import ( HuggingFaceInferenceAPI, HuggingFaceLLM, TextGenerationInference, ) __all__ = ["HuggingFaceLLM", "HuggingFaceInferenceAPI", "TextGenerationInference"]
from llama_index.llms.huggingface.base import ( HuggingFaceLLM, ) __all__ = ["HuggingFaceLLM"]
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
from typing import TYPE_CHECKING, Any, Dict, Optional, Type, TypeVar, Union import numpy as np from pydantic import Field from docarray.base_doc import BaseDoc from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.tensor.i...
from typing import TYPE_CHECKING, Any, Dict, Optional, Type, TypeVar, Union import numpy as np from pydantic import Field from docarray.base_doc import BaseDoc from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.tensor.i...
from docarray.array.document import DocumentArray
from .document import DocumentArray
from docarray.typing.id import ID from docarray.typing.tensor.embedding.embedding import Embedding from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.tensor import AnyTensor from docarray.typing.url import AnyUrl, ImageUrl, Mesh3DUrl, PointCloud3DUrl, TextUrl __all__ = [ 'NdArray', ...
from docarray.typing.id import ID from docarray.typing.tensor import AnyTensor, NdArray from docarray.typing.tensor.embedding import Embedding from docarray.typing.url import AnyUrl, ImageUrl, Mesh3DUrl, PointCloud3DUrl, TextUrl __all__ = [ 'NdArray', 'Embedding', 'ImageUrl', 'TextUrl', 'Mesh3DUrl'...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class FCOS(SingleStageDetector): """Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_""" def __init__(self, backbone, ...
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class FCOS(SingleStageDetector): """Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_""" def __init__(self, backbone, ...
from .image_tf_encoder import ImageTFEncoder
from .image_tf_encoder import ImageTFEncoder
import importlib.util import warnings from functools import wraps from typing import Optional def is_module_available(*modules: str) -> bool: r"""Returns if a top-level module with :attr:`name` exists *without** importing it. This is generally safer than try-catch block around a `import X`. It avoids thir...
import importlib.util import warnings from functools import wraps from typing import Optional def is_module_available(*modules: str) -> bool: r"""Returns if a top-level module with :attr:`name` exists *without** importing it. This is generally safer than try-catch block around a `import X`. It avoids thir...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Iterable, Optional import torch from jina import DocumentArray, Executor, requests from jina_commons.batching import get_docs_batch_generator from .audio_clip.model import AudioCLIP class A...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Iterable, Optional import torch from jina import DocumentArray, Executor, requests from jina_commons.batching import get_docs_batch_generator from .audio_clip.model import AudioCLIP class A...
from unittest.mock import MagicMock, patch from langchain_huggingface import HuggingFacePipeline DEFAULT_MODEL_ID = "gpt2" def test_initialization_default() -> None: """Test default initialization.""" llm = HuggingFacePipeline() assert llm.model_id == DEFAULT_MODEL_ID @patch("transformers.pipeline") ...
from unittest.mock import MagicMock, patch from langchain_huggingface import HuggingFacePipeline DEFAULT_MODEL_ID = "gpt2" def test_initialization_default() -> None: """Test default initialization""" llm = HuggingFacePipeline() assert llm.model_id == DEFAULT_MODEL_ID @patch("transformers.pipeline") ...
_base_ = './retinanet_r50-caffe_fpn_1x_coco.py' train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomChoiceResize', scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], ke...
_base_ = './retinanet_r50-caffe_fpn_1x_coco.py' train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomChoiceResize', scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], keep...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
import requests from packaging import version from typing import Union, List, Optional from llama_index.core.base.llms.types import ( ChatResponse, ) def get_max_input_tokens(url: str) -> Union[int, None]: url = f"{url}/info" model_info = dict(requests.get(url).json()) tgi_version = model_info.get("ve...
import requests from packaging import version from typing import Sequence, Union, List, Optional from llama_index.core.base.llms.types import ( ChatMessage, ChatResponse, ) from text_generation.types import ( Message, ) def resolve_tgi_function_call(url: str) -> bool: url = f"{url}/info" model_inf...