input: string, lengths 33–5k
output: string, lengths 32–5k
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
TEXT_MIMETYPE = 'text' AUDIO_MIMETYPE = 'audio' IMAGE_MIMETYPE = 'image' OBJ_MIMETYPE = 'application/x-tgif' VIDEO_MIMETYPE = 'video' MESH_EXTRA_EXTENSIONS = [ '3ds', '3mf', 'ac', 'ac3d', 'amf', 'assimp', 'bvh', 'cob', 'collada', 'ctm', 'dxf', 'e57', 'fbx', 'gltf...
import contextlib import os import sqlite3 import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def _check_sql_dataset(dataset, expected_f...
import contextlib import os import sqlite3 import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def _check_sql_dataset(dataset, expected_f...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.document_loaders.parsers.html.bs4 import BS4HTMLParser # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.document_loaders.parsers.html.bs4 import BS4HTMLParser # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling ...
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import pytest from mmengine import Config, DefaultScope from mmengine.hub import get_config, get_model from mmengine.utils import get_installed_path, is_installed data_path = osp.join(osp.dirname(osp.dirname(__file__)), 'data/') # mmdet has a mo...
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import pytest from mmengine import Config, DefaultScope from mmengine.hub import get_config, get_model from mmengine.utils import get_installed_path, is_installed data_path = osp.join(osp.dirname(osp.dirname(__file__)), 'data/') # mmdet has a mo...
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' train_pipeline = [ dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), dict( type='InstaBoost', action_candidate=('normal', 'horizontal', 'skip'), action_prob=(1, 0, 0), scale=(0.8, 1.2), dx=15, ...
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' train_pipeline = [ dict( type='LoadImageFromFile', file_client_args={{_base_.file_client_args}}), dict( type='InstaBoost', action_candidate=('normal', 'horizontal', 'skip'), action_prob=(1, 0, 0), scale=(0.8, 1...
from typing import List, Optional import torchaudio from torchaudio._internal.module_utils import deprecated from . import utils # TODO: Once legacy global backend is removed, move this to torchaudio.__init__ def _init_backend(): torchaudio.info = utils.get_info_func() torchaudio.load = utils.get_load_func(...
import warnings from typing import List, Optional import torchaudio from . import utils # TODO: Once legacy global backend is removed, move this to torchaudio.__init__ def _init_backend(): torchaudio.info = utils.get_info_func() torchaudio.load = utils.get_load_func() torchaudio.save = utils.get_save_fu...
from __future__ import annotations from collections.abc import Collection from dataclasses import dataclass, field from typing import Any, Callable import torch from sentence_transformers.data_collator import SentenceTransformerDataCollator @dataclass class CrossEncoderDataCollator(SentenceTransformerDataCollator)...
from __future__ import annotations from dataclasses import field from typing import Any, Callable import torch from sentence_transformers.data_collator import SentenceTransformerDataCollator class CrossEncoderDataCollator(SentenceTransformerDataCollator): """Collator for a CrossEncoder model. This encodes ...
from __future__ import annotations from sentence_transformers.sparse_encoder.evaluation.ReciprocalRankFusionEvaluator import ( ReciprocalRankFusionEvaluator, ) from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import ( SparseBinaryClassificationEvaluator, ) from sentence_...
from __future__ import annotations from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import ( SparseBinaryClassificationEvaluator, ) from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import ( SparseEmbeddingSimilarityEvaluator, ) from...
from abc import abstractmethod from typing import Iterable, Iterator from qdrant_client import QdrantClient from qdrant_client.http.exceptions import UnexpectedResponse from qdrant_client.http.models.models import ( PointIdsList, PointStruct, VectorParams, ) from docarray import Document from docarray.arr...
from abc import abstractmethod from typing import Iterable, Iterator from qdrant_client import QdrantClient from qdrant_client.http.exceptions import UnexpectedResponse from qdrant_client.http.models.models import ( PointIdsList, PointsList, ScrollRequest, PointStruct, ) from docarray import Document ...
"""OpenAI Finetuning.""" import logging import os import time from typing import Any, Optional import openai from openai import OpenAI as SyncOpenAI from openai.types.fine_tuning import FineTuningJob from llama_index.core.llms.llm import LLM from llama_index.finetuning.callbacks.finetuning_handler import OpenAIFineT...
"""OpenAI Finetuning.""" import logging import os import time from typing import Any, Optional import openai from openai import OpenAI as SyncOpenAI from openai.types.fine_tuning import FineTuningJob from llama_index.core.llms.llm import LLM from llama_index.finetuning.callbacks.finetuning_handler import OpenAIFineT...
import torchaudio try: torchaudio._extension._load_lib("libtorchaudio_decoder") from .ctc_decoder import Hypothesis, CTCDecoder, ctc_decoder, lexicon_decoder, download_pretrained_files except ImportError as err: raise ImportError( "flashlight decoder bindings are required to use this functionality....
import torchaudio try: torchaudio._extension._load_lib("libtorchaudio_decoder") from .ctc_decoder import Hypothesis, LexiconDecoder, lexicon_decoder, download_pretrained_files except ImportError as err: raise ImportError( "flashlight decoder bindings are required to use this functionality. " ...
from typing import Dict, Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from pydantic import BaseModel, Field from langchain_community.tools.gmail.base import GmailBaseTool class GetThreadSchema(BaseModel): """Input for GetMessageTool.""" # From https://support.google.com/mai...
from typing import Dict, Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from pydantic import BaseModel, Field from langchain_community.tools.gmail.base import GmailBaseTool class GetThreadSchema(BaseModel): """Input for GetMessageTool.""" # From https://support.google.com/mai...
import os import re import subprocess from keras.src import backend # For torch, use index url to avoid installing nvidia drivers for the test. BACKEND_REQ = { "tensorflow": ("tensorflow-cpu", ""), "torch": ( "torch", "--extra-index-url https://download.pytorch.org/whl/cpu ", ), "jax":...
import os import re import subprocess from keras.src import backend # For torch, use index url to avoid installing nvidia drivers for the test. BACKEND_REQ = { "tensorflow": ("tensorflow-cpu", ""), "torch": ( "torch", "--extra-index-url https://download.pytorch.org/whl/cpu ", ), "jax":...
"""Test embedding utility functions.""" import numpy as np from llama_index.core.indices.query.embedding_utils import ( get_top_k_embeddings, get_top_k_mmr_embeddings, ) def test_get_top_k_mmr_embeddings() -> None: """Test Maximum Marginal Relevance.""" # Results score should follow from the mmr algo...
""" Test embedding utility functions.""" import numpy as np from llama_index.core.indices.query.embedding_utils import ( get_top_k_embeddings, get_top_k_mmr_embeddings, ) def test_get_top_k_mmr_embeddings() -> None: """Test Maximum Marginal Relevance.""" # Results score should follow from the mmr alg...
from markitdown import MarkItDown from llama_index.core.bridge.pydantic import BaseModel, model_validator import os from pathlib import Path from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document from typing import Tuple, Optional, Union, List from typing_extensions imp...
from markitdown import MarkItDown from llama_index.core.bridge.pydantic import BaseModel, model_validator import os from pathlib import Path from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document from typing import Tuple, Optional, Union, List from typing_extensions imp...
from __future__ import annotations from sentence_transformers.losses.GISTEmbedLoss import GISTEmbedLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseGISTEmbedLoss(GISTEmbedLoss): def __init__( self, model: SparseEncoder, guide: SparseEncoder, ...
from __future__ import annotations from sentence_transformers.losses.GISTEmbedLoss import GISTEmbedLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseGISTEmbedLoss(GISTEmbedLoss): def __init__( self, model: SparseEncoder, guide: SparseEncoder, ...
"""Argparser module for Flow""" from jina.parsers.base import set_base_parser from jina.parsers.helper import KVAppendAction, add_arg_group from jina.parsers.orchestrate.base import mixin_essential_parser def mixin_flow_features_parser(parser): """Add the arguments for the Flow features to the parser :param...
"""Argparser module for Flow""" from jina.parsers.base import set_base_parser from jina.parsers.helper import KVAppendAction, add_arg_group from jina.parsers.orchestrate.base import mixin_essential_parser def mixin_flow_features_parser(parser): """Add the arguments for the Flow features to the parser :param...
from .faiss_lmdb import FaissLMDBSearcher
from .faiss_lmdb import FaissLMDBSearcher
from typing import Any, Optional, Type, TypeVar, Union from docarray.base_doc import BaseDoc from docarray.typing import TextUrl from docarray.typing.tensor.embedding import AnyEmbedding T = TypeVar('T', bound='TextDoc') class TextDoc(BaseDoc): """ Document for handling text. It can contain: - a [...
from typing import Any, Optional, Type, TypeVar, Union from docarray.base_doc import BaseDoc from docarray.typing import TextUrl from docarray.typing.tensor.embedding import AnyEmbedding T = TypeVar('T', bound='TextDoc') class TextDoc(BaseDoc): """ Document for handling text. It can contain a TextUrl (`...
from os.path import join from pathlib import Path from typing import Any, Callable, List, Optional, Tuple, Union from PIL import Image from .utils import check_integrity, download_and_extract_archive, list_dir, list_files from .vision import VisionDataset class Omniglot(VisionDataset): """`Omniglot <https://git...
from os.path import join from pathlib import Path from typing import Any, Callable, List, Optional, Tuple, Union from PIL import Image from .utils import check_integrity, download_and_extract_archive, list_dir, list_files from .vision import VisionDataset class Omniglot(VisionDataset): """`Omniglot <https://git...
import types from typing import TYPE_CHECKING from docarray.index.backends.in_memory import InMemoryExactNNIndex from docarray.utils._internal.misc import ( _get_path_from_docarray_root_level, import_library, ) if TYPE_CHECKING: from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401 ...
import types from typing import TYPE_CHECKING from docarray.index.backends.in_memory import InMemoryExactNNIndex from docarray.utils._internal.misc import ( _get_path_from_docarray_root_level, import_library, ) if TYPE_CHECKING: from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401 ...
import subprocess from pathlib import Path import pytest @pytest.fixture(scope='session') def docker_image_name() -> str: return Path(__file__).parents[1].stem.lower() @pytest.fixture(scope='session') def build_docker_image(docker_image_name: str) -> str: subprocess.run(['docker', 'build', '-t', docker_ima...
import random import pytest from jina import Document, DocumentArray @pytest.fixture def documents_chunk(): document_array = DocumentArray() document = Document(tags={'query_size': 35, 'query_price': 31, 'query_brand': 1}) for i in range(0, 10): chunk = Document() for j in range(0, 10): ...
"""Argparser module for pinging""" from jina.parsers.base import set_base_parser def set_ping_parser(parser=None): """Set the parser for `ping` :param parser: an existing parser to build upon :return: the parser """ if not parser: parser = set_base_parser() parser.add_argument( ...
"""Argparser module for pinging""" from jina.parsers.base import set_base_parser def set_ping_parser(parser=None): """Set the parser for `ping` :param parser: an existing parser to build upon :return: the parser """ if not parser: parser = set_base_parser() parser.add_argument( ...
_base_ = '../fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='caffe', in...
_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='caffe', in...
import asyncio from typing import AsyncIterator, Iterator, Optional, Union from jina.helper import get_or_reuse_loop class _RequestsCounter: """Class used to wrap a count integer so that it can be updated inside methods. .. code-block:: python def count_increment(i: int, rc: _RequestsCounter): ...
import asyncio from typing import AsyncIterator, Iterator, Optional, Union from jina.helper import get_or_reuse_loop class _RequestsCounter: """Class used to wrap a count integer so that it can be updated inside methods. .. code-block:: python def count_increment(i: int, rc: _RequestsCounter): ...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.applications import convnext as convnext from keras.applications import densenet as densenet from keras.applications import efficientnet as efficientnet from keras.applications import eff...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.api.applications import convnext from keras.api.applications import densenet from keras.api.applications import efficientnet from keras.api.applications import efficientnet_v2 from keras....
# Copyright (c) OpenMMLab. All rights reserved. from .augment_wrappers import AutoAugment, RandAugment from .colorspace import (AutoContrast, Brightness, Color, ColorTransform, Contrast, Equalize, Invert, Posterize, Sharpness, Solarize, SolarizeAdd) from .formatting imp...
# Copyright (c) OpenMMLab. All rights reserved. from .augment_wrappers import AutoAugment, RandAugment from .colorspace import (AutoContrast, Brightness, Color, ColorTransform, Contrast, Equalize, Invert, Posterize, Sharpness, Solarize, SolarizeAdd) from .formatting imp...
from collections.abc import Sequence from langchain_core.tools import BaseTool def validate_tools_single_input(class_name: str, tools: Sequence[BaseTool]) -> None: """Validate tools for single input. Args: class_name: Name of the class. tools: List of tools to validate. Raises: ...
from collections.abc import Sequence from langchain_core.tools import BaseTool def validate_tools_single_input(class_name: str, tools: Sequence[BaseTool]) -> None: """Validate tools for single input. Args: class_name: Name of the class. tools: List of tools to validate. Raises: ...
"""Zapier Tool.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools import ZapierNLAListActions, ZapierNLARunAction # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warni...
"""Zapier Tool.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools import ZapierNLAListActions, ZapierNLARunAction # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warni...
# Copyright (c) OpenMMLab. All rights reserved. """MMEngine provides 11 root registries to support using modules across projects. More datails can be found at https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. """ from .registry import Registry # manage all kinds of runners like `EpochBasedRunner` an...
# Copyright (c) OpenMMLab. All rights reserved. """MMEngine provides 11 root registries to support using modules across projects. More datails can be found at https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. """ from .registry import Registry # manage all kinds of runners like `EpochBasedRunner` an...
from collections.abc import Sequence from typing import Callable from langchain_core.agents import AgentAction from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import BaseMessage from langchain_core.prompts.chat import ChatPromptTemplate from langchain_core.runnables import Run...
from typing import Callable, List, Sequence, Tuple from langchain_core.agents import AgentAction from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import BaseMessage from langchain_core.prompts.chat import ChatPromptTemplate from langchain_core.runnables import Runnable, Runnabl...
import logging import pathlib from postmarker.core import PostmarkClient from postmarker.models.emails import EmailManager from prisma.enums import NotificationType from pydantic import BaseModel from backend.data.notifications import ( NotificationEventModel, NotificationTypeOverride, T_co, ) from backen...
import logging import pathlib from postmarker.core import PostmarkClient from postmarker.models.emails import EmailManager from prisma.enums import NotificationType from pydantic import BaseModel from backend.data.notifications import ( NotificationEventModel, NotificationTypeOverride, T_co, ) from backen...
import io import pathlib from collections import namedtuple from typing import Any, Dict, Iterator, List, Optional, Tuple, Union from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper from torchvision.prototype.datapoints import Image, Label from torchvision.prototype.datasets.utils import Dataset, GDriveRe...
import io import pathlib from collections import namedtuple from typing import Any, Dict, Iterator, List, Optional, Tuple, Union from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper from torchvision.prototype import features from torchvision.prototype.datasets.utils import Dataset, GDriveResource, OnlineR...
import logging import os import sys from torchaudio._internal.module_utils import fail_with_message, is_module_available, no_op from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _init_dll_path, _init_ffmpeg, _init_sox, _load_lib _LG = logging.getLogger(__name__) # Note: # `_check_cuda_version` is not ...
import logging import os import sys from torchaudio._internal.module_utils import fail_with_message, is_module_available, no_op from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _init_dll_path, _init_ffmpeg, _init_sox, _load_lib _LG = logging.getLogger(__name__) # Note: # `_check_cuda_version` is not ...
""" This script runs the evaluation of an SBERT msmarco model on the MS MARCO dev dataset and reports different performances metrices for cossine similarity & dot-product. Usage: python eval_msmarco.py model_name [max_corpus_size_in_thousands] """ import logging import os import sys import tarfile from sentence_tran...
""" This script runs the evaluation of an SBERT msmarco model on the MS MARCO dev dataset and reports different performances metrices for cossine similarity & dot-product. Usage: python eval_msmarco.py model_name [max_corpus_size_in_thousands] """ from sentence_transformers import LoggingHandler, SentenceTransformer,...
from io import BytesIO from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar import numpy as np from pydantic import parse_obj_as from pydantic.validators import bytes_validator from docarray.typing import AudioNdArray, NdArray, VideoNdArray from docarray.typing.abstract_type import AbstractType from docar...
from io import BytesIO from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar import numpy as np from pydantic import parse_obj_as from pydantic.validators import bytes_validator from docarray.typing import AudioNdArray, NdArray, VideoNdArray from docarray.typing.abstract_type import AbstractType from docar...
import logging import aiohttp from fastapi import APIRouter from backend.util.settings import Settings from .models import TurnstileVerifyRequest, TurnstileVerifyResponse logger = logging.getLogger(__name__) router = APIRouter() settings = Settings() @router.post("/verify", response_model=TurnstileVerifyResponse...
import logging import aiohttp from fastapi import APIRouter from backend.util.settings import Settings from .models import TurnstileVerifyRequest, TurnstileVerifyResponse logger = logging.getLogger(__name__) router = APIRouter() settings = Settings() @router.post("/verify", response_model=TurnstileVerifyResponse...
import os import fsspec import pytest from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from datasets.utils._hf_hub_fixes import dataset_info as hf_api_dataset_info from .utils import require_lz4, require_zstandard def test_extract_path_from_uri(): ...
import os import fsspec import pytest from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from datasets.utils._hf_hub_fixes import dataset_info as hf_api_dataset_info from .utils import require_lz4, require_zstandard def test_extract_path_from_uri(): ...
""" This script downloads the WikiMatrix corpus (https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix) and create parallel sentences tsv files that can be used to extend existent sentence embedding models to new languages. The WikiMatrix mined parallel sentences from Wikipedia in various languages. ...
""" This script downloads the WikiMatrix corpus (https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix) and create parallel sentences tsv files that can be used to extend existent sentence embedding models to new languages. The WikiMatrix mined parallel sentences from Wikipedia in various languages. ...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.optimizers.schedules.learning_rate_schedule import ( CosineDecay as CosineDecay, ) from keras.src.optimizers.schedules.learning_rate_schedule import ( CosineDecayRestarts as C...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecay from keras.src.optimizers.schedules.learning_rate_schedule import ( CosineDecayRestarts, ) from keras.src.optimizers...
import torch from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS from ._bounding_box import BoundingBoxes, BoundingBoxFormat from ._datapoint import Datapoint from ._image import Image from ._mask import Mask from ._torch_function_helpers import set_return_type from ._video import Video if _...
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS from ._bounding_box import BoundingBoxes, BoundingBoxFormat from ._datapoint import Datapoint from ._image import Image from ._mask import Mask from ._video import Video if _WARN_ABOUT_BETA_TRANSFORMS: import warnings warnings.warn...
import numpy as np import pytest from docarray.computation.numpy_backend import NumpyCompBackend def test_to_device(): with pytest.raises(NotImplementedError): NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta') @pytest.mark.parametrize( 'array,result', [ (np.zeros((5)), 1), ...
import numpy as np import pytest from docarray.computation.numpy_backend import NumpyCompBackend def test_to_device(): with pytest.raises(NotImplementedError): NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta') def test_empty(): array = NumpyCompBackend.empty((10, 3)) assert array.shape ...
"""Class for a VectorStore-backed memory object.""" from collections.abc import Sequence from typing import Any, Optional, Union from langchain_core._api import deprecated from langchain_core.documents import Document from langchain_core.memory import BaseMemory from langchain_core.vectorstores import VectorStoreRetr...
"""Class for a VectorStore-backed memory object.""" from collections.abc import Sequence from typing import Any, Optional, Union from langchain_core._api import deprecated from langchain_core.documents import Document from langchain_core.vectorstores import VectorStoreRetriever from pydantic import Field from langch...
import logging import random from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/spl...
import logging import random from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/spl...
import contextlib import os import shutil import threading import time import pytest from jina import Client, DocumentArray, Executor, Flow, requests, Deployment from jina.helper import random_port cur_dir = os.path.dirname(__file__) @contextlib.contextmanager def _update_file(input_file_path, output_file_path, te...
import contextlib import os import shutil import threading import time import pytest from jina import Client, DocumentArray, Executor, Flow, requests from jina.helper import random_port cur_dir = os.path.dirname(__file__) @contextlib.contextmanager def _update_file(input_file_path, output_file_path, temp_path): ...
"""Integration test for JIRA API Wrapper.""" from langchain_community.utilities.jira import JiraAPIWrapper def test_search() -> None: """Test for Searching issues on JIRA""" jql = "project = TP" jira = JiraAPIWrapper() output = jira.run("jql", jql) assert "issues" in output def test_getprojects...
"""Integration test for JIRA API Wrapper.""" from langchain_community.utilities.jira import JiraAPIWrapper def test_search() -> None: """Test for Searching issues on JIRA""" jql = "project = TP" jira = JiraAPIWrapper() # type: ignore[call-arg] output = jira.run("jql", jql) assert "issues" in out...
__version__ = '0.13.11' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install()
__version__ = '0.13.10' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install()
import asyncio import os import random import string import tempfile import time import pytest from jina import helper @pytest.fixture(scope='function') def random_workspace_name(): """Generate a random workspace name with digits and letters.""" rand = ''.join(random.choices(string.ascii_uppercase + string....
import asyncio import os import random import string import tempfile import time import pytest from jina import helper @pytest.fixture(scope='function') def random_workspace_name(): """Generate a random workspace name with digits and letters.""" rand = ''.join(random.choices(string.ascii_uppercase + string....
import os from typing import Dict DEPLOYMENT_FILES = [ 'statefulset-executor', 'deployment-executor', 'deployment-gateway', 'deployment-uses-before', 'deployment-uses-after', 'deployment-uses-before-after', ] cur_dir = os.path.dirname(__file__) DEFAULT_RESOURCE_DIR = os.path.join( cur_dir,...
import os from typing import Dict DEPLOYMENT_FILES = [ 'statefulset-executor', 'deployment-executor', 'deployment-gateway', 'deployment-uses-before', 'deployment-uses-after', 'deployment-uses-before-after', ] cur_dir = os.path.dirname(__file__) DEFAULT_RESOURCE_DIR = os.path.join( cur_dir,...
from typing import Any, Union from langchain_core.utils.json import parse_json_markdown from typing_extensions import override from langchain.evaluation.schema import StringEvaluator class JsonSchemaEvaluator(StringEvaluator): """An evaluator that validates a JSON prediction against a JSON schema reference. ...
from typing import Any, Union from langchain_core.utils.json import parse_json_markdown from langchain.evaluation.schema import StringEvaluator class JsonSchemaEvaluator(StringEvaluator): """An evaluator that validates a JSON prediction against a JSON schema reference. This evaluator checks if a given JSON...
"""Simple Reader that reads transcript and general info of Bilibili video.""" import warnings from typing import Any, List from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class BilibiliTranscriptReader(BaseReader): """Bilibili Transcript and video info reader.""...
"""Simple Reader that reads transcript and general info of Bilibili video.""" import warnings from typing import Any, List from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class BilibiliTranscriptReader(BaseReader): """Bilibili Transcript and video info reader.""...
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.' __license__ = 'Apache-2.0' from typing import Any, Iterable, Optional import librosa as lr import numpy as np import torch from jina import DocumentArray, Executor, requests from .audio_clip.model import AudioCLIP class AudioCLIPEncode...
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.' __license__ = 'Apache-2.0' from typing import Any, Iterable, Optional import librosa as lr import numpy as np import torch from jina import DocumentArray, Executor, requests from jina.excepts import BadDocType from .audio_clip.model impo...
""" Each spoke runs in an isolated process. We leverage the seccomp and setrlimit system utilities to restrict access to system calls and set limits on the resources a process can consume. To implement them, we define several helper functions here, which can be configured to meet specific security or system requirement...
""" Each spoke runs in an isolated process. We leverage the seccomp and setrlimit system utilities to restrict access to system calls and set limits on the resources a process can consume. To implement them, we define several helper functions here, which can be configured to meet specific security or system requirement...
import csv import gzip import os from . import InputExample class STSDataReader: """Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx) Default values expect a tab-separated file with the first & second column the sentence pair and third column ...
from . import InputExample import csv import gzip import os class STSDataReader: """Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx) Default values expect a tab-separated file with the first & second column the sentence pair and third column t...
import csv import logging import os from typing import List import numpy as np from sentence_transformers import InputExample logger = logging.getLogger(__name__) class CEBinaryAccuracyEvaluator: """ This evaluator can be used with the CrossEncoder class. It is designed for CrossEncoders with 1 output...
import logging import os import csv from typing import List from ... import InputExample import numpy as np logger = logging.getLogger(__name__) class CEBinaryAccuracyEvaluator: """ This evaluator can be used with the CrossEncoder class. It is designed for CrossEncoders with 1 output. It measures the ...
_base_ = 'yolact_r50_1x8_coco.py' # optimizer optim_wrapper = dict( type='OptimWrapper', optimizer=dict(lr=8e-3), clip_grad=dict(max_norm=35, norm_type=2)) # learning rate max_epochs = 55 param_scheduler = [ dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000), dict( t...
_base_ = 'yolact_r50_1x8_coco.py' optimizer = dict(type='SGD', lr=8e-3, momentum=0.9, weight_decay=5e-4) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=1000, warmup_ratio=0.1, step=[20, 42, 49, 52]) ...
""" ======================== Decision Tree Regression ======================== In this example, we demonstrate the effect of changing the maximum depth of a decision tree on how it fits to the data. We perform this once on a 1D regression task and once on a multi-output regression task. """ # Authors: The scikit-learn...
""" =================================================================== Decision Tree Regression =================================================================== A 1D regression with decision tree. The :ref:`decision trees <tree>` is used to fit a sine curve with addition noisy observation. As a result, it learns ...
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_doc import BaseDoc from docarray.documents.point_cloud.points_and_colors import PointsAndColors from docarray.typing import AnyEmbedding, PointCloud3DUrl from docarray.typing.tensor.abstract_tensor import Abstr...
from typing import Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_doc import BaseDoc from docarray.documents.point_cloud.points_and_colors import PointsAndColors from docarray.typing import AnyEmbedding, PointCloud3DUrl from docarray.typing.tensor.abstract_tensor import AbstractTensor from ...
# Copyright (c) OpenMMLab. All rights reserved. from .collect_env import collect_env from .compat_config import compat_cfg from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean, sync_random_seed) from .logger import get_caller_name, get_root_logger, log_img_scale from .memory i...
# Copyright (c) OpenMMLab. All rights reserved. from .collect_env import collect_env from .compat_config import compat_cfg from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean, sync_random_seed) from .logger import get_caller_name, get_root_logger, log_img_s...
from collections.abc import AsyncIterator, Iterator, Sequence from typing import ( Any, Callable, Optional, TypeVar, Union, ) from langchain_core.stores import BaseStore K = TypeVar("K") V = TypeVar("V") class EncoderBackedStore(BaseStore[K, V]): """Wraps a store with key and value encoders/...
from typing import ( Any, AsyncIterator, Callable, Iterator, List, Optional, Sequence, Tuple, TypeVar, Union, ) from langchain_core.stores import BaseStore K = TypeVar("K") V = TypeVar("V") class EncoderBackedStore(BaseStore[K, V]): """Wraps a store with key and value enc...
"""Load agent.""" import contextlib from collections.abc import Sequence from typing import Any, Optional from langchain_core._api import deprecated from langchain_core.callbacks import BaseCallbackManager from langchain_core.language_models import BaseLanguageModel from langchain_core.tools import BaseTool from lan...
"""Load agent.""" import contextlib from collections.abc import Sequence from typing import Any, Optional from langchain_core._api import deprecated from langchain_core.callbacks import BaseCallbackManager from langchain_core.language_models import BaseLanguageModel from langchain_core.tools import BaseTool from lan...
from typing import Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_document import BaseDocument from docarray.typing import AnyEmbedding, AudioUrl from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.t...
from typing import Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_document import BaseDocument from docarray.typing import AnyEmbedding, AudioUrl from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.t...
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET from mmdet.models.builder import HEADS from .base_panoptic_fusion_head import BasePanopticFusionHead @HEADS.register_module() class HeuristicFusionHead(BasePanopticFusionHead): """Fusion Head wit...
import torch from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET from mmdet.models.builder import HEADS from .base_panoptic_fusion_head import BasePanopticFusionHead @HEADS.register_module() class HeuristicFusionHead(BasePanopticFusionHead): """Fusion Head with Heuristic method.""" def __init__(self, ...
import numpy as np from docarray.array import DocumentArray from docarray.document import BaseDocument from docarray.typing import NdArray def test_get_bulk_attributes_function(): class Mmdoc(BaseDocument): text: str tensor: NdArray N = 10 da = DocumentArray[Mmdoc]( (Mmdoc(text=...
import numpy as np from docarray.array import DocumentArray from docarray.document import BaseDocument from docarray.typing import NdArray def test_get_bulk_attributes_function(): class Mmdoc(BaseDocument): text: str tensor: NdArray N = 10 da = DocumentArray[Mmdoc]( (Mmdoc(text=...
from __future__ import annotations import logging from typing import Literal import torch from torch import Tensor from sentence_transformers.models.InputModule import InputModule from .tokenizer import WhitespaceTokenizer logger = logging.getLogger(__name__) class BoW(InputModule): """Implements a Bag-of-Wo...
from __future__ import annotations import json import logging import os from typing import Literal import torch from torch import Tensor, nn from .tokenizer import WhitespaceTokenizer logger = logging.getLogger(__name__) class BoW(nn.Module): """Implements a Bag-of-Words (BoW) model to derive sentence embeddi...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from pathlib import Path from typing import List import pytest from jina import Document, DocumentArray, Executor from laser_encoder import LaserEncoder _EMBEDDING_DIM = 1024 @pytest.fixture(scope='session') ...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from pathlib import Path from typing import List import pytest from jina import Document, DocumentArray, Executor from laser_encoder import LaserEncoder _EMBEDDING_DIM = 1024 @pytest.fixture(scope='session') ...
# Copyright (c) OpenMMLab. All rights reserved. from .data_preprocessor import (BatchFixedSizePad, BatchResize, BatchSyncRandomResize, DetDataPreprocessor, MultiBranchDataPreprocessor) __all__ = [ 'DetDataPreprocessor', 'BatchSyncRandomResize', 'Batch...
# Copyright (c) OpenMMLab. All rights reserved. from .data_preprocessor import (BatchFixedSizePad, BatchSyncRandomResize, DetDataPreprocessor, MultiBranchDataPreprocessor) __all__ = [ 'DetDataPreprocessor', 'BatchSyncRandomResize', 'BatchFixedSizePad'...
from functools import partial from typing import Any, Optional import torch import torch.nn as nn from ..transforms._presets import ImageClassification from ..utils import _log_api_usage_once from ._api import register_model, Weights, WeightsEnum from ._meta import _IMAGENET_CATEGORIES from ._utils import _ovewrite_n...
from functools import partial from typing import Any, Optional import torch import torch.nn as nn from ..transforms._presets import ImageClassification from ..utils import _log_api_usage_once from ._api import register_model, Weights, WeightsEnum from ._meta import _IMAGENET_CATEGORIES from ._utils import _ovewrite_n...
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class SparkDatasetReader(AbstractDatasetReader): """A dataset reader that reads from a Spark DataFrame. ...
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class SparkDatasetReader(AbstractDatasetReader): """A dataset reader that reads from a Spark DataFrame. ...
from typing import Any import pytest from langchain_tests.conftest import CustomPersister, CustomSerializer from langchain_tests.conftest import _base_vcr_config as _base_vcr_config from vcr import VCR # type: ignore[import-untyped] def remove_request_headers(request: Any) -> Any: for k in request.headers: ...
from typing import Any import pytest from langchain_tests.conftest import YamlGzipSerializer from langchain_tests.conftest import _base_vcr_config as _base_vcr_config from vcr import VCR # type: ignore[import-untyped] def remove_request_headers(request: Any) -> Any: for k in request.headers: request.hea...
import warnings from typing import Any, List, Union import PIL.Image import torch from torchvision.prototype import features from torchvision.transforms import functional as _F @torch.jit.unused def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image: call = ", num_output_channe...
import warnings from typing import Any, List, Union import PIL.Image import torch from torchvision.prototype import features from torchvision.transforms import functional as _F @torch.jit.unused def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image: call = ", num_output_channe...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine.data import InstanceData from mmdet.models.dense_heads import NASFCOSHead class TestNASFCOSHead(TestCase): def test_nasfcos_head_loss(self): """Tests nasfcos head loss when truth is empty and non-em...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine.data import InstanceData from mmdet.models.dense_heads import NASFCOSHead class TestNASFCOSHead(TestCase): def test_nasfcos_head_loss(self): """Tests nasfcos head loss when truth is empty and non-em...
from typing import Literal from pydantic import SecretStr from backend.data.model import ( APIKeyCredentials, CredentialsField, CredentialsMetaInput, OAuth2Credentials, ) from backend.util.settings import Secrets secrets = Secrets() GITHUB_OAUTH_IS_CONFIGURED = bool( secrets.github_client_id and ...
from typing import Literal from autogpt_libs.supabase_integration_credentials_store.types import ( APIKeyCredentials, OAuth2Credentials, ) from pydantic import SecretStr from backend.data.model import CredentialsField, CredentialsMetaInput from backend.util.settings import Secrets secrets = Secrets() GITHUB_...
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET from mmdet.registry import MODELS from .base_panoptic_fusion_head import BasePanopticFusionHead @MODELS.register_module() class HeuristicFusionHead(BasePanopticFusionHead): """Fusion Head...
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET from mmdet.models.builder import HEADS from .base_panoptic_fusion_head import BasePanopticFusionHead @HEADS.register_module() class HeuristicFusionHead(BasePanopticFusionHead): """Fusion ...
import numpy as np import pytest from docarray.proto import DocumentProto, NodeProto from docarray.typing import NdArray @pytest.mark.proto def test_ndarray(): original_ndarray = np.zeros((3, 224, 224)) custom_ndarray = NdArray._docarray_from_native(original_ndarray) tensor = NdArray.from_protobuf(cus...
import numpy as np from docarray.proto import DocumentProto, NodeProto from docarray.typing import NdArray def test_ndarray(): original_ndarray = np.zeros((3, 224, 224)) custom_ndarray = NdArray._docarray_from_native(original_ndarray) tensor = NdArray.from_protobuf(custom_ndarray.to_protobuf()) a...
_base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # model settings input_size = 300 model = dict( bbox_head=dict( type='SSDHead', anchor_generator=dict( type='LegacySSDAnchorGene...
_base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # model settings input_size = 300 model = dict( bbox_head=dict( type='SSDHead', anchor_generator=dict( type='LegacySSDAnchorGene...
import pytest from jina.enums import ProtocolType from jina.helper import ArgNamespace from jina.parsers import set_gateway_parser, set_pod_parser @pytest.mark.parametrize( 'port,expected_port', [ ('12345', [12345]), ([12345], [12345]), ([12345, 12344], [12345, 12344]), ], ) @pyte...
import pytest from jina.enums import GatewayProtocolType from jina.helper import ArgNamespace from jina.parsers import set_gateway_parser, set_pod_parser @pytest.mark.parametrize( 'port,expected_port', [ ('12345', [12345]), ([12345], [12345]), ([12345, 12344], [12345, 12344]), ], ...
import re from typing import Any, Optional from langchain_text_splitters import RecursiveCharacterTextSplitter class JSFrameworkTextSplitter(RecursiveCharacterTextSplitter): """Text splitter that handles React (JSX), Vue, and Svelte code. This splitter extends RecursiveCharacterTextSplitter to handle Re...
import re from typing import Any, Optional from langchain_text_splitters import RecursiveCharacterTextSplitter class JSFrameworkTextSplitter(RecursiveCharacterTextSplitter): """Text splitter that handles React (JSX), Vue, and Svelte code. This splitter extends RecursiveCharacterTextSplitter to handle Re...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class NASFCOS(SingleStageDetector): """NAS-FCOS: Fast Neural Architecture Search for Object Detection. https://arxiv.org/abs/1906.0442 """ def __i...
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class NASFCOS(SingleStageDetector): """NAS-FCOS: Fast Neural Architecture Search for Object Detection. https://arxiv.org/abs/1906.0442 """ def __...
import types from typing import Any import torch._C class _ClassNamespace(types.ModuleType): def __init__(self, name: str) -> None: super().__init__("torch.classes" + name) self.name = name def __getattr__(self, attr: str) -> Any: proxy = torch._C._get_custom_class_python_wrapper(sel...
# mypy: allow-untyped-defs import types import torch._C class _ClassNamespace(types.ModuleType): def __init__(self, name): super().__init__("torch.classes" + name) self.name = name def __getattr__(self, attr): proxy = torch._C._get_custom_class_python_wrapper(self.name, attr) ...
import torch def get_modules(use_v2): # We need a protected import to avoid the V2 warning in case just V1 is used if use_v2: import torchvision.datapoints import torchvision.transforms.v2 import v2_extras return torchvision.transforms.v2, torchvision.datapoints, v2_extras ...
import torch def get_modules(use_v2): # We need a protected import to avoid the V2 warning in case just V1 is used if use_v2: import torchvision.datapoints import torchvision.transforms.v2 import v2_extras return torchvision.transforms.v2, torchvision.datapoints, v2_extras ...
import os from jina import Flow, DocumentArray cur_dir = os.path.dirname(__file__) def test_install_reqs(): f = Flow().add( install_requirements=True, uses=os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'), ) with f: resp = f.post(on='/', inputs=DocumentArray.empty(2)) ...
import os from jina import Flow, DocumentArray cur_dir = os.path.dirname(__file__) def test_install_reqs(): f = Flow().add(install_requirements=True, uses=os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml')) with f: resp = f.post(on='/', inputs=DocumentArray.empty(2)) assert len(resp) == 2...
# Copyright (c) OpenMMLab. All rights reserved. from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner, MaxIoUAssigner, RegionAssigner) from .builder import build_assigner, build_bbox_coder, build_sampler from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxC...
# Copyright (c) OpenMMLab. All rights reserved. from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner, MaxIoUAssigner, RegionAssigner) from .builder import build_assigner, build_bbox_coder, build_sampler from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxC...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.constraints import deserialize as deserialize from keras.src.constraints import get as get from keras.src.constraints import serialize as serialize from keras.src.constraints.constrai...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.constraints import deserialize from keras.src.constraints import get from keras.src.constraints import serialize from keras.src.constraints.constraints import Constraint from keras.sr...
from typing import Optional from docarray.document import BaseDocument from docarray.typing import TextUrl from docarray.typing.tensor.embedding import AnyEmbedding class Text(BaseDocument): """ Document for handling text. It can contain a TextUrl (`Text.url`), a str (`Text.text`), and an AnyEmbeddin...
from typing import Optional from docarray.document import BaseDocument from docarray.typing import TextUrl from docarray.typing.tensor.embedding import Embedding class Text(BaseDocument): """ Document for handling text. It can contain a TextUrl (`Text.url`), a str (`Text.text`), and an Embedding (`Te...
_base_ = ['./yolov3_mobilenetv2_mstrain-416_300e_coco.py'] # yapf:disable model = dict( bbox_head=dict( anchor_generator=dict( base_sizes=[[(220, 125), (128, 222), (264, 266)], [(35, 87), (102, 96), (60, 170)], [(10, 15), (24, 36), (72, 42)]]))) #...
_base_ = ['./yolov3_mobilenetv2_mstrain-416_300e_coco.py'] # yapf:disable model = dict( bbox_head=dict( anchor_generator=dict( base_sizes=[[(220, 125), (128, 222), (264, 266)], [(35, 87), (102, 96), (60, 170)], [(10, 15), (24, 36), (72, 42)]]))) #...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
import os import numpy as np import pytest from pydantic.tools import parse_obj_as, schema_json_of from docarray.base_doc.io.json import orjson_dumps from docarray.typing import Mesh3DUrl, NdArray from docarray.typing.url.mimetypes import ( OBJ_MIMETYPE, AUDIO_MIMETYPE, VIDEO_MIMETYPE, IMAGE_MIMETYPE,...
"""Output classes. **Output** classes are used to represent the output of a language model call and the output of a chat. The top container for information is the `LLMResult` object. `LLMResult` is used by both chat models and LLMs. This object contains the output of the language model and any additional information ...
"""Output classes. **Output** classes are used to represent the output of a language model call and the output of a chat. The top container for information is the `LLMResult` object. `LLMResult` is used by both chat models and LLMs. This object contains the output of the language model and any additional information ...
from abc import ABC, abstractmethod from typing import Callable, List, Sequence, Optional, Union from llama_index.core.agent.workflow.workflow_events import ( AgentOutput, ToolCallResult, ) from llama_index.core.bridge.pydantic import ( BaseModel, Field, ConfigDict, field_validator, ) from llam...
from abc import ABC, abstractmethod from typing import Callable, List, Sequence, Optional, Union from llama_index.core.agent.workflow.workflow_events import ( AgentOutput, ToolCallResult, ) from llama_index.core.bridge.pydantic import ( BaseModel, Field, ConfigDict, field_validator, ) from llam...
from docarray.typing.id import ID from docarray.typing.tensor import Tensor, TorchTensor from docarray.typing.tensor.embedding import Embedding from docarray.typing.url import AnyUrl, ImageUrl __all__ = [ 'TorchTensor', 'Tensor', 'Embedding', 'ImageUrl', 'AnyUrl', 'ID', ]
from docarray.typing.embedding import Embedding from docarray.typing.id import ID from docarray.typing.tensor import Tensor, TorchTensor from docarray.typing.url import AnyUrl, ImageUrl __all__ = ['Tensor', 'Embedding', 'ImageUrl', 'AnyUrl', 'ID', 'TorchTensor']
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig from ..builder import DETECTORS from .single_stage_instance_seg import SingleStageInstanceSegmentor @DETECTORS.register_module() class SOLOv2(SingleStageInstanceSegmentor): """`SOLOv2: Dynamic an...
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage_instance_seg import SingleStageInstanceSegmentor @DETECTORS.register_module() class SOLOv2(SingleStageInstanceSegmentor): """`SOLOv2: Dynamic and Fast Instance Segmentation <https://arxiv.org/abs/2003.10152>`_ ...
import logging from backend.data import integrations from backend.data.model import Credentials from ._base import WT, BaseWebhooksManager logger = logging.getLogger(__name__) class ManualWebhookManagerBase(BaseWebhooksManager[WT]): async def _register_webhook( self, credentials: Credentials, ...
import logging from backend.data import integrations from backend.data.model import APIKeyCredentials, Credentials, OAuth2Credentials from ._base import WT, BaseWebhooksManager logger = logging.getLogger(__name__) class ManualWebhookManagerBase(BaseWebhooksManager[WT]): async def _register_webhook( sel...
_base_ = [ '../_base_/models/mask-rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # TODO: delete custom_imports after mmcls supports auto import # please install mmcls>=1.0 # import mmcls.models to trigger register_module in mm...
_base_ = [ '../_base_/models/mask-rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # TODO: delete custom_imports after mmcls supports auto import # please install mmcls>=1.0 # import mmcls.models to trigger register_module in mm...
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.cnn import ConvModule, Linear from mmcv.runner import ModuleList, auto_fp16 from mmdet.models.builder import HEADS from .fcn_mask_head import FCNMaskHead @HEADS.register_module() class CoarseMaskHead(FCNMaskHead): """Coarse mask head used in PointRend. ...
from mmcv.cnn import ConvModule, Linear from mmcv.runner import ModuleList, auto_fp16 from mmdet.models.builder import HEADS from .fcn_mask_head import FCNMaskHead @HEADS.register_module() class CoarseMaskHead(FCNMaskHead): """Coarse mask head used in PointRend. Compared with standard ``FCNMaskHead``, ``Coa...
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'] num_things_classes = 80 num_stuff_classes = 0 num_classes = num_things_classes + num_stuff_classes image_size = (1024, 1024) batch_augments = [ dict( type='BatchFixedSizePad', size=image_size, img_pad_value=0, pad_mask=Tru...
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'] num_things_classes = 80 num_stuff_classes = 0 num_classes = num_things_classes + num_stuff_classes image_size = (1024, 1024) batch_augments = [ dict( type='BatchFixedSizePad', size=image_size, img_pad_value=0, pad_mask=True...
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
"""Test OCI Generative AI embedding service.""" from unittest.mock import MagicMock import pytest from pytest import MonkeyPatch from langchain_community.embeddings import OCIGenAIEmbeddings class MockResponseDict(dict): def __getattr__(self, val): # type: ignore[no-untyped-def] return self[val] @py...
"""Test OCI Generative AI embedding service.""" from unittest.mock import MagicMock import pytest from pytest import MonkeyPatch from langchain_community.embeddings import OCIGenAIEmbeddings class MockResponseDict(dict): def __getattr__(self, val): # type: ignore[no-untyped-def] return self[val] @py...
import keras.src.backend from keras.src import tree from keras.src.layers.layer import Layer from keras.src.random.seed_generator import SeedGenerator from keras.src.utils import backend_utils from keras.src.utils import jax_utils from keras.src.utils import tracking class TFDataLayer(Layer): """Layer that can sa...
import keras.src.backend from keras.src import tree from keras.src.layers.layer import Layer from keras.src.random.seed_generator import SeedGenerator from keras.src.utils import backend_utils from keras.src.utils import tracking class TFDataLayer(Layer): """Layer that can safely be used in a tf.data pipeline. ...
import numpy as np import torch from docarray import BaseDocument, DocumentArray, Image, Text from docarray.typing import ( AnyTensor, AnyUrl, Embedding, ImageUrl, Mesh3DUrl, NdArray, PointCloud3DUrl, TextUrl, TorchEmbedding, TorchTensor, ) from docarray.typing.tensor import NdA...
import numpy as np import torch from docarray import Document, DocumentArray, Image, Text from docarray.typing import ( AnyTensor, AnyUrl, Embedding, ImageUrl, Mesh3DUrl, NdArray, PointCloud3DUrl, TextUrl, TorchEmbedding, TorchTensor, ) from docarray.typing.tensor import NdArray...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Sequence, Union import torch from mmengine.data import BaseDataElement from mmengine.registry import HOOKS from .hook import Hook DATA_BATCH = Optional[Sequence[dict]] @HOOKS.register_module() class EmptyCacheHook(Hook): """Releases a...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Any, Optional, Sequence, Tuple, Union import torch from mmengine.data import BaseDataElement from mmengine.registry import HOOKS from .hook import Hook DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]] @HOOKS.register_module() class Empt...