Columns:
input: string, lengths 33 to 5k characters
output: string, lengths 32 to 5k characters
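The rows below hold one flattened code snippet per line; long rows are truncated by the viewer with `...`. As a minimal sketch of consuming such a two-column export (the `pairs.jsonl` file name is an assumption for illustration, not part of the original), the pairs can be loaded with the `datasets` library:

# Minimal sketch: load input/output code pairs from a local JSON Lines export.
# "pairs.jsonl" is a hypothetical file name; each line is assumed to hold one
# {"input": "<code>", "output": "<code>"} object matching the schema above.
from datasets import load_dataset

pairs = load_dataset("json", data_files="pairs.jsonl", split="train")
print(pairs.column_names)      # ['input', 'output']
print(len(pairs[0]["input"]))  # input strings run 33 to 5k characters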
"""txtai reader.""" from typing import Any, Dict, List import numpy as np from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class TxtaiReader(BaseReader): """ txtai reader. Retrieves documents through an existing in-memory txtai index. These documents...
"""txtai reader.""" from typing import Any, Dict, List import numpy as np from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class TxtaiReader(BaseReader): """txtai reader. Retrieves documents through an existing in-memory txtai index. These documents can ...
from typing import TYPE_CHECKING, Type if TYPE_CHECKING: # pragma: no cover from pandas import DataFrame from docarray.typing import T class DataframeIOMixin: """Save/load from :class:`pandas.dataframe` .. note:: These functions require you to install `pandas` """ def to_dataframe...
from typing import TYPE_CHECKING, Type if TYPE_CHECKING: from pandas import DataFrame from docarray.typing import T class DataframeIOMixin: """Save/load from :class:`pandas.dataframe` .. note:: These functions require you to install `pandas` """ def to_dataframe(self, **kwargs) -> ...
import torch from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda from torchaudio_unittest.models.conformer.conformer_test_impl import ConformerTestImpl @skipIfNoCuda class ConformerFloat32GPUTest(ConformerTestImpl, PytorchTestCase): dtype = torch.float32 device = torch.device("cuda") ...
import torch from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase from torchaudio_unittest.models.conformer.conformer_test_impl import ConformerTestImpl @skipIfNoCuda class ConformerFloat32GPUTest(ConformerTestImpl, PytorchTestCase): dtype = torch.float32 device = torch.device("cuda") ...
import contextlib import json import re from typing import Any, List with contextlib.suppress(ImportError): import yaml from llama_index.core.output_parsers.base import OutputParserException def _marshal_llm_to_json(output: str) -> str: """ Extract a substring containing valid JSON or array from a strin...
import contextlib import json import re from typing import Any, List with contextlib.suppress(ImportError): import yaml from llama_index.core.output_parsers.base import OutputParserException def _marshal_llm_to_json(output: str) -> str: """ Extract a substring containing valid JSON or array from a strin...
from pathlib import Path import pytest from jina import Document, DocumentArray, Executor from jina.excepts import BadDocType def test_load(): segmenter = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml')) assert type(segmenter).__name__ == 'VADSpeechSegmenter' @pytest.mark.parametrize('_t...
from pathlib import Path import pytest from jina import Document, DocumentArray, Executor from jina.excepts import BadDocType from ...vad_speech_segmenter import VADSpeechSegmenter def test_load(): segmenter = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml')) assert type(segmenter).__name_...
# Copyright (c) OpenMMLab. All rights reserved. """Get image metas on a specific dataset. Here is an example to run this script. Example: python tools/misc/get_image_metas.py ${CONFIG} \ --out ${OUTPUT FILE NAME} """ import argparse import csv import os.path as osp from multiprocessing import Pool import mmc...
# Copyright (c) OpenMMLab. All rights reserved. """Get image metas on a specific dataset. Here is an example to run this script. Example: python tools/misc/get_image_metas.py ${CONFIG} \ --out ${OUTPUT FILE NAME} """ import argparse import csv import os.path as osp from multiprocessing import Pool import mmc...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os from pathlib import Path import cv2 import pytest from jina import Executor, Document, DocumentArray from ...yolov5_segmenter import YoloV5Segmenter cur_dir = os.path.dirname(os.path.abspath(__file__...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os from operator import itemgetter import pytest from jina import Executor, Document, DocumentArray import cv2 from ...yolov5_segmenter import YoloV5Segmenter cur_dir = os.path.dirname(os.path.abspath(_...
from typing import Any, Dict, List, Optional from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.callbacks.base import CallbackManager from llama_index.core.constants import DEFAULT_SIMILARITY_TOP_K from llama_index.core.schema import NodeWithScore, QueryBundle from llama_index.core.se...
from typing import Any, Dict, List, Optional from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.callbacks.base import CallbackManager from llama_index.core.constants import DEFAULT_SIMILARITY_TOP_K from llama_index.core.schema import NodeWithScore, QueryBundle from llama_index.core.se...
_base_ = '../ssd/ssd300_coco.py'
model = dict(
    bbox_head=dict(type='PISASSDHead'),
    train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
default_hooks = dict(
    optimizer=dict(
        _delete_=True,
        type='OptimizerHook',
        grad_clip=dict(max_norm=35, norm_type=2)))
_base_ = '../ssd/ssd300_coco.py'
model = dict(
    bbox_head=dict(type='PISASSDHead'),
    train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
import io import PIL.Image import torch from torchvision import tv_tensors from torchvision.io import decode_jpeg, encode_jpeg from torchvision.transforms.functional import pil_to_tensor, to_pil_image from torchvision.utils import _log_api_usage_once from ._utils import _get_kernel, _register_kernel_internal def e...
import PIL.Image import torch from torchvision import tv_tensors from torchvision.transforms.functional import pil_to_tensor, to_pil_image from torchvision.utils import _log_api_usage_once from ._utils import _get_kernel, _register_kernel_internal def erase( inpt: torch.Tensor, i: int, j: int, h: in...
import aiohttp import pytest from jina import Executor, Flow, requests from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet from jina.clients.request.helper import _new_data_request from jina.excepts import BadServer from jina.logging.logger import JinaLogger from jina.types.request.data import DataR...
import aiohttp import pytest from jina import Executor, Flow, requests from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet from jina.clients.request.helper import _new_data_request from jina.excepts import BadServer from jina.logging.logger import JinaLogger from jina.types.request.data import Data...
from docarray import Document, DocumentArray import numpy as np def test_success_find_with_added_kwargs(start_storage, monkeypatch): nrof_docs = 1000 num_candidates = 100 elastic_doc = DocumentArray( storage='elasticsearch', config={ 'n_dim': 3, 'distance': 'l2_nor...
from docarray import Document, DocumentArray import numpy as np def test_success_find_with_added_kwargs(start_storage, monkeypatch): nrof_docs = 1000 num_candidates = 100 elastic_doc = DocumentArray( storage='elasticsearch', config={ 'n_dim': 3, 'distance': 'l2_nor...
from unittest.mock import patch, MagicMock import pytest from llama_index.utils.workflow import ( draw_all_possible_flows, draw_most_recent_execution, ) @pytest.mark.asyncio async def test_workflow_draw_methods(workflow): with patch("pyvis.network.Network") as mock_network: draw_all_possible_flo...
from unittest.mock import patch import pytest from llama_index.utils.workflow import ( draw_all_possible_flows, draw_most_recent_execution, ) @pytest.mark.asyncio async def test_workflow_draw_methods(workflow): with patch("pyvis.network.Network") as mock_network: draw_all_possible_flows(workflow...
from torchaudio import ( # noqa: F401 _extension, compliance, datasets, functional, io, kaldi_io, models, pipelines, sox_effects, transforms, utils, ) from torchaudio.backend import get_audio_backend, list_audio_backends, set_audio_backend try: from .version import __v...
from torchaudio import ( # noqa: F401 _extension, compliance, datasets, functional, io, kaldi_io, models, pipelines, sox_effects, transforms, utils, ) from torchaudio.backend import get_audio_backend, list_audio_backends, set_audio_backend try: from .version import __ve...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from mmdet.models.dense_heads import YOLOXHead def test_yolox_head_loss(): """Tests yolox head loss when truth is empty and non-empty.""" s = 256 img_metas = [{ '...
import mmcv import torch from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from mmdet.models.dense_heads import YOLOXHead def test_yolox_head_loss(): """Tests yolox head loss when truth is empty and non-empty.""" s = 256 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1...
from functools import wraps from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast from backend.data.credit import get_user_credit_model from backend.data.execution import ( ExecutionResult, create_graph_execution, get_execution_results, get_incomplete_executions, get_la...
from functools import wraps from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast from backend.data.credit import get_user_credit_model from backend.data.execution import ( ExecutionResult, create_graph_execution, get_execution_results, get_incomplete_executions, get_la...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os.path as osp import mmcv import numpy as np from mmcv import Config, DictAction from mmdet.core.utils import mask2ndarray from mmdet.datasets.builder import build_dataset from mmdet.registry import VISUALIZERS from mmdet.utils import register_al...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os.path as osp import mmcv import numpy as np from mmcv import Config, DictAction from mmdet.core.utils import mask2ndarray from mmdet.datasets.builder import build_dataset from mmdet.registry import VISUALIZERS from mmdet.utils import register_al...
from .filtering import ( allpass_biquad, band_biquad, bandpass_biquad, bandreject_biquad, bass_biquad, biquad, contrast, dcshift, deemph_biquad, dither, equalizer_biquad, filtfilt, flanger, gain, highpass_biquad, lfilter, lowpass_biquad, overdrive,...
from .filtering import ( allpass_biquad, band_biquad, bandpass_biquad, bandreject_biquad, bass_biquad, biquad, contrast, dcshift, deemph_biquad, dither, equalizer_biquad, filtfilt, flanger, gain, highpass_biquad, lfilter, lowpass_biquad, overdrive,...
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class NASFCOS(SingleStageDetector): """NAS-FCOS: Fast Neural Architecture Search for Object Detection. https://arxiv.org/abs/1906.0442 """ def __...
from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class NASFCOS(SingleStageDetector): """NAS-FCOS: Fast Neural Architecture Search for Object Detection. https://arxiv.org/abs/1906.0442 """ def __init__(self, backbone, ...
_base_ = './mask-rcnn_r50_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
_base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
# Copyright (c) OpenMMLab. All rights reserved. from .approx_max_iou_assigner import ApproxMaxIoUAssigner from .assign_result import AssignResult from .atss_assigner import ATSSAssigner from .base_assigner import BaseAssigner from .center_region_assigner import CenterRegionAssigner from .dynamic_soft_label_assigner imp...
# Copyright (c) OpenMMLab. All rights reserved. from .approx_max_iou_assigner import ApproxMaxIoUAssigner from .assign_result import AssignResult from .atss_assigner import ATSSAssigner from .base_assigner import BaseAssigner from .center_region_assigner import CenterRegionAssigner from .dynamic_soft_label_assigner imp...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='ATSS', data_preprocessor=dict( type='DetDataPreprocessor', mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='ATSS', data_preprocessor=dict( type='DetDataPreprocessor', mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.api import _tf_keras from keras.api import activations from keras.api import applications from keras.api import backend from keras.api import callbacks from keras.api import config from k...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.api import _tf_keras from keras.api import activations from keras.api import applications from keras.api import backend from keras.api import callbacks from keras.api import config from k...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os.path as osp import numpy as np from mmengine.fileio import dump, load from mmengine.utils import mkdir_or_exist, track_parallel_progress prog_description = '''K-Fold coco split. To split coco data for semi-supervised object detection: pyth...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os.path as osp import mmcv import numpy as np prog_description = '''K-Fold coco split. To split coco data for semi-supervised object detection: python tools/misc/split_coco.py ''' def parse_args(): parser = argparse.ArgumentParser() ...
from __future__ import annotations from typing import Any, Callable, List, Tuple, Type, Union import PIL.Image from torchvision import datapoints from torchvision._utils import sequence_to_str from torchvision.transforms.v2.functional import get_dimensions, get_size, is_pure_tensor def get_bounding_boxes(flat_inpu...
from __future__ import annotations from typing import Any, Callable, List, Tuple, Type, Union import PIL.Image from torchvision import datapoints from torchvision._utils import sequence_to_str from torchvision.transforms.v2.functional import get_dimensions, get_size, is_simple_tensor def get_bounding_boxes(flat_in...
import numpy as np import pytest from absl.testing import parameterized from keras.src import layers from keras.src import ops from keras.src import random from keras.src import testing class SolarizationTest(testing.TestCase): def _test_input_output(self, layer, input_value, expected_value, dtype): inpu...
import numpy as np import pytest from absl.testing import parameterized from keras.src import layers from keras.src import ops from keras.src import random from keras.src import testing class SolarizationTest(testing.TestCase, parameterized.TestCase): def _test_input_output(self, layer, input_value, expected_val...
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.load import dataset_module_factory, import_main_c...
import os from tempfile import TemporaryDirectory from unittest import TestCase from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.load import dataset_module_factory, import_main_class from data...
from __future__ import annotations from collections.abc import Sequence from copy import deepcopy from typing import Any, Optional, Union from langchain_core._api.deprecation import deprecated from langchain_core.callbacks import Callbacks from langchain_core.documents import BaseDocumentCompressor, Document from lan...
from __future__ import annotations from collections.abc import Sequence from copy import deepcopy from typing import Any, Optional, Union from langchain_core._api.deprecation import deprecated from langchain_core.callbacks import Callbacks from langchain_core.documents import BaseDocumentCompressor, Document from lan...
"""**Messages** are objects used in prompts and chat conversations. **Class hierarchy:** .. code-block:: BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage --> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChu...
"""**Messages** are objects used in prompts and chat conversations. **Class hierarchy:** .. code-block:: BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage --> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChu...
"""Embedding adapter model.""" import logging from typing import Any, List, Optional, Type, cast from llama_index.core.base.embeddings.base import BaseEmbedding from llama_index.core.bridge.pydantic import PrivateAttr from llama_index.core.callbacks import CallbackManager from llama_index.core.constants import DEFAUL...
"""Embedding adapter model.""" import logging from typing import Any, List, Optional, Type, cast from llama_index.core.base.embeddings.base import BaseEmbedding from llama_index.core.bridge.pydantic import PrivateAttr from llama_index.core.callbacks import CallbackManager from llama_index.core.constants import DEFAUL...
from typing import Dict, Type from llama_index.core.llms.llm import LLM from llama_index.core.llms.mock import MockLLM RECOGNIZED_LLMS: Dict[str, Type[LLM]] = { MockLLM.class_name(): MockLLM, } # Conditionals for llama-cloud support try: from llama_index.llms.openai import OpenAI # pants: no-infer-dep ...
from typing import Dict, Type from llama_index.core.llms.llm import LLM from llama_index.core.llms.mock import MockLLM RECOGNIZED_LLMS: Dict[str, Type[LLM]] = { MockLLM.class_name(): MockLLM, } # Conditionals for llama-cloud support try: from llama_index.llms.openai import OpenAI # pants: no-infer-dep ...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import pytest from simpleranker import SimpleRanker @pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']]) @pytest.mark.parametrize('ranking', ['min', 'max']) def test_ranking( documents_chunk, docu...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import pytest from ...simpleranker import SimpleRanker @pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']]) @pytest.mark.parametrize('ranking', ['min', 'max']) def test_ranking( documents_chunk, ...
_base_ = ['./yolov3_mobilenetv2_mstrain-416_300e_coco.py'] # yapf:disable model = dict( bbox_head=dict( anchor_generator=dict( base_sizes=[[(220, 125), (128, 222), (264, 266)], [(35, 87), (102, 96), (60, 170)], [(10, 15), (24, 36), (72, 42)]]))) #...
_base_ = ['./yolov3_mobilenetv2_mstrain-416_300e_coco.py'] # yapf:disable model = dict( bbox_head=dict( anchor_generator=dict( base_sizes=[[(220, 125), (128, 222), (264, 266)], [(35, 87), (102, 96), (60, 170)], [(10, 15), (24, 36), (72, 42)]]))) #...
from docarray import Document, DocumentArray import numpy as np import pytest @pytest.mark.filterwarnings('ignore::UserWarning') @pytest.mark.parametrize('columns', [[('price', 'int')], {'price': 'int'}]) def test_add_ignore_existing_doc_id(start_storage, columns): elastic_doc = DocumentArray( storage='e...
from docarray import Document, DocumentArray import numpy as np import pytest @pytest.mark.filterwarnings('ignore::UserWarning') def test_add_ignore_existing_doc_id(start_storage): elastic_doc = DocumentArray( storage='elasticsearch', config={ 'n_dim': 3, 'columns': [('pri...
from typing import Optional, List from docarray.base_document.document import BaseDocument def test_base_document_init(): doc = BaseDocument() assert doc.id is not None def test_update(): class MyDocument(BaseDocument): content: str title: Optional[str] = None tags_: List d...
from docarray.base_document.document import BaseDocument


def test_base_document_init():
    doc = BaseDocument()
    assert doc.id is not None
import os from typing import Any, Callable, List, Optional, Tuple import torch.utils.data as data from ..utils import _log_api_usage_once class VisionDataset(data.Dataset): """ Base Class For making datasets which are compatible with torchvision. It is necessary to override the ``__getitem__`` and ``__l...
import os from typing import Any, Callable, List, Optional, Tuple import torch import torch.utils.data as data from ..utils import _log_api_usage_once class VisionDataset(data.Dataset): """ Base Class For making datasets which are compatible with torchvision. It is necessary to override the ``__getitem_...
from typing import Dict, TYPE_CHECKING, Optional if TYPE_CHECKING: # pragma: no cover from docarray import Document from docarray.array.queryset.lookup import Q, LookupNode, LookupLeaf LOGICAL_OPERATORS = {'$and': 'and', '$or': 'or', '$not': True} COMPARISON_OPERATORS = { '$lt': 'lt', '$gt': 'gt', ...
from typing import Dict, TYPE_CHECKING, Optional if TYPE_CHECKING: from docarray import Document from docarray.array.queryset.lookup import Q, LookupNode, LookupLeaf LOGICAL_OPERATORS = {'$and': 'and', '$or': 'or', '$not': True} COMPARISON_OPERATORS = { '$lt': 'lt', '$gt': 'gt', '$lte': 'lte', '...
from collections.abc import Sequence as ABCSequence from typing import Any BASE_TYPES = (int, str, bool, bytes, float) def _is_otel_supported_type(obj: Any) -> bool: # If it's one of the base types if isinstance(obj, BASE_TYPES): return True # If it's a sequence (but not a string or b...
from collections.abc import Sequence as ABCSequence from typing import Any BASE_TYPES = (int, str, bool, bytes, float) def _is_otel_supported_type(obj: Any) -> bool: # If it's one of the base types if isinstance(obj, BASE_TYPES): return True # If it's a sequence (but not a string or byt...
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and i...
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and i...
from backend.app import run_processes from backend.executor import DatabaseManager, ExecutionScheduler from backend.server.rest_api import AgentServer def main(): """ Run all the processes required for the AutoGPT-server REST API. """ run_processes( DatabaseManager(), ExecutionSchedule...
from backend.app import run_processes from backend.executor import ExecutionScheduler from backend.server.rest_api import AgentServer def main(): """ Run all the processes required for the AutoGPT-server REST API. """ run_processes( ExecutionScheduler(), AgentServer(), ) if __nam...
import asyncio import logging import os from jina import __default_host__ from jina.importer import ImportExtensions from jina.serve.runtimes.gateway import GatewayRuntime from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app __all__ = ['WebSocketGatewayRuntime'] class WebSocketGatewayRuntime(Gatewa...
import asyncio import logging import os from jina import __default_host__ from jina.importer import ImportExtensions from jina.serve.runtimes.gateway import GatewayRuntime from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app __all__ = ['WebSocketGatewayRuntime'] class WebSocketGatewayRuntime(Gatewa...
# Copyright (c) OpenMMLab. All rights reserved. from .checkpoint_hook import CheckpointHook from .early_stopping_hook import EarlyStoppingHook from .ema_hook import EMAHook from .empty_cache_hook import EmptyCacheHook from .hook import Hook from .iter_timer_hook import IterTimerHook from .logger_hook import LoggerHook ...
# Copyright (c) OpenMMLab. All rights reserved. from .checkpoint_hook import CheckpointHook from .ema_hook import EMAHook from .empty_cache_hook import EmptyCacheHook from .hook import Hook from .iter_timer_hook import IterTimerHook from .logger_hook import LoggerHook from .naive_visualization_hook import NaiveVisualiz...
# dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='P...
# dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img...
# Copyright (c) OpenMMLab. All rights reserved. import os import pytest import torch import torch.nn as nn from torch.distributed import destroy_process_group, init_process_group from torch.nn.parallel import DataParallel, DistributedDataParallel from mmengine.model import (MMDistributedDataParallel, ...
# Copyright (c) OpenMMLab. All rights reserved. import os import pytest import torch import torch.nn as nn from torch.distributed import destroy_process_group, init_process_group from torch.nn.parallel import DataParallel, DistributedDataParallel from mmengine.model import (MMDistributedDataParallel, ...
# Copyright 2024 The OpenXLA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in ...
# Copyright 2024 The OpenXLA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in ...
import os import torch import torchaudio.prototype.transforms as T import torchaudio.transforms as transforms from torchaudio_unittest.common_utils import TorchaudioTestCase class BatchConsistencyTest(TorchaudioTestCase): def assert_batch_consistency(self, transform, batch, *args, atol=1e-8, rtol=1e-5, seed=42, ...
import os import torch import torchaudio.prototype.transforms as T import torchaudio.transforms as transforms from torchaudio_unittest.common_utils import TorchaudioTestCase class BatchConsistencyTest(TorchaudioTestCase): def assert_batch_consistency(self, transform, batch, *args, atol=1e-8, rtol=1e-5, seed=42, ...
import logging from datasets import load_dataset from sentence_transformers.sparse_encoder import ( SparseEncoder, SparseRerankingEvaluator, ) logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser-ensembledistil") # Load a dataset with ...
import logging from datasets import load_dataset from sentence_transformers.sparse_encoder import ( MLMTransformer, SparseEncoder, SparseRerankingEvaluator, SpladePooling, ) logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO) # Initialize the SPLA...
from pathlib import Path from typing import Union, Optional, Callable, TYPE_CHECKING, Generator if TYPE_CHECKING: # pragma: no cover from docarray import DocumentArray from docarray.typing import T from multiprocessing.pool import ThreadPool, Pool class DataLoaderMixin: @classmethod def dataload...
from pathlib import Path from typing import Union, Optional, Callable, TYPE_CHECKING, Generator if TYPE_CHECKING: from docarray import DocumentArray from docarray.typing import T from multiprocessing.pool import ThreadPool, Pool class DataLoaderMixin: @classmethod def dataloader( cls, ...
# Copyright (c) OpenMMLab. All rights reserved.
from .inference import (async_inference_detector, inference_detector,
                        init_detector)

__all__ = [
    'init_detector',
    'async_inference_detector',
    'inference_detector',
]
# Copyright (c) OpenMMLab. All rights reserved. from .inference import (async_inference_detector, inference_detector, init_detector, show_result_pyplot) from .test import multi_gpu_test, single_gpu_test from .train import (get_root_logger, init_random_seed, set_random_seed, t...
_base_ = [ 'mmcls::_base_/datasets/imagenet_bs256_rsb_a12.py', 'mmcls::_base_/schedules/imagenet_bs2048_rsb.py', 'mmcls::_base_/default_runtime.py' ] model = dict( type='ImageClassifier', backbone=dict( type='mmdet.CSPNeXt', arch='P5', out_indices=(4, ), expand_ratio...
_base_ = [ 'mmcls::_base_/datasets/imagenet_bs256_rsb_a12.py', 'mmcls::_base_/schedules/imagenet_bs2048_rsb.py', 'mmcls::_base_/default_runtime.py' ] custom_imports = dict(imports=['mmdet.models'], allow_failed_imports=False) model = dict( type='ImageClassifier', backbone=dict( type='mmdet...
"""Test Aleph Alpha API wrapper.""" from langchain_community.llms.aleph_alpha import AlephAlpha def test_aleph_alpha_call() -> None: """Test valid call to cohere.""" llm = AlephAlpha(maximum_tokens=10) output = llm.invoke("Say foo:") assert isinstance(output, str)
"""Test Aleph Alpha API wrapper.""" from langchain_community.llms.aleph_alpha import AlephAlpha def test_aleph_alpha_call() -> None: """Test valid call to cohere.""" llm = AlephAlpha(maximum_tokens=10) # type: ignore[call-arg] output = llm.invoke("Say foo:") assert isinstance(output, str)
from collections import namedtuple from typing import TYPE_CHECKING, Dict, NamedTuple, Optional from urllib.parse import urlparse if TYPE_CHECKING: from ... import DocumentArray _ParsedHost = namedtuple('ParsedHost', 'on host port version scheme') def _parse_host(host: str) -> NamedTuple: """Parse a host s...
from typing import TYPE_CHECKING, Optional, Dict if TYPE_CHECKING: from ... import DocumentArray class PostMixin: """Helper functions for posting DocumentArray to Jina Flow.""" def post( self, host: str, show_progress: bool = False, batch_size: Optional[int] = None, ...
import logging import os import zlib from contextlib import asynccontextmanager from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse from uuid import uuid4 from dotenv import load_dotenv from prisma import Prisma from pydantic import BaseModel, Field, field_validator from backend.util.retry import conn...
import logging import os import zlib from contextlib import asynccontextmanager from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse from uuid import uuid4 from dotenv import load_dotenv from prisma import Prisma from pydantic import BaseModel, Field, field_validator from backend.util.retry import conn...
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3 from .source_separation_pipeline import ( CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS, SourceSeparationBundle, ) __all__ = [ "CONVTASNET_BASE_LIBRI2MIX", "EMFORMER_RNNT_BASE_MUSTC", "...
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3 from .source_separation_pipeline import CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB_PLUS, SourceSeparationBundle __all__ = [ "CONVTASNET_BASE_LIBRI2MIX", "EMFORMER_RNNT_BASE_MUSTC", "EMFORMER_RNNT_BASE_TEDLIUM3", "Source...
from unittest import TestCase, mock import boto3 from llama_index.core.postprocessor.types import ( BaseNodePostprocessor, NodeWithScore, QueryBundle, ) from llama_index.core.schema import TextNode from llama_index.postprocessor.bedrock_rerank import BedrockRerank class TestBedrockRerank(TestCase): ...
from unittest import TestCase, mock import boto3 from llama_index.core.postprocessor.types import ( BaseNodePostprocessor, NodeWithScore, QueryBundle, ) from llama_index.core.schema import TextNode from llama_index.postprocessor.bedrock_rerank import AWSBedrockRerank class TestAWSBedrockRerank(TestCase)...
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py' input_size = (320, 320) train_pipeline = [ dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), dict(type='LoadAnnotations', with_bbox=True), # `mean` and `to_rgb` should be the same with the `preprocess_cfg` dict(type='Expand', mean=[0,...
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py' # dataset settings # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='disk') input_si...
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
_base_ = './retinanet_r50_caffe_fpn_1x_coco.py' train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomResize', scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], keep_ratio...
_base_ = './retinanet_r50_caffe_fpn_1x_coco.py' train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomResize', scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)]), dict(type='Ra...
import time import pytest from typing import List from llama_index.core.schema import Document, TextNode from llama_index.core.node_parser import SentenceSplitter from redis import Redis import docker docker_client = docker.from_env() docker_client.ping() container = docker_client.containers.run( "redis/redis-sta...
import time import pytest from typing import List from llama_index.core.schema import Document, TextNode from llama_index.core.node_parser import SentenceSplitter from redis import Redis import docker docker_client = docker.from_env() docker_client.ping() container = docker_client.containers.run( "redis/redis-sta...
"""Select and order examples based on ngram overlap score (sentence_bleu score). https://www.nltk.org/_modules/nltk/translate/bleu_score.html https://aclanthology.org/P02-1040.pdf """ from typing import Any, Dict, List import numpy as np from langchain_core.example_selectors import BaseExampleSelector from langchain...
"""Select and order examples based on ngram overlap score (sentence_bleu score). https://www.nltk.org/_modules/nltk/translate/bleu_score.html https://aclanthology.org/P02-1040.pdf """ from typing import Any, Dict, List import numpy as np from langchain_core.example_selectors import BaseExampleSelector from langchain...
# Copyright (c) OpenMMLab. All rights reserved. import datetime import os.path as osp import warnings from typing import Optional from mmengine.fileio import dump from mmengine.logging import print_log from . import root from .default_scope import DefaultScope from .registry import Registry def traverse_registry_tre...
# Copyright (c) OpenMMLab. All rights reserved. import datetime import os.path as osp from typing import Optional from mmengine.fileio import dump from mmengine.logging import print_log from . import root from .registry import Registry def traverse_registry_tree(registry: Registry, verbose: bool = True) -> list: ...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '0.3.2' def parse_version_info(version_str): """Parse the version information. Args: version_str (str): version string like '0.1.0'. Returns: tuple: version information contains major, minor, micro version. """ versio...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '0.3.1' def parse_version_info(version_str): """Parse the version information. Args: version_str (str): version string like '0.1.0'. Returns: tuple: version information contains major, minor, micro version. """ versio...
from typing import Optional from docarray.document import BaseDocument from docarray.typing import AnyTensor, Embedding, PointCloud3DUrl class PointCloud3D(BaseDocument): """ Document for handling point clouds for 3D data representation. Point cloud is a representation of a 3D mesh. It is made by repeat...
from typing import Optional from docarray.document import BaseDocument from docarray.typing import Embedding, PointCloud3DUrl, Tensor class PointCloud3D(BaseDocument): """ Document for handling point clouds for 3D data representation. Point cloud is a representation of a 3D mesh. It is made by repeatedl...
# Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writ...
# Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writ...
""" This script contains an example how to perform semantic search with Seismic. For more information, please refer to the documentation: https://github.com/TusKANNy/seismic/blob/main/docs/Guidelines.md All you need is installing the `pyseismic-lsr` package: ``` pip install pyseismic-lsr ``` """ import time from dat...
""" This script contains an example how to perform semantic search with Seismic. For more information, please refer to the documentation: https://github.com/TusKANNy/seismic/blob/main/docs/Guidelines.md All you need is installing the `pyseismic-lsr` package: ``` pip install pyseismic-lsr ``` """ import time from dat...
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
import inspect import logging from typing import Any, Callable, Optional from fastapi import HTTPException, Request, Security from fastapi.security import APIKeyHeader, HTTPBearer from starlette.status import HTTP_401_UNAUTHORIZED from .config import settings from .jwt_utils import parse_jwt_token security = HTTPBea...
import inspect import logging from typing import Any, Callable, Optional from fastapi import HTTPException, Request, Security from fastapi.security import APIKeyHeader, HTTPBearer from starlette.status import HTTP_401_UNAUTHORIZED from .config import settings from .jwt_utils import parse_jwt_token security = HTTPBea...
from typing import TYPE_CHECKING from docarray.math.ndarray import get_array_type if TYPE_CHECKING: from docarray.typing import ArrayType import numpy as np def pdist( x_mat: 'ArrayType', metric: str, ) -> 'np.ndarray': """Computes Pairwise distances between observations in n-dimensional space. ...
from typing import TYPE_CHECKING from ..ndarray import get_array_type if TYPE_CHECKING: from ...typing import ArrayType import numpy as np def pdist( x_mat: 'ArrayType', metric: str, ) -> 'np.ndarray': """Computes Pairwise distances between observations in n-dimensional space. :param x_mat:...
import numpy as np from docarray import BaseDoc from docarray.typing import NdArray def test_set_tensor(): class MyDocument(BaseDoc): tensor: NdArray d = MyDocument(tensor=np.zeros((3, 224, 224))) assert isinstance(d.tensor, NdArray) assert isinstance(d.tensor, np.ndarray) assert (d.ten...
import numpy as np from docarray import BaseDocument from docarray.typing import NdArray def test_set_tensor(): class MyDocument(BaseDocument): tensor: NdArray d = MyDocument(tensor=np.zeros((3, 224, 224))) assert isinstance(d.tensor, NdArray) assert isinstance(d.tensor, np.ndarray) ass...
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' train_pipeline = [ dict( type='LoadImageFromFile', file_client_args={{_base_.file_client_args}}), dict( type='InstaBoost', action_candidate=('normal', 'horizontal', 'skip'), action_prob=(1, 0, 0), sc...
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='InstaBoost', action_candidate=('normal', 'horizontal', 'skip'), ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools.google_trends.tool import GoogleTrendsQueryRun # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling op...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools.google_trends.tool import GoogleTrendsQueryRun # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling op...
from __future__ import annotations from typing import Any, Union from langchain_core.retrievers import ( BaseRetriever, RetrieverOutput, ) from langchain_core.runnables import Runnable, RunnablePassthrough def create_retrieval_chain( retriever: Union[BaseRetriever, Runnable[dict, RetrieverOutput]], ...
from __future__ import annotations from typing import Any, Union from langchain_core.retrievers import ( BaseRetriever, RetrieverOutput, ) from langchain_core.runnables import Runnable, RunnablePassthrough def create_retrieval_chain( retriever: Union[BaseRetriever, Runnable[dict, RetrieverOutput]], ...
_base_ = './htc_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
"""Agent components.""" from typing import Any, Callable, Dict, Optional, Set from llama_index.core.base.query_pipeline.query import ( QueryComponent, ) from llama_index.core.bridge.pydantic import Field from llama_index.core.query_pipeline.components.function import ( FnComponent, get_parameters, ) # fr...
"""Agent components.""" from typing import Any, Callable, Dict, Optional, Set from llama_index.core.base.query_pipeline.query import ( QueryComponent, ) from llama_index.core.bridge.pydantic import Field from llama_index.core.query_pipeline.components.function import ( FnComponent, get_parameters, ) # fr...
from langchain_core.utils.function_calling import convert_pydantic_to_openai_function from pydantic import BaseModel, Field def test_convert_pydantic_to_openai_function() -> None: class Data(BaseModel): """The data to return.""" key: str = Field(..., description="API key") days: int = Fie...
from langchain_core.utils.function_calling import convert_pydantic_to_openai_function from pydantic import BaseModel, Field def test_convert_pydantic_to_openai_function() -> None: class Data(BaseModel): """The data to return.""" key: str = Field(..., description="API key") days: int = Fie...
from typing import TYPE_CHECKING, Type, Optional if TYPE_CHECKING: from docarray.typing import T from docarray.proto.docarray_pb2 import DocumentProto class ProtobufMixin: @classmethod def from_protobuf(cls: Type['T'], pb_msg: 'DocumentProto') -> 'T': from docarray.proto.io import parse_proto...
from typing import TYPE_CHECKING, Type, Optional if TYPE_CHECKING: from ...typing import T from ...proto.docarray_pb2 import DocumentProto class ProtobufMixin: @classmethod def from_protobuf(cls: Type['T'], pb_msg: 'DocumentProto') -> 'T': from ...proto.io import parse_proto return p...
from typing import Sequence, cast import prisma.enums import prisma.types AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = { "Input": True, "Output": True, "Webhook": True, "AgentBlock": True, } AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = { "Nodes": {"include": AGENT_NODE_INCLUDE} } ...
from typing import Sequence, cast import prisma.enums import prisma.types AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = { "Input": True, "Output": True, "Webhook": True, "AgentBlock": True, } AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = { "Nodes": {"include": AGENT_NODE_INCLUDE} } ...
import importlib class LazyModule: def __init__(self, name, pip_name=None, import_error_msg=None): self.name = name self.pip_name = pip_name or name self.import_error_msg = import_error_msg or ( f"This requires the {self.name} module. " f"You can install it via `pip...
import importlib class LazyModule: def __init__(self, name, pip_name=None): self.name = name pip_name = pip_name or name self.pip_name = pip_name self.module = None self._available = None @property def available(self): if self._available is None: ...
import pytest import torch from docarray.computation.torch_backend import TorchCompBackend def test_to_device(): t = torch.rand(10, 3) assert t.device == torch.device('cpu') t = TorchCompBackend.to_device(t, 'meta') assert t.device == torch.device('meta') @pytest.mark.parametrize( 'array,result...
import pytest import torch from docarray.computation.torch_backend import TorchCompBackend def test_to_device(): t = torch.rand(10, 3) assert t.device == torch.device('cpu') t = TorchCompBackend.to_device(t, 'meta') assert t.device == torch.device('meta') @pytest.mark.parametrize( 'array,result...
from keras.src.backend.common.name_scope import name_scope from keras.src.backend.numpy import core from keras.src.backend.numpy import image from keras.src.backend.numpy import linalg from keras.src.backend.numpy import math from keras.src.backend.numpy import nn from keras.src.backend.numpy import numpy from keras.sr...
from keras.src.backend.numpy import core from keras.src.backend.numpy import image from keras.src.backend.numpy import linalg from keras.src.backend.numpy import math from keras.src.backend.numpy import nn from keras.src.backend.numpy import numpy from keras.src.backend.numpy import random from keras.src.backend.numpy....
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os from collections import Sequence from pathlib import Path import mmcv import numpy as np from mmcv import Config, DictAction from mmdet.core.utils import mask2ndarray from mmdet.core.visualization import imshow_det_bboxes from mmdet.datasets.bu...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os from collections import Sequence from pathlib import Path import mmcv import numpy as np from mmcv import Config, DictAction from mmdet.core.utils import mask2ndarray from mmdet.core.visualization import imshow_det_bboxes from mmdet.datasets.bu...
from __future__ import annotations from collections.abc import Iterable import torch from torch import Tensor, nn from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class FlopsLoss(nn.Module): def __init__(self, model: SparseEncoder, threshold: float = None) -> None: """ ...
from __future__ import annotations from collections.abc import Iterable import torch from torch import Tensor, nn from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class FlopsLoss(nn.Module): def __init__(self, model: SparseEncoder) -> None: super().__init__() self.mo...
from typing import Any, Dict, List, Optional, Sequence, Type, Union import PIL.Image import torch from torchvision import datapoints from torchvision.prototype.datapoints import Label, OneHotLabel from torchvision.transforms.v2 import functional as F, Transform from torchvision.transforms.v2._utils import _get_fill, ...
from typing import Any, Dict, List, Optional, Sequence, Type, Union import PIL.Image import torch from torchvision import datapoints from torchvision.prototype.datapoints import Label, OneHotLabel from torchvision.transforms.v2 import functional as F, Transform from torchvision.transforms.v2._utils import _get_fill, ...
from __future__ import annotations from collections.abc import Iterable import torch import torch.nn as nn import torch.nn.functional as F from sentence_transformers.sparse_encoder import SparseEncoder def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor: ...
from __future__ import annotations from collections.abc import Iterable import torch import torch.nn as nn import torch.nn.functional as F from sentence_transformers.sparse_encoder import SparseEncoder def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor: ...
import PIL.Image import torch from torchvision import datapoints from torchvision.utils import _log_api_usage_once from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal @_register_explicit_noop( PIL.Image.Image, datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask, warn_pas...
import PIL.Image import torch from torchvision import datapoints from torchvision.utils import _log_api_usage_once from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal @_register_explicit_noop( PIL.Image.Image, datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask, warn_pas...
import os from pathlib import Path from typing import Any, Callable, Optional, Tuple, Union from PIL import Image from .utils import check_integrity, download_and_extract_archive, download_url from .vision import VisionDataset class SBU(VisionDataset): """`SBU Captioned Photo <http://www.cs.virginia.edu/~vicent...
import os from pathlib import Path from typing import Any, Callable, Optional, Tuple, Union from PIL import Image from .utils import check_integrity, download_and_extract_archive, download_url from .vision import VisionDataset class SBU(VisionDataset): """`SBU Captioned Photo <http://www.cs.virginia.edu/~vicent...
from __future__ import annotations from typing import Optional, Type from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from pydantic import BaseModel from langchain_community.tools.playwright.base import BaseBrowserTool from langchain_community.tools.playwrig...
from __future__ import annotations from typing import Optional, Type from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from pydantic import BaseModel from langchain_community.tools.playwright.base import BaseBrowserTool from langchain_community.tools.playwrig...
"""Embeddings.""" from importlib import import_module from typing import TYPE_CHECKING if TYPE_CHECKING: from langchain_core.embeddings.embeddings import Embeddings from langchain_core.embeddings.fake import ( DeterministicFakeEmbedding, FakeEmbeddings, ) __all__ = ["DeterministicFakeEmbe...
"""Embeddings.""" from importlib import import_module from typing import TYPE_CHECKING if TYPE_CHECKING: from langchain_core.embeddings.embeddings import Embeddings from langchain_core.embeddings.fake import ( DeterministicFakeEmbedding, FakeEmbeddings, ) __all__ = ["DeterministicFakeEmbe...
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2021 Imperial College London (Pingchuan Ma) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) import warnings import numpy as np from ibug.face_detection import RetinaFacePredictor warnings.filterwarnings("ignore") class LandmarksDetector: de...
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2021 Imperial College London (Pingchuan Ma) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) import warnings import numpy as np import torchvision from ibug.face_detection import RetinaFacePredictor warnings.filterwarnings("ignore") class Landma...
import types from keras.src.activations.activations import celu from keras.src.activations.activations import elu from keras.src.activations.activations import exponential from keras.src.activations.activations import gelu from keras.src.activations.activations import glu from keras.src.activations.activations import ...
import types from keras.src.activations.activations import celu from keras.src.activations.activations import elu from keras.src.activations.activations import exponential from keras.src.activations.activations import gelu from keras.src.activations.activations import glu from keras.src.activations.activations import ...
import numpy as np import pytest from docarray import BaseDoc, DocList from docarray.typing import NdArray @pytest.mark.parametrize('shuffle', [False, True]) @pytest.mark.parametrize('stack', [False, True]) @pytest.mark.parametrize('batch_size,n_batches', [(16, 7), (10, 10)]) def test_batch(shuffle, stack, batch_siz...
import numpy as np import pytest from docarray import BaseDoc, DocList from docarray.typing import NdArray @pytest.mark.parametrize('shuffle', [False, True]) @pytest.mark.parametrize('stack', [False, True]) @pytest.mark.parametrize('batch_size,n_batches', [(16, 7), (10, 10)]) def test_batch(shuffle, stack, batch_siz...
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, TypeVar from pydantic import create_model, create_model_from_typeddict from pydantic.config import BaseConfig from typing_extensions import TypedDict from docarray import BaseDoc if TYPE_CHECKING: from pydantic.typing import AnyClassMethod ...
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, TypeVar from pydantic import create_model, create_model_from_typeddict from pydantic.config import BaseConfig from typing_extensions import TypedDict from docarray import BaseDoc if TYPE_CHECKING: from pydantic.typing import AnyClassMethod ...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv from .version import __version__, short_version def digit_version(version_str): digit_version = [] for x in version_str.split('.'): if x.isdigit(): digit_version.append(int(x)) elif x.find('rc') != -1: patch_v...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv from .version import __version__, short_version def digit_version(version_str): digit_version = [] for x in version_str.split('.'): if x.isdigit(): digit_version.append(int(x)) elif x.find('rc') != -1: patch_v...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py', './centernet_tta.py' ] dataset_type = 'CocoDataset' data_root = 'data/coco/' # model settings model = dict( type='CenterNet', data_preprocessor=dict( type='DetDataPrepro...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py', './centernet_tta.py' ] dataset_type = 'CocoDataset' data_root = 'data/coco/' # model settings model = dict( type='CenterNet', data_preprocessor=dict( type='DetDataPrepro...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
from __future__ import annotations from sentence_transformers import util from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseAnglELoss(SparseCoSENTLoss): def __init__(self, model: Spars...
from __future__ import annotations from sentence_transformers import util from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseAnglELoss(SparseCoSENTLoss): def __init__(self, model: Spars...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os from pathlib import Path import mmcv from mmcv import Config, DictAction from mmdet.core.utils import mask2ndarray from mmdet.core.visualization import imshow_det_bboxes from mmdet.datasets.builder import build_dataset def parse_args(): p...
import argparse import os from pathlib import Path import mmcv from mmcv import Config, DictAction from mmdet.core.utils import mask2ndarray from mmdet.core.visualization import imshow_det_bboxes from mmdet.datasets.builder import build_dataset def parse_args(): parser = argparse.ArgumentParser(description='Bro...
from llama_index.core.graph_stores.types import GraphStore
from llama_index.graph_stores.memgraph import MemgraphGraphStore


def test_memgraph_graph_store():
    names_of_bases = [b.__name__ for b in MemgraphGraphStore.__bases__]
    assert GraphStore.__name__ in names_of_bases
from unittest.mock import MagicMock, patch from llama_index.core.graph_stores.types import GraphStore from llama_index.graph_stores.memgraph import MemgraphGraphStore @patch("llama_index.graph_stores.memgraph.MemgraphGraphStore") def test_memgraph_graph_store(MockMemgraphGraphStore: MagicMock): instance: Memgrap...
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # ...
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # ...
# coding: utf-8 import pytest import lightgbm as lgb from .utils import pickle_obj, unpickle_obj @pytest.mark.parametrize('serializer', ["pickle", "joblib", "cloudpickle"]) def test_early_stopping_callback_is_picklable(serializer, tmp_path): rounds = 5 callback = lgb.early_stopping(stopping_rounds=rounds) ...
# coding: utf-8 import pytest import lightgbm as lgb from .utils import pickle_obj, unpickle_obj @pytest.mark.parametrize('serializer', ["pickle", "joblib", "cloudpickle"]) def test_early_stopping_callback_is_picklable(serializer, tmp_path): callback = lgb.early_stopping(stopping_rounds=5) tmp_file = tmp_pa...