input: string, lengths 33 to 5k
output: string, lengths 32 to 5k

Each row below pairs an input cell with its corresponding output cell; cell contents are truncated in this preview.
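The column summary above follows the Hugging Face dataset-viewer format (column name, type, min and max string length), so the full, untruncated rows would normally be read through the `datasets` library. Below is a minimal sketch under that assumption; the dataset identifier `user/code-edit-pairs` is a hypothetical placeholder, and only the column names `input` and `output` are taken from the header above.

```python
from datasets import load_dataset

# Hypothetical dataset id; substitute the real identifier for this dump.
ds = load_dataset("user/code-edit-pairs", split="train")

# Each record carries two strings: the original file ("input")
# and its edited counterpart ("output").
for row in ds.select(range(3)):
    print(len(row["input"]), len(row["output"]))
```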
from __future__ import annotations import os import platform import subprocess from .optional_submodules import checkout_nccl from .setup_helpers.cmake import CMake, USE_NINJA from .setup_helpers.env import ( check_env_flag, check_negative_env_flag, IS_64BIT, IS_WINDOWS, ) def _get_vc_env(vc_arch: s...
from __future__ import annotations import os import platform from .optional_submodules import checkout_nccl from .setup_helpers.cmake import CMake, USE_NINJA from .setup_helpers.env import ( check_env_flag, check_negative_env_flag, IS_64BIT, IS_WINDOWS, ) def _get_vc_env(vc_arch: str) -> dict[str, s...
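In the first pair above, the output cell differs from the input by dropping the `import subprocess` line. A sketch of how the edit in any pair can be made visible with the standard library's `difflib`; `row` stands for one record loaded as in the previous snippet.

```python
import difflib


def show_edit(row: dict) -> None:
    # Unified line diff between the original ("input") and edited ("output") file.
    diff = difflib.unified_diff(
        row["input"].splitlines(keepends=True),
        row["output"].splitlines(keepends=True),
        fromfile="input",
        tofile="output",
    )
    print("".join(diff))
```

Applied to the first row, this would print the removed `import subprocess` line prefixed with `-`.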
import contextlib import logging import typing import fastapi import fastapi.responses import starlette.middleware.cors import uvicorn import backend.data.block import backend.data.db import backend.data.graph import backend.data.user import backend.server.routers.v1 import backend.util.service import backend.util.se...
import contextlib import logging import typing import fastapi import fastapi.responses import starlette.middleware.cors import uvicorn import backend.data.block import backend.data.db import backend.data.graph import backend.data.user import backend.server.routers.v1 import backend.util.service import backend.util.se...
from __future__ import annotations from sentence_transformers.training_args import SentenceTransformerTrainingArguments class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments): """ CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments s...
from __future__ import annotations from sentence_transformers.training_args import SentenceTransformerTrainingArguments class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments): """ CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments s...
import numpy as np import pytest from tensorflow import data as tf_data from keras.src import backend from keras.src import layers from keras.src import testing class CanaryLayer(layers.Layer): def __init__(self): super().__init__() self.training = None self.received_mask = False def...
import numpy as np import pytest from tensorflow import data as tf_data from keras.src import backend from keras.src import layers from keras.src import testing class CanaryLayer(layers.Layer): def __init__(self): super().__init__() self.training = None self.received_mask = False def...
from jina.schemas.helper import _cli_to_schema from jina_cli.export import api_to_dict for s in ('flow', 'gateway', 'executor'): a = _cli_to_schema(api_to_dict(), s) table = ['| Name | Description | Type | Default |', '|----|----|----|----|'] for k, v in a[f'Jina::{s.capitalize()}']['properties'].items()...
from jina.schemas.helper import _cli_to_schema from jina_cli.export import api_to_dict for s in ('flow', 'gateway', 'executor'): a = _cli_to_schema(api_to_dict(), s) table = ['| Name | Description | Type | Default |', '|----|----|----|----|'] for k, v in a[f'Jina::{s.capitalize()}']['properties'].items()...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
"""All minimum dependencies for scikit-learn.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import argparse from collections import defaultdict # scipy and cython should by in sync with pyproject.toml NUMPY_MIN_VERSION = "1.19.5" SCIPY_MIN_VERSION = "1.6.0" JOBLIB_MIN_VERSION = "1...
"""All minimum dependencies for scikit-learn.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import argparse from collections import defaultdict # scipy and cython should by in sync with pyproject.toml NUMPY_MIN_VERSION = "1.19.5" SCIPY_MIN_VERSION = "1.6.0" JOBLIB_MIN_VERSION = "1...
import os.path from pathlib import Path from typing import Any, Callable, List, Optional, Tuple, Union from PIL import Image from .vision import VisionDataset class CocoDetection(VisionDataset): """`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset. It requires the `COCO API to be insta...
import os.path from typing import Any, Callable, List, Optional, Tuple from PIL import Image from .vision import VisionDataset class CocoDetection(VisionDataset): """`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset. It requires the `COCO API to be installed <https://github.com/pdollar...
# mypy: allow-untyped-defs import logging from collections.abc import Sequence from typing import cast from ... import config from ...codecache import code_hash, get_path from ...scheduler import BaseSchedulerNode, BaseScheduling, SchedulerNode from ...utils import get_fused_kernel_name, get_kernel_metadata, sympy_pro...
# mypy: allow-untyped-defs import logging from collections.abc import Sequence from typing import cast from ... import config from ...codecache import code_hash, get_path from ...scheduler import BaseSchedulerNode, BaseScheduling, SchedulerNode from ...utils import get_fused_kernel_name, get_kernel_metadata, sympy_pro...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os from collections import Sequence from pathlib import Path import mmcv import numpy as np from mmcv import Config, DictAction from mmdet.core.utils import mask2ndarray from mmdet.core.visualization import imshow_det_bboxes from mmdet.datasets.bu...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os from collections import Sequence from pathlib import Path import mmcv from mmcv import Config, DictAction from mmdet.core.utils import mask2ndarray from mmdet.core.visualization import imshow_det_bboxes from mmdet.datasets.builder import build_...
import pathlib from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper from torchvision.datapoints import BoundingBoxes from torchvision.prototype.datapoints import Label from torchvision.prototype.datasets.utils import Dataset, Enc...
import pathlib from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper from torchvision.datapoints import BoundingBox from torchvision.prototype.datapoints import Label from torchvision.prototype.datasets.utils import Dataset, Encod...
from workflows.events import ( Event, # noqa EventType, # noqa HumanResponseEvent, # noqa InputRequiredEvent, # noqa StartEvent, # noqa StopEvent, # noqa )
from _collections_abc import dict_items, dict_keys, dict_values from typing import Any, Dict, Type from llama_index.core.bridge.pydantic import ( BaseModel, ConfigDict, PrivateAttr, model_serializer, ) class Event(BaseModel): """ Base class for event types that mimics dict interface. Pri...
from typing import TYPE_CHECKING, List from docarray.typing.tensor.abstract_tensor import AbstractTensor if TYPE_CHECKING: from docarray.array import DocVec from docarray.array.any_array import AnyDocArray class DocArraySummary: def __init__(self, docs: 'AnyDocArray'): self.docs = docs def ...
from typing import TYPE_CHECKING, List from docarray.typing.tensor.abstract_tensor import AbstractTensor if TYPE_CHECKING: from docarray.array import DocArrayStacked from docarray.array.abstract_array import AnyDocArray class DocArraySummary: def __init__(self, da: 'AnyDocArray'): self.da = da ...
# Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to...
# Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
from __future__ import annotations from typing import Optional, Type from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from pydantic import BaseModel, Field from langchain_community.tools.playwright.base import BaseBrowserTool from langchain_community.tools.p...
from __future__ import annotations from typing import Optional, Type from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from pydantic import BaseModel, Field from langchain_community.tools.playwright.base import BaseBrowserTool from langchain_community.tools.p...
from __future__ import annotations import collections import json import logging import os import string from collections.abc import Iterable from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer logger = logging.getLogger(__nam...
from __future__ import annotations import collections import json import logging import os import string from typing import Iterable from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer logger = logging.getLogger(__name__) cl...
# Copyright (c) OpenMMLab. All rights reserved. import collections from mmcv.utils import build_from_cfg from ..builder import PIPELINES @PIPELINES.register_module() class Compose: """Compose multiple transforms sequentially. Args: transforms (Sequence[dict | callable]): Sequence of transform objec...
import collections from mmcv.utils import build_from_cfg from ..builder import PIPELINES @PIPELINES.register_module() class Compose: """Compose multiple transforms sequentially. Args: transforms (Sequence[dict | callable]): Sequence of transform object or config dict to be composed. ...
from typing import Any, Dict, Iterator import torch from ..utils import _log_api_usage_once from ._video_opt import ( _HAS_VIDEO_OPT, _probe_video_from_file, _probe_video_from_memory, _read_video_from_file, _read_video_from_memory, _read_video_timestamps_from_file, _read_video_timestamps_...
from typing import Any, Dict, Iterator import torch from ..utils import _log_api_usage_once try: from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER except ModuleNotFoundError: _HAS_GPU_VIDEO_DECODER = False from ._video_opt import ( _HAS_VIDEO_OPT, _probe_video_from_file, _probe_video_from_mem...
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
""" Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task. Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings. It then computes an a...
""" Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task. Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings. It then computes an a...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.applications.nasnet import NASNetLarge as NASNetLarge from keras.src.applications.nasnet import NASNetMobile as NASNetMobile from keras.src.applications.nasnet import ( decode_pre...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.applications.nasnet import NASNetLarge from keras.src.applications.nasnet import NASNetMobile from keras.src.applications.nasnet import decode_predictions from keras.src.applications....
_base_ = './ms-rcnn_r50-caffe_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe')))
_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe')))
from typing import Any, Optional def json_to_markdown(data: Any, level: int = 0, header: Optional[str] = None) -> str: """ Recursively converts a Python object (from JSON) into a Markdown string. Args: data: The Python object to convert. level: The current nesting level (used for indentat...
from typing import Any def json_to_markdown(data: Any, level: int = 0, header: str | None = None) -> str: """ Recursively converts a Python object (from JSON) into a Markdown string. Args: data: The Python object to convert. level: The current nesting level (used for indentation and headi...
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py' model = dict( backbone=dict( depth=101, norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe'))) # use ca...
_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( depth=101, norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe'))) # u...
import pytest import qdrant_client from docarray.index import QdrantDocumentIndex @pytest.fixture def qdrant() -> qdrant_client.QdrantClient: """This fixture takes care of removing the collection before each test case""" client = qdrant_client.QdrantClient(path='/tmp/qdrant-local') client.delete_collecti...
import uuid import pytest import qdrant_client from docarray.index import QdrantDocumentIndex @pytest.fixture def qdrant() -> qdrant_client.QdrantClient: """This fixture takes care of removing the collection before each test case""" client = qdrant_client.QdrantClient(path='/tmp/qdrant-local') client.de...
import os from typing import Callable, List import numpy as np import pytest import torch from jina import Document, DocumentArray from ...transform_encoder import TransformerTorchEncoder cur_dir = os.path.dirname(os.path.abspath(__file__)) def test_compute_tokens(): enc = TransformerTorchEncoder() tokens ...
import os from typing import Callable, List import numpy as np import pytest import torch from jina import Document, DocumentArray from jinahub.encoder.transform_encoder import TransformerTorchEncoder cur_dir = os.path.dirname(os.path.abspath(__file__)) def test_compute_tokens(): enc = TransformerTorchEncoder()...
from typing import Dict, TYPE_CHECKING, Optional if TYPE_CHECKING: # pragma: no cover from .workflow import Workflow class ServiceNotFoundError(Exception): """An error raised when the service manager couldn't find a certain service name.""" class ServiceManager: """ A helper class to decouple ho...
from typing import Dict, TYPE_CHECKING, Optional if TYPE_CHECKING: # pragma: no cover from .workflow import Workflow class ServiceNotFoundError(Exception): """An error raised when the service manager couldn't find a certain service name.""" class ServiceManager: """A helper class to decouple how ser...
from pydantic import BaseModel from typing import Optional, Dict, List class AlphaMatrix(BaseModel): """ This class is not necessary to understand in order to use a KodaRetriever - as it will be automatically instantiated if a dictionary is provided. Pydantic class to enforce the required fields for a KodaRetriev...
from pydantic import BaseModel from typing import Optional, Dict, List class AlphaMatrix(BaseModel): """ This class is not necessary to understand in order to use a KodaRetriever - as it will be automatically instantiated if a dictionary is provided. Pydantic class to enforce the required fields for a KodaRetriev...
from dataclasses import dataclass from typing import List, Union import numpy as np import PIL.Image import torch from diffusers.utils import BaseOutput @dataclass class HunyuanVideoPipelineOutput(BaseOutput): r""" Output class for HunyuanVideo pipelines. Args: frames (`torch.Tensor`, `np.ndarr...
from dataclasses import dataclass import torch from diffusers.utils import BaseOutput @dataclass class HunyuanVideoPipelineOutput(BaseOutput): r""" Output class for HunyuanVideo pipelines. Args: frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): List of video out...
import sys from os import path from setuptools import find_packages from setuptools import setup if sys.version_info < (3, 7, 0): raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}') try: pkg_name = 'docarray' libinfo_py = path.join(pkg_name, '__init__.py') libinfo_content = o...
import sys from os import path from setuptools import find_packages from setuptools import setup if sys.version_info < (3, 7, 0): raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}') try: pkg_name = 'docarray' libinfo_py = path.join(pkg_name, '__init__.py') libinfo_content = o...
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip from . import functional # usort: skip from ._transform import Transform # usort: skip from ._presets import StereoMatching # usort: skip from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste from ._au...
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip from . import functional # usort: skip from ._transform import Transform # usort: skip from ._presets import StereoMatching # usort: skip from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste from ._au...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='RetinaNet', backbone=dict( _delete_=True, type='PyramidVisionTransformerV2', embed_dims=32, ...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='RetinaNet', backbone=dict( _delete_=True, type='PyramidVisionTransformerV2', embed_dims=32, ...
""" This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file. CT will be training using these sentences. Checkpoints are stored every 500 steps to the output folder. Usage: python train_ct_from_file.py path/to/sentences.txt """ import gzip import...
""" This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file. CT will be training using these sentences. Checkpoints are stored every 500 steps to the output folder. Usage: python train_ct_from_file.py path/to/sentences.txt """ import gzip import...
# Copyright (c) OpenMMLab. All rights reserved. from .batch_sampler import (AspectRatioBatchSampler, MultiDataAspectRatioBatchSampler, TrackAspectRatioBatchSampler) from .class_aware_sampler import ClassAwareSampler from .custom_sample_size_sampler import CustomSa...
# Copyright (c) OpenMMLab. All rights reserved. from .batch_sampler import (AspectRatioBatchSampler, MultiDataAspectRatioBatchSampler, TrackAspectRatioBatchSampler) from .class_aware_sampler import ClassAwareSampler from .multi_data_sampler import MultiDataSampler...
# Copyright (c) OpenMMLab. All rights reserved. from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d from .builder import build_linear_layer, build_transformer from .ckpt_convert import pvt_convert from .conv_upsample import ConvUpsample from .csp_layer import CSPLayer from .gaussian_target import gaussia...
# Copyright (c) OpenMMLab. All rights reserved. from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d from .builder import build_linear_layer, build_transformer from .ckpt_convert import pvt_convert from .conv_upsample import ConvUpsample from .csp_layer import CSPLayer from .gaussian_target import gaussia...
import warnings import wave from abc import ABC from typing import BinaryIO, TypeVar, Union from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.utils.misc import is_notebook T = TypeVar('T', bound='AbstractAudioTensor') MAX_INT_16 = 2**15 class AbstractAudioTensor(AbstractTensor, ABC): ...
import wave from abc import ABC from typing import BinaryIO, TypeVar, Union from docarray.typing.tensor.abstract_tensor import AbstractTensor T = TypeVar('T', bound='AbstractAudioTensor') MAX_INT_16 = 2**15 class AbstractAudioTensor(AbstractTensor, ABC): def to_bytes(self): """ Convert audio te...
import pathlib from typing import Any, Dict, List, Union import torch from torchdata.datapipes.iter import Decompressor, IterDataPipe, LineReader, Mapper from torchvision.datapoints import Image from torchvision.prototype.datapoints import Label from torchvision.prototype.datasets.utils import Dataset, HttpResource, O...
import pathlib from typing import Any, Dict, List, Union import torch from torchdata.datapipes.iter import Decompressor, IterDataPipe, LineReader, Mapper from torchvision.prototype.datapoints import Image, Label from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource from torchvision.pro...
from typing import Dict from jina import Flow, DocumentArray, Document, Executor, Client, requests ORIGINAL_PARAMS = {'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}} OVERRIDEN_EXECUTOR1_PARAMS = { 'param1': 'changed', 'param2': 60, 'exec_name': {'param1': 'changed'}, } class DummyOverrid...
from typing import Dict from jina import Flow, DocumentArray, Document, Executor, Client, requests ORIGINAL_PARAMS = {'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}} OVERRIDEN_EXECUTOR1_PARAMS = { 'param1': 'changed', 'param2': 60, 'exec_name': {'param1': 'changed'}, } exposed_port = 12345...
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.load import dataset_module_factory, import_main_c...
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.load import dataset_module_factory, import_main_c...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod class BaseBBoxCoder(metaclass=ABCMeta): """Base bounding box coder. Args: use_box_type (bool): Whether to wrap decoded boxes with the box type data structure. Defaults to False. """ # The size...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod class BaseBBoxCoder(metaclass=ABCMeta): """Base bounding box coder. Args: use_box_type (bool): Whether to wrap decoded boxes with the boxlist data structure. Defaults to False. """ # The size ...
from __future__ import annotations import sys from .classification import CrossEncoderClassificationEvaluator from .correlation import CrossEncoderCorrelationEvaluator from .deprecated import ( CEBinaryAccuracyEvaluator, CEBinaryClassificationEvaluator, CECorrelationEvaluator, CEF1Evaluator, CERer...
from __future__ import annotations # TODO: Consider renaming all evaluators to CrossEncoder..., e.g. CrossEncoderNanoBEIREvaluator, CrossEncoderClassificationEvaluator, etc. from .CEBinaryAccuracyEvaluator import CEBinaryAccuracyEvaluator from .CEBinaryClassificationEvaluator import CEBinaryClassificationEvaluator fro...
from typing import Dict MISTRALAI_MODELS: Dict[str, int] = { "mistral-tiny": 32000, "mistral-small": 32000, "mistral-medium": 32000, "mistral-large": 32000, "open-mixtral-8x7b": 32000, "open-mistral-7b": 32000, "open-mixtral-8x22b": 64000, "mistral-small-latest": 32000, "mistral-med...
from typing import Dict MISTRALAI_MODELS: Dict[str, int] = { "mistral-tiny": 32000, "mistral-small": 32000, "mistral-medium": 32000, "mistral-large": 32000, "open-mixtral-8x7b": 32000, "open-mistral-7b": 32000, "open-mixtral-8x22b": 64000, "mistral-small-latest": 32000, "mistral-med...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.callbacks.clearml_callback import ClearMLCallbackHandler # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handlin...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.callbacks.clearml_callback import ClearMLCallbackHandler # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handlin...
"""Standard LangChain interface tests.""" from langchain_core.language_models import BaseChatModel from langchain_tests.integration_tests import ( # type: ignore[import-not-found] ChatModelIntegrationTests, # type: ignore[import-not-found] ) from langchain_mistralai import ChatMistralAI class TestMistralStand...
"""Standard LangChain interface tests""" from langchain_core.language_models import BaseChatModel from langchain_tests.integration_tests import ( # type: ignore[import-not-found] ChatModelIntegrationTests, # type: ignore[import-not-found] ) from langchain_mistralai import ChatMistralAI class TestMistralStanda...
from keras.src import ops from keras.src import tree from keras.src.api_export import keras_export from keras.src.layers.layer import Layer from keras.src.saving import serialization_lib @keras_export("keras.layers.StackedRNNCells") class StackedRNNCells(Layer): """Wrapper allowing a stack of RNN cells to behave ...
from keras.src import ops from keras.src import tree from keras.src.api_export import keras_export from keras.src.layers.layer import Layer from keras.src.saving import serialization_lib @keras_export("keras.layers.StackedRNNCells") class StackedRNNCells(Layer): """Wrapper allowing a stack of RNN cells to behave ...
import os import pytest from jina import Client, Document, Executor, Flow, requests cur_dir = os.path.dirname(os.path.abspath(__file__)) @pytest.fixture() def flow(request, port_generator): exposed_port = port_generator() flow_src = request.param if flow_src == 'flow-yml': return Flow.load_conf...
import os import pytest from jina import Client, Document, Executor, Flow, requests cur_dir = os.path.dirname(os.path.abspath(__file__)) exposed_port = 12345 @pytest.fixture() def flow(request): flow_src = request.param if flow_src == 'flow-yml': return Flow.load_config(os.path.join(cur_dir, 'flow....
import wave from typing import Union, BinaryIO, TYPE_CHECKING import numpy as np if TYPE_CHECKING: from docarray.typing import T class AudioDataMixin: """Provide helper functions for :class:`Document` to support audio data.""" def save_audio_tensor_to_file( self: 'T', file: Union[str, B...
import wave from typing import Union, BinaryIO, TYPE_CHECKING import numpy as np if TYPE_CHECKING: from docarray.typing import T class AudioDataMixin: """Provide helper functions for :class:`Document` to support audio data.""" def save_audio_tensor_to_file( self: 'T', file: Union[str, B...
from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode @_register_proto(proto_type_name='audio_torch_tensor') class AudioTorchTensor(AbstractAudioTensor,...
from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode @_register_proto(proto_type_name='audio_torch_tensor') class AudioTorchTensor(AbstractAudioTensor,...
from typing import Optional import pytest from langchain_cli.constants import ( DEFAULT_GIT_REF, DEFAULT_GIT_REPO, DEFAULT_GIT_SUBDIRECTORY, ) from langchain_cli.utils.git import DependencySource, parse_dependency_string def _assert_dependency_equals( dep: DependencySource, *, git: Optional[...
from typing import Dict, Optional import pytest from langchain_cli.constants import ( DEFAULT_GIT_REF, DEFAULT_GIT_REPO, DEFAULT_GIT_SUBDIRECTORY, ) from langchain_cli.utils.git import DependencySource, parse_dependency_string def _assert_dependency_equals( dep: DependencySource, *, git: Opt...
import enum from typing import Any, List, Optional, Union import pydantic import backend.data.graph from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash class Methods(enum.Enum): SUBSCRIBE = "subscribe" UNSUBSCRIBE = "unsubscribe" EXECUTION_EVENT = "execution_event" ERROR = "error" ...
import enum from typing import Any, List, Optional, Union import pydantic import backend.data.graph from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash class Methods(enum.Enum): SUBSCRIBE = "subscribe" UNSUBSCRIBE = "unsubscribe" EXECUTION_EVENT = "execution_event" ERROR = "error" ...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
from abc import abstractmethod from typing import Any, Optional, Protocol, Sequence, runtime_checkable from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain_core.tools import BaseTool from pydantic import Field from langchain_community.llms.gradient...
from abc import abstractmethod from typing import Any, Optional, Protocol, Sequence, runtime_checkable from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain_core.tools import BaseTool from pydantic import Field from langchain_community.llms.gradient...
# Copyright (c) OpenMMLab. All rights reserved. import importlib import os.path as osp from mmengine.config import Config from mmengine.config.utils import (_get_cfg_metainfo, _get_external_cfg_base_path, _get_package_and_cfg_path) from mmengine.reg...
# Copyright (c) OpenMMLab. All rights reserved. import importlib import os.path as osp from mmengine.config import Config from mmengine.config.utils import (_get_cfg_metainfo, _get_external_cfg_base_path, _get_package_and_cfg_path) from mmengine.reg...
""" Initializer script that installs stuff to pip. """ from __future__ import annotations import argparse import logging import os import shutil import subprocess import sys import time def run_command( args: list[str], env: dict[str, str] | None = None, ) -> subprocess.CompletedProcess[str]: logging.de...
""" Initializer script that installs stuff to pip. """ from __future__ import annotations import argparse import logging import os import shutil import subprocess import sys import time def run_command(args: list[str]) -> subprocess.CompletedProcess[bytes]: logging.debug("$ %s", " ".join(args)) start_time =...
"""XGBoost Experimental Federated Learning related API.""" import ctypes from threading import Thread from typing import Any, Dict, Optional from .core import _LIB, _check_call, make_jcargs from .tracker import RabitTracker class FederatedTracker(RabitTracker): """Tracker for federated training. Parameters...
"""XGBoost Experimental Federated Learning related API.""" import ctypes from threading import Thread from typing import Any, Dict, Optional from .core import _LIB, _check_call, make_jcargs from .tracker import RabitTracker class FederatedTracker(RabitTracker): """Tracker for federated training. Parameters...
class XYXY: """XYXY contains axis indices for the XYXY format. All values in the XYXY format should be absolute pixel values. The XYXY format consists of the following required indices: - LEFT: left of the bounding box - TOP: top of the bounding box - RIGHT: right of the bounding box - BO...
class XYXY: """XYXY contains axis indices for the XYXY format. All values in the XYXY format should be absolute pixel values. The XYXY format consists of the following required indices: - LEFT: left of the bounding box - TOP: top of the bounding box - RIGHT: right of the bounding box - BO...
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.transforms import LoadImageFromFile from mmdet.datasets.transforms import LoadAnnotations, LoadPanopticAnnotations from mmdet.registry import TRANSFORMS def get_loading_pipeline(pipeline): """Only keep loading image and annotations related configuration....
# Copyright (c) OpenMMLab. All rights reserved. import copy import warnings from mmcv.transforms import LoadImageFromFile from mmdet.datasets.pipelines import LoadAnnotations, LoadPanopticAnnotations from mmdet.registry import TRANSFORMS def replace_ImageToTensor(pipelines): """Replace the ImageToTensor transfo...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import numpy as np import torch from mmengine.config import ConfigDict from mmengine.structures import InstanceData from mmdet import * # noqa from mmdet.models.dense_heads import SOLOV2Head from mmdet.structures.mask import BitmapMasks ...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import numpy as np import torch from mmengine.config import ConfigDict from mmengine.data import InstanceData from mmdet import * # noqa from mmdet.models.dense_heads import SOLOV2Head from mmdet.structures.mask import BitmapMasks def _r...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from pathlib import Path import numpy as np import pytest import torch import torchvision.models.video as models from jina import Document, DocumentArray, Executor from torchvision import transforms from ...vid...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from pathlib import Path import numpy as np import pytest import torch import torchvision.models.video as models from jina import Document, DocumentArray, Executor from torchvision import transforms from ...vid...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod class BaseBBoxCoder(metaclass=ABCMeta): """Base bounding box coder.""" def __init__(self, **kwargs): pass @abstractmethod def encode(self, bboxes, gt_bboxes): """Encode deltas between bboxes and g...
from abc import ABCMeta, abstractmethod class BaseBBoxCoder(metaclass=ABCMeta): """Base bounding box coder.""" def __init__(self, **kwargs): pass @abstractmethod def encode(self, bboxes, gt_bboxes): """Encode deltas between bboxes and ground truth boxes.""" @abstractmethod d...
"""Pydantic v1 compatibility shim.""" from langchain_core._api import warn_deprecated try: from pydantic.v1.dataclasses import * # noqa: F403 except ImportError: from pydantic.dataclasses import * # noqa: F403 warn_deprecated( "0.3.0", removal="1.0.0", alternative="pydantic.v1 or pydantic", ...
"""Pydantic v1 compatibility shim.""" from langchain_core._api import warn_deprecated try: from pydantic.v1.dataclasses import * # noqa: F403 except ImportError: from pydantic.dataclasses import * # type: ignore # noqa: F403 warn_deprecated( "0.3.0", removal="1.0.0", alternative="pydantic.v1 or...
"""OpenAI Finetuning.""" import logging import json import os import requests from typing import Any, Optional from openai import AzureOpenAI as SyncAzureOpenAI from llama_index.core.llms.llm import LLM from llama_index.finetuning.callbacks.finetuning_handler import OpenAIFineTuningHandler from llama_index.finetunin...
"""OpenAI Finetuning.""" import logging import json import os import requests from typing import Any, Optional from openai import AzureOpenAI as SyncAzureOpenAI from llama_index.core.llms.llm import LLM from llama_index.finetuning.callbacks.finetuning_handler import OpenAIFineTuningHandler from llama_index.finetunin...
import numpy as np import pytest from absl.testing import parameterized from keras.src import layers from keras.src import models from keras.src import ops from keras.src import testing from keras.src.utils import summary_utils class SummaryUtilsTest(testing.TestCase): @parameterized.parameters([("adam",), (None...
import numpy as np import pytest from absl.testing import parameterized from keras.src import layers from keras.src import models from keras.src import ops from keras.src import testing from keras.src.utils import summary_utils class SummaryUtilsTest(testing.TestCase): @parameterized.parameters([("adam",), (None...
# pants requires this import to recognize the dep import pytest_asyncio # noqa: F401 import pytest import os from typing import Generator # this fixture is used to mask the NVIDIA_API_KEY environment variable and restore it # after the test. it also returns the value of the NVIDIA_API_KEY environment variable # b...
import pytest import os from typing import Generator # this fixture is used to mask the NVIDIA_API_KEY environment variable and restore it # after the test. it also returns the value of the NVIDIA_API_KEY environment variable # before it was masked so that it can be used in the test. @pytest.fixture() def masked_en...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.activations import deserialize from keras.src.activations import get from keras.src.activations import serialize from keras.src.activations.activations import celu from keras.src.acti...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.activations import deserialize from keras.src.activations import get from keras.src.activations import serialize from keras.src.activations.activations import celu from keras.src.acti...
""" Python polyfills for sys """ from __future__ import annotations import sys from ..decorators import substitute_in_graph __all__ = [ "intern", "getrecursionlimit", ] @substitute_in_graph(sys.intern, can_constant_fold_through=True) def intern(string: str, /) -> str: return string @substitute_in_g...
""" Python polyfills for sys """ from __future__ import annotations import sys from ..decorators import substitute_in_graph __all__ = [ "intern", "getrecursionlimit", ] @substitute_in_graph(sys.intern, can_constant_fold_through=True) def intern(string: str, /) -> str: return string @substitute_in_g...
"""An internal script to process `new_model_failures_with_bad_commit.json` produced by `utils/check_bad_commit.py`. This is used by `.github/workflows/check_failed_model_tests.yml` to produce a slack report of the following form ``` <{url}|New failed tests> { "GH_ydshieh": { "vit": 1 } } ``` """ import ...
"""An internal script to process `new_model_failures_with_bad_commit.json` produced by `utils/check_bad_commit.py`. This is used by `.github/workflows/check_failed_model_tests.yml` to produce a slack report of the following form ``` <{url}|New failed tests> { "GH_ydshieh": { "vit": 1 } } ``` """ import ...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras._tf_keras.keras.preprocessing import image from keras._tf_keras.keras.preprocessing import sequence from keras.src.utils.image_dataset_utils import image_dataset_from_directory from keras...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.api.preprocessing import image from keras.api.preprocessing import sequence from keras.src.utils.image_dataset_utils import image_dataset_from_directory from keras.src.utils.text_dataset_...
"""Memory used to save agent output AND intermediate steps.""" from typing import Any from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import BaseMessage, get_buffer_string from langchain.agents.format_scratchpad import ( format_to_openai_function_messages, format_to_...
"""Memory used to save agent output AND intermediate steps.""" from typing import Any, Dict, List from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import BaseMessage, get_buffer_string from langchain.agents.format_scratchpad import ( format_to_openai_function_messages, ...
from datetime import datetime, timezone import pytest from prisma.enums import CreditTransactionType from prisma.models import CreditTransaction from backend.blocks.llm import AITextGeneratorBlock from backend.data.block import get_block from backend.data.credit import BetaUserCredit from backend.data.execution impor...
from datetime import datetime, timezone import pytest from prisma.enums import CreditTransactionType from prisma.models import CreditTransaction from backend.blocks.llm import AITextGeneratorBlock from backend.data.credit import BetaUserCredit from backend.data.execution import NodeExecutionEntry from backend.data.us...
import os from jina import Flow, DocumentArray, Document # noinspection PyUnresolvedReferences from jinahub.indexers.DocCache.doc_cache import DocCache from jinahub.indexers.storage.LMDBStorage.lmdb_storage import LMDBStorage def test_cache(tmpdir): os.environ['CACHE_WORKSPACE'] = os.path.join(tmpdir, 'cache') ...
import os from jina import Flow, DocumentArray, Document # noinspection PyUnresolvedReferences from jinahub.indexers.DocCache import DocCache from jinahub.indexers.storage.LMDBStorage import LMDBStorage def test_cache(tmpdir): os.environ['CACHE_WORKSPACE'] = os.path.join(tmpdir, 'cache') os.environ['STORAGE...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.config import ConfigDict from mmdet.core.utils import OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .two_stage import TwoStageDetector @MODELS.register_module() class MaskRCNN(TwoStageDetector): """Implementation of `Mask R-CNN ...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Union from mmengine.config import ConfigDict from mmdet.registry import MODELS from .two_stage import TwoStageDetector @MODELS.register_module() class MaskRCNN(TwoStageDetector): """Implementation of `Mask R-CNN <https://arxiv.org/abs/...
# Copyright (c) OpenMMLab. All rights reserved. from .backbones import * # noqa: F401,F403 from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, ROI_EXTRACTORS, SHARED_HEADS, build_backbone, build_detector, build_head, build_loss, build_neck, ...
# Copyright (c) OpenMMLab. All rights reserved. from .backbones import * # noqa: F401,F403 from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, ROI_EXTRACTORS, SHARED_HEADS, build_backbone, build_detector, build_head, build_loss, build_neck, ...
_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py' # learning policy max_epochs = 24 train_cfg = dict( type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', ...
_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
_base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] preprocess_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False, pad_size_divisor=32) model = dict( # use caffe img_norm preprocess_cfg=preprocess_cfg, backb...
_base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] model = dict( backbone=dict( depth=101, norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', che...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class GeneratorDatasetInputStream(AbstractDatasetInputStream): def __init__( self, generator: Callable, features: Optional...
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class GeneratorDatasetInputStream(AbstractDatasetInputStream): def __init__( self, generator: Callable, features: Optional...
"""Parser for JSON output.""" from __future__ import annotations import json from json import JSONDecodeError from typing import Annotated, Any, Optional, TypeVar, Union import jsonpatch # type: ignore[import] import pydantic from pydantic import SkipValidation from langchain_core.exceptions import OutputParserExc...
"""Parser for JSON output.""" from __future__ import annotations import json from json import JSONDecodeError from typing import Annotated, Any, Optional, TypeVar, Union import jsonpatch # type: ignore[import] import pydantic from pydantic import SkipValidation from langchain_core.exceptions import OutputParserExc...
from typing import Any, Dict, Union from torchvision import datapoints from torchvision.transforms.v2 import functional as F, Transform class ConvertBoundingBoxFormat(Transform): """[BETA] Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY". .. v2betastatus:: ConvertBounding...
from typing import Any, Dict, Union import torch from torchvision import datapoints, transforms as _transforms from torchvision.transforms.v2 import functional as F, Transform from .utils import is_simple_tensor class ConvertBoundingBoxFormat(Transform): """[BETA] Convert bounding box coordinates to the given ...
from __future__ import annotations from collections.abc import Iterable import torch import torch.nn as nn import torch.nn.functional as F from sentence_transformers.sparse_encoder import SparseEncoder def normalized_mean_squared_error( reconstruction: torch.Tensor, original_input: torch.Tensor, ) -> torch...
from __future__ import annotations from collections.abc import Iterable import torch import torch.nn as nn import torch.nn.functional as F from sentence_transformers.sparse_encoder import SparseEncoder class ReconstructionLoss(nn.Module): """ Reconstruction Loss module for Sparse AutoEncoder. This mod...
from typing import TYPE_CHECKING, Type, TypeVar, Union from uuid import UUID from pydantic import BaseConfig, parse_obj_as from pydantic.fields import ModelField from docarray.typing.proto_register import _register_proto if TYPE_CHECKING: from docarray.proto import NodeProto from docarray.typing.abstract_type i...
from typing import TYPE_CHECKING, Type, TypeVar, Union from uuid import UUID from pydantic import BaseConfig, parse_obj_as from pydantic.fields import ModelField if TYPE_CHECKING: from docarray.proto import NodeProto from docarray.typing.abstract_type import AbstractType T = TypeVar('T', bound='ID') class ID(...
# Copyright (c) OpenMMLab. All rights reserved. from .local_visualizer import DetLocalVisualizer, TrackLocalVisualizer from .palette import get_palette, jitter_color, palette_val __all__ = [ 'palette_val', 'get_palette', 'DetLocalVisualizer', 'jitter_color', 'TrackLocalVisualizer' ]
# Copyright (c) OpenMMLab. All rights reserved. from .local_visualizer import DetLocalVisualizer from .palette import get_palette, jitter_color, palette_val __all__ = ['palette_val', 'get_palette', 'DetLocalVisualizer', 'jitter_color']
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from pathlib import Path from typing import Dict, Tuple import numpy as np import pytest from executor.torch_encoder import ImageTorchEncoder from jina import Document, DocumentArray, Executor def test_config(): ...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from pathlib import Path from typing import Dict, Tuple import numpy as np import pytest from jina import Document, DocumentArray, Executor from ...torch_encoder import ImageTorchEncoder def test_config(): ex ...
from __future__ import annotations __version__ = "3.5.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" import importlib import os from sentence_transformers.backend import ( export_dynamic_quantized_onnx_model, export_optimized_onnx_model, export_static_quantized_openvino_model, ) from senten...
from __future__ import annotations __version__ = "3.5.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" import importlib import os from sentence_transformers.backend import ( export_dynamic_quantized_onnx_model, export_optimized_onnx_model, export_static_quantized_openvino_model, ) from senten...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp import matplotlib.patches as mpatches import matplotlib.pyplot as plt import mmcv import numpy as np try: import imageio except ImportError: imageio = None # TODO verify after refactoring analyze_results.py def p...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp import matplotlib.patches as mpatches import matplotlib.pyplot as plt import mmcv import numpy as np try: import imageio except ImportError: imageio = None def parse_args(): parser = argparse.ArgumentParser(d...
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py'] model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
_base_ = './mask2former_r50_lsj_8x2_50e_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
from __future__ import annotations from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator from sentence_transformers.sparse_encoder.evaluation import ( SparseBinaryClassificationEvaluator, SparseEmbeddingSimilarityEvaluator, SparseInformationRetrievalEvaluator, SparseM...
from __future__ import annotations from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator from sentence_transformers.sparse_encoder.evaluation import ( SparseBinaryClassificationEvaluator, SparseEmbeddingSimilarityEvaluator, SparseInformationRetrievalEvaluator, SparseM...
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, extract_path_from_uri, is_remote_filesystem from .utils import require_lz4, require_zstandard def tes...
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lz4, require_zstan...
import os import shutil import subprocess from pathlib import Path import pytest @pytest.fixture(scope="session", autouse=True) def download_cache(): os.system('scripts/download_full.sh') yield shutil.rmtree('.cache', ignore_errors=True) @pytest.fixture(scope='session') def docker_image_name() -> str: ...
import os import shutil import pytest @pytest.fixture(scope="session", autouse=True) def download_cache(): os.system('scripts/download_full.sh') yield shutil.rmtree('.cache', ignore_errors=True)
import pytest from docarray.documents import Video from docarray.typing import AudioNdArray, NdArray, VideoNdArray from tests import TOYDATA_DIR LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4') REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noq...
import pytest from docarray.documents import Video from docarray.typing import AudioNdArray, NdArray, VideoNdArray from tests import TOYDATA_DIR LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4') REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noq...
# dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # Example of using a different file client # Method 1: simply set the data root and let the file I/O module # automatically infer from prefix (does not support LMDB and Memcache yet) # data_root = 's3://openmmlab/datasets/detection/coco/' # Method 2: Us...
# dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='disk') tra...
import os from typing import Any, Optional from llama_index.llms.openai_like import OpenAILike class Cerebras(OpenAILike): """ Cerebras LLM. Examples: `pip install llama-index-llms-cerebras` ```python from llama_index.llms.cerebras import Cerebras # Set up the Cerebras ...
import os from typing import Any, Optional from llama_index.llms.openai_like import OpenAILike class Cerebras(OpenAILike): """ Cerebras LLM. Examples: `pip install llama-index-llms-cerebras` ```python from llama_index.llms.cerebras import Cerebras # Set up the Cerebras ...
from ._dsp import adsr_envelope, extend_pitch, oscillator_bank from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve __all__ = [ "add_noise", "adsr_envelope", "barkscale_fbanks", "convolve", "extend_pitch", "fftconvolve", "oscillator_bank", ]
from ._dsp import adsr_envelope, oscillator_bank from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve __all__ = [ "add_noise", "adsr_envelope", "barkscale_fbanks", "convolve", "fftconvolve", "oscillator_bank", ]
import logging from datasets import load_dataset from sentence_transformers.sparse_encoder import ( MLMTransformer, SparseEncoder, SparseRerankingEvaluator, SpladePooling, ) logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO) # Initialize the SPLA...
from datasets import load_dataset from sentence_transformers.sparse_encoder import ( MLMTransformer, SparseEncoder, SparseRerankingEvaluator, SpladePooling, ) # Initialize the SPLADE model model_name = "naver/splade-cocondenser-ensembledistil" model = SparseEncoder( modules=[ MLMTransforme...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import glob import os.path as osp from mmengine.config import Config from mmengine.fileio import dump, load from mmengine.utils import mkdir_or_exist def parse_args(): parser = argparse.ArgumentParser( description='Gather benchmarked models ...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import glob import os.path as osp import mmcv from mmcv import Config def parse_args(): parser = argparse.ArgumentParser( description='Gather benchmarked models metric') parser.add_argument('config', help='test config file path') par...
import os import pytest import requests from jina import Flow from tests.helper import ( ProcessExecutor, _validate_custom_gateway_process, _validate_dummy_custom_gateway_response, ) from tests.unit.yaml.dummy_gateway import DummyGateway from tests.unit.yaml.dummy_gateway_get_streamer import DummyGatewayG...
import os import pytest import requests from jina import Flow from tests.helper import ( ProcessExecutor, _validate_custom_gateway_process, _validate_dummy_custom_gateway_response, ) from tests.unit.yaml.dummy_gateway import DummyGateway from tests.unit.yaml.dummy_gateway_get_streamer import DummyGatewayG...
import gc import unittest import torch from diffusers import ( StableDiffusionUpscalePipeline, ) from diffusers.utils import load_image from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, require_torch_accelerator, slow, ...
import gc import unittest import torch from diffusers import ( StableDiffusionUpscalePipeline, ) from diffusers.utils import load_image from diffusers.utils.testing_utils import ( enable_full_determinism, numpy_cosine_similarity_distance, require_torch_gpu, slow, ) from .single_file_testing_utils...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.backend.config import backend from keras.src.backend.config import epsilon from keras.src.backend.config import floatx from keras.src.backend.config import image_data_format from kera...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.backend.config import backend from keras.src.backend.config import epsilon from keras.src.backend.config import floatx from keras.src.backend.config import image_data_format from kera...