Columns: input (string, lengths 33 to 5k), output (string, lengths 32 to 5k).
A minimal loading sketch follows the rows below.
"""Utilities for working with HTML.""" import logging import re from collections.abc import Sequence from typing import Optional, Union from urllib.parse import urljoin, urlparse logger = logging.getLogger(__name__) PREFIXES_TO_IGNORE = ("javascript:", "mailto:", "#") SUFFIXES_TO_IGNORE = ( ".css", ".js", ...
"""Utilities for working with HTML.""" import logging import re from collections.abc import Sequence from typing import Optional, Union from urllib.parse import urljoin, urlparse logger = logging.getLogger(__name__) PREFIXES_TO_IGNORE = ("javascript:", "mailto:", "#") SUFFIXES_TO_IGNORE = ( ".css", ".js", ...
from keras.src import backend from keras.src.api_export import keras_export from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501 BaseImagePreprocessingLayer, ) from keras.src.ops.core import _saturate_cast @keras_export("keras.layers.AutoContrast") class Au...
from keras.src import backend from keras.src.api_export import keras_export from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501 BaseImagePreprocessingLayer, ) from keras.src.ops.core import _saturate_cast @keras_export("keras.layers.AutoContrast") class Au...
import pytest from docarray import DocumentArray, Document from docarray.array.weaviate import DocumentArrayWeaviate import numpy as np @pytest.fixture() def docs(): return DocumentArray([Document(id=f'{i}') for i in range(1, 10)]) @pytest.mark.parametrize( 'to_delete', [ 0, 1, ...
import pytest from docarray import DocumentArray, Document from docarray.array.weaviate import DocumentArrayWeaviate import numpy as np @pytest.fixture() def docs(): return DocumentArray([Document(id=f'{i}') for i in range(1, 10)]) @pytest.mark.parametrize( 'to_delete', [ 0, 1, ...
""" This example runs a CNN after the word embedding lookup. The output of the CNN is than pooled, for example with mean-pooling. """ from torch.utils.data import DataLoader import math from sentence_transformers import models, losses, util from sentence_transformers import LoggingHandler, SentenceTransformer from s...
""" This example runs a CNN after the word embedding lookup. The output of the CNN is than pooled, for example with mean-pooling. """ from torch.utils.data import DataLoader import math from sentence_transformers import models, losses, util from sentence_transformers import LoggingHandler, SentenceTransformer from s...
# Copyright (c) OpenMMLab. All rights reserved. import pytest from mmdet.datasets import DATASETS def test_xml_dataset(): dataconfig = { 'ann_file': 'data/VOCdevkit/VOC2007/ImageSets/Main/test.txt', 'img_prefix': 'data/VOCdevkit/VOC2007/', 'pipeline': [{ 'type': 'LoadImageFrom...
import pytest from mmdet.datasets import DATASETS def test_xml_dataset(): dataconfig = { 'ann_file': 'data/VOCdevkit/VOC2007/ImageSets/Main/test.txt', 'img_prefix': 'data/VOCdevkit/VOC2007/', 'pipeline': [{ 'type': 'LoadImageFromFile' }] } XMLDataset = DATASETS...
_base_ = [ '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth' model = dict( backbone=dict(init_cfg=dict(type='Pretrained', chec...
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth' model = dict( backbone=dict(init_cfg=dict(type='Pretrained', chec...
_base_ = [ '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) # training schedule, voc dataset is repeated 3 times, in # `_base_/datasets/voc0712.py`, so the actual epoch = 4 * 3 = 12 max_epoch...
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) # training schedule, voc dataset is repeated 3 times, in # `_base_/datasets/voc0712.py`, so the actual epoch = 4 * 3 = 12 max_epoch...
import torch from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda from .functional_test_impl import Functional64OnlyTestImpl, FunctionalTestImpl @skipIfNoCuda class FunctionalFloat32CUDATest(FunctionalTestImpl, PytorchTestCase): dtype = torch.float32 device = torch.device("cuda", 0) @...
import torch from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda from .functional_test_impl import FunctionalTestImpl @skipIfNoCuda class FunctionalFloat32CUDATest(FunctionalTestImpl, PytorchTestCase): dtype = torch.float32 device = torch.device("cuda") @skipIfNoCuda class Functional...
import pathlib from typing import Any, Callable, Optional, Tuple, Union from PIL import Image from .utils import verify_str_arg from .vision import VisionDataset class StanfordCars(VisionDataset): """Stanford Cars Dataset The Cars dataset contains 16,185 images of 196 classes of cars. The data is spli...
import pathlib from typing import Any, Callable, Optional, Tuple from PIL import Image from .utils import verify_str_arg from .vision import VisionDataset class StanfordCars(VisionDataset): """Stanford Cars Dataset The Cars dataset contains 16,185 images of 196 classes of cars. The data is split into ...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from typing import List, Optional, Tuple import torch import torch.nn as nn from mmcv import ops from mmengine.model import BaseModule from torch import Tensor from mmdet.core.utils.typing import ConfigType, OptMultiConfig class...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import torch import torch.nn as nn from mmcv import ops from mmengine.model import BaseModule class BaseRoIExtractor(BaseModule, metaclass=ABCMeta): """Base class for RoI extractor. Args: roi_layer (dict): Specif...
from keras.src import backend from keras.src.api_export import keras_export from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501 BaseImagePreprocessingLayer, ) @keras_export("keras.layers.RandomGrayscale") class RandomGrayscale(BaseImagePreprocessingLayer):...
from keras.src import backend from keras.src.api_export import keras_export from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501 BaseImagePreprocessingLayer, ) @keras_export("keras.layers.RandomGrayscale") class RandomGrayscale(BaseImagePreprocessingLayer):...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Tuple, Dict import pytest import numpy as np from jina import DocumentArray, Document from ...torch_encoder import ImageTorchEncoder @pytest.mark.parametrize( ['content', 'out_shape'], ...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Tuple, Dict import pytest import numpy as np from jina import DocumentArray, Document try: from torch_encoder import ImageTorchEncoder except: from jinahub.image.encoder.torch_encoder im...
from functools import partial from inspect import isclass from typing import Any, Union, cast from pydantic import BaseModel from langchain_core.language_models import FakeListChatModel from langchain_core.load.dump import dumps from langchain_core.load.load import loads from langchain_core.messages import HumanMessa...
from functools import partial from inspect import isclass from typing import Any, Union, cast from pydantic import BaseModel from langchain_core.language_models import FakeListChatModel from langchain_core.load.dump import dumps from langchain_core.load.load import loads from langchain_core.messages import HumanMessa...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.legacy.losses import Reduction from keras.src.losses import deserialize from keras.src.losses import get from keras.src.losses import serialize from keras.src.losses.loss import Loss ...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.legacy.losses import Reduction from keras.src.losses import deserialize from keras.src.losses import get from keras.src.losses import serialize from keras.src.losses.loss import Loss ...
import logging from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser-ensembledistil") datasets = ["QuoraRetrieval...
import logging from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser-ensembledistil") datasets = ["QuoraRetrieval...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# Copyright (c) OpenMMLab. All rights reserved. from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset, ADE20KSegDataset) from .base_det_dataset import BaseDetDataset from .base_semseg_dataset import BaseSegDataset from .base_video_dataset import BaseVideoDataset from .cityscapes import ...
# Copyright (c) OpenMMLab. All rights reserved. from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset, ADE20KSegDataset) from .base_det_dataset import BaseDetDataset from .base_semseg_dataset import BaseSegDataset from .base_video_dataset import BaseVideoDataset from .cityscapes import ...
import time from datasets import load_dataset from sentence_transformers import SentenceTransformer from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch # 1. Load the quora corpus with questions dataset = load_dataset("quora", split="train").map( lambda batch: {"text": [text...
import time from sentence_transformers import SentenceTransformer from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch from datasets import load_dataset # 1. Load the quora corpus with questions dataset = load_dataset("quora", split="train").map( lambda batch: {"text": [text ...
import unittest import torch import torchaudio.prototype.functional as F from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script class TorchScriptConsistencyTestImpl(TestBaseMixin): def _assert_consistency(self, func, inputs, shape_only=False): inputs_ = [] for i i...
import unittest import torch import torchaudio.prototype.functional as F from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script class TorchScriptConsistencyTestImpl(TestBaseMixin): def _assert_consistency(self, func, inputs, shape_only=False): inputs_ = [] for i i...
from __future__ import annotations import torch from sentence_transformers.models.Module import Module class SpladePooling(Module): """ SPLADE Pooling module for creating the sparse embeddings. This module implements the SPLADE pooling mechanism that: 1. Takes token logits from a masked language m...
from __future__ import annotations import json import os from typing import Any import torch from torch import nn class SpladePooling(nn.Module): """ SPLADE Pooling module for creating the sparse embeddings. This module implements the SPLADE pooling mechanism that: 1. Takes token logits from a mas...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class ATSS(SingleStageDetector): """Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class ATSS(SingleStageDetector): """Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`...
from __future__ import annotations __version__ = "3.1.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" import importlib import os from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset from sentence_t...
__version__ = "3.1.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" import importlib import os from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset from sentence_transformers.LoggingHandler import Lo...
__version__ = '0.30.0' import logging from docarray.array import DocList, DocVec from docarray.base_doc.doc import BaseDoc __all__ = ['BaseDoc', 'DocList', 'DocVec'] logger = logging.getLogger('docarray') handler = logging.StreamHandler() formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s") hand...
__version__ = '0.21.1' import os from docarray.document import Document from docarray.array import DocumentArray from docarray.dataclasses import dataclass, field from docarray.helper import login, logout if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
# Copyright (c) OpenMMLab. All rights reserved. from .file_client import (BaseStorageBackend, FileClient, HardDiskBackend, HTTPBackend, LmdbBackend, MemcachedBackend, PetrelBackend) from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler from .i...
# Copyright (c) OpenMMLab. All rights reserved. from .file_client import BaseStorageBackend, FileClient from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler from .io import dump, load, register_handler from .parse import dict_from_file, list_from_file __all__ = [ 'BaseStorageBackend', 'Fi...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import subprocess import numpy as np import pytest from jina import Document, DocumentArray, Flow from jina.executors.metas import get_default_metas from jina_commons.indexers.dump import export_dump_stream...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import subprocess import numpy as np import pytest from jina import Document, DocumentArray, Flow from jina.executors.metas import get_default_metas from jina_commons.indexers.dump import export_dump_stream...
from pydantic import AnyUrl as BaseAnyUrl from docarray.document.base_node import BaseNode from docarray.proto import NodeProto class AnyUrl(BaseAnyUrl, BaseNode): def _to_node_protobuf(self) -> NodeProto: """Convert Document into a NodeProto protobuf message. This function should be called when ...
from pydantic import AnyUrl as BaseAnyUrl from docarray.document.base_node import BaseNode from docarray.proto import NodeProto class AnyUrl(BaseAnyUrl, BaseNode): def _to_node_protobuf(self) -> NodeProto: """Convert Document into a NodeProto protobuf message. This function should be called when ...
__version__ = '0.13.13' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install()
__version__ = '0.13.12' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install()
import os import sys import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm from xgboost.core import DataSplitMode try: import pandas as pd import pyarrow as pa import pyarrow.csv as pc except ImportError: pass pytestmark = pytest.mark.skipif( tm.no_arrow()["con...
import os import sys import unittest import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm from xgboost.core import DataSplitMode try: import pandas as pd import pyarrow as pa import pyarrow.csv as pc except ImportError: pass pytestmark = pytest.mark.skipif( tm...
"""Pass input through a moderation endpoint.""" from typing import Any, Optional from langchain_core.callbacks import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain_core.utils import check_package_version, get_from_dict_or_env from pydantic import Field, model_validator from ...
"""Pass input through a moderation endpoint.""" from typing import Any, Optional from langchain_core.callbacks import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain_core.utils import check_package_version, get_from_dict_or_env from pydantic import Field, model_validator from ...
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py'] model = dict( roi_head=dict(bbox_head=dict(num_classes=500)), test_cfg=dict(rcnn=dict(score_thr=0.01))) # dataset settings dataset_type = 'OpenImagesChallengeDataset' train_dataloader = dict( dataset=dict( type=dataset_type, ann_file='...
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py'] model = dict( roi_head=dict(bbox_head=dict(num_classes=500)), test_cfg=dict(rcnn=dict(score_thr=0.01))) # dataset settings dataset_type = 'OpenImagesChallengeDataset' data_root = 'data/OpenImages/' data = dict( train=dict( type=dataset_type, ...
""" Compute image embeddings """ from __future__ import annotations import os from PIL import Image from sentence_transformers import SentenceTransformer, util def test_simple_encode(clip_vit_b_32_model: SentenceTransformer) -> None: model = clip_vit_b_32_model # Encode an image: image_filepath = os.p...
""" Compute image embeddings """ from __future__ import annotations import os from PIL import Image from sentence_transformers import SentenceTransformer, util def test_simple_encode(clip_vit_b_32_model: SentenceTransformer) -> None: model = clip_vit_b_32_model # Encode an image: image_filepath = os.p...
from datasets import load_dataset from sentence_transformers.models import Pooling, Transformer from sentence_transformers.sparse_encoder import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import ( SparseBinaryClassificationEvaluator, ) from sentence_transformers.sparse_encoder.models import...
from datasets import load_dataset from sentence_transformers.models import Pooling, Transformer from sentence_transformers.sparse_encoder import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import ( SparseBinaryClassificationEvaluator, ) from sentence_transformers.sparse_encoder.models import...
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appl...
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appl...
from enum import Enum from typing import Any, Dict, Iterable import torch.nn.functional as F from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer class SiameseDistanceMetric(Enum): """The metric for the contrastive loss""" EUCLIDEAN = lambda x, y: F.pairwis...
from enum import Enum from typing import Any, Dict, Iterable import torch.nn.functional as F from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer class SiameseDistanceMetric(Enum): """The metric for the contrastive loss""" EUCLIDEAN = lambda x, y: F.pairwis...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess from pathlib import Path import pytest @pytest.fixture(scope='session') def docker_image_name() -> str: return Path(__file__).parents[1].stem.lower() @pytest.fixture(scope='session') def bui...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess from pathlib import Path import pytest @pytest.fixture(scope='session') def docker_image_name() -> str: return Path(__file__).parents[1].stem.lower() @pytest.fixture(scope='session') def bui...
import numpy as np from .any_url import AnyUrl class ImageUrl(AnyUrl): def load(self) -> np.ndarray: """ transform the url into an image Tensor; this is just a patch, we will move the function from the old docarray :return: tensor image """ return np.zeros((3, 224, 224))
import numpy as np from docarray.typing import Tensor from .any_url import AnyUrl class ImageUrl(AnyUrl): def load(self) -> Tensor: """ transform the url into an image Tensor; this is just a patch, we will move the function from the old docarray :return: tensor image """ ...
import json import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm try: import matplotlib matplotlib.use('Agg') from graphviz import Source from matplotlib.axes import Axes except ImportError: pass pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotli...
import json import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm try: import matplotlib matplotlib.use('Agg') from graphviz import Source from matplotlib.axes import Axes except ImportError: pass pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotli...
import argparse import os from typing import List, Union from jina.parsers.helper import CastHostAction def api_to_dict(show_all_args: bool = False): """Convert Jina API to a dict :param show_all_args: if set, then hidden args are also exported :return: dict """ if show_all_args: from jin...
import argparse import os from typing import List, Union def api_to_dict(show_all_args: bool = False): """Convert Jina API to a dict :param show_all_args: if set, then hidden args are also exported :return: dict """ if show_all_args: from jina.parsers import helper helper._SHOW_AL...
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transfor...
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if...
import ast from collections import defaultdict # Function to perform topological sorting def topological_sort(dependencies: dict) -> list[list[str]]: """Given the dependency graph, construct a sorted list of lists of modular files. For example, the returned list of lists might be: [ ["../modular...
import ast from collections import defaultdict # Function to perform topological sorting def topological_sort(dependencies: dict): # Nodes are the names of the models to convert (we only add those to the graph) nodes = {node.rsplit("modular_", 1)[1].replace(".py", "") for node in dependencies.keys()} # Thi...
""" OPUS (http://opus.nlpl.eu/) is a great collection of different parallel datasets for more than 400 languages. On the website, you can download parallel datasets for many languages in different formats. I found that the format "Bottom-left triangle: download plain text files (MOSES/GIZA++)" requires minimal overhea...
""" OPUS (http://opus.nlpl.eu/) is a great collection of different parallel datasets for more than 400 languages. On the website, you can download parallel datasets for many languages in different formats. I found that the format "Bottom-left triangle: download plain text files (MOSES/GIZA++)" requires minimal overhea...
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py' train_pipeline = [ dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), dict(type='LoadAnnotations', with_bbox=True), # `mean` and `to_rgb` should be the same with the `preprocess_cfg` dict(type='Expand', mean=[0, 0, 0], to_rgb=True, rat...
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py' # dataset settings # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='disk') train_pip...
from collections import namedtuple from typing import Any, Callable, Optional, TypeVar from typing_extensions import NamedTuple import torch.return_types from torch.utils._pytree import PyTree, tree_flatten, TreeSpec FlattenFuncSpec = Callable[[PyTree, TreeSpec], list] FlattenFuncExactMatchSpec = Callable[[PyTree, T...
from collections import namedtuple from typing import Any, Callable, Optional, TypeVar from typing_extensions import NamedTuple import torch.return_types from torch.utils._pytree import PyTree, tree_flatten, TreeSpec FlattenFuncSpec = Callable[[PyTree, TreeSpec], list] FlattenFuncExactMatchSpec = Callable[[PyTree, T...
from typing import Any, Collection, List, Optional, Tuple, Union from llama_index.core.tools.types import AsyncBaseTool from pydantic import BaseModel class LLMCompilerParseResult(BaseModel): """LLMCompiler parser result.""" thought: str idx: int tool_name: str args: str class JoinerOutput(Bas...
from typing import Any, Collection, List, Optional, Tuple, Union from llama_index.core.tools.types import AsyncBaseTool from pydantic import BaseModel class LLMCompilerParseResult(BaseModel): """LLMCompiler parser result.""" thought: str idx: int tool_name: str args: str class JoinerOutput(Bas...
# Copyright (c) OpenMMLab. All rights reserved. from .approx_max_iou_assigner import ApproxMaxIoUAssigner from .assign_result import AssignResult from .atss_assigner import ATSSAssigner from .base_assigner import BaseAssigner from .center_region_assigner import CenterRegionAssigner from .grid_assigner import GridAssign...
# Copyright (c) OpenMMLab. All rights reserved. from .approx_max_iou_assigner import ApproxMaxIoUAssigner from .assign_result import AssignResult from .atss_assigner import ATSSAssigner from .base_assigner import BaseAssigner from .center_region_assigner import CenterRegionAssigner from .grid_assigner import GridAssign...
"""Load agent.""" from collections.abc import Sequence from typing import Any, Optional from langchain_core._api import deprecated from langchain_core.callbacks import BaseCallbackManager from langchain_core.language_models import BaseLanguageModel from langchain_core.tools import BaseTool from langchain._api.deprec...
"""Load agent.""" from collections.abc import Sequence from typing import Any, Optional from langchain_core._api import deprecated from langchain_core.callbacks import BaseCallbackManager from langchain_core.language_models import BaseLanguageModel from langchain_core.tools import BaseTool from langchain._api.deprec...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
_base_ = './grid-rcnn_r50_fpn_gn-head_2x_coco.py' # training schedule max_epochs = 12 train_cfg = dict(max_epochs=max_epochs) # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.0001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, ...
_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py' # training schedule max_epochs = 12 train_cfg = dict(max_epochs=max_epochs) # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.0001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, ...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
from __future__ import annotations import logging import os from datasets import load_dataset from sentence_transformers.sparse_encoder import ( SparseEncoder, ) from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator from sentence_transformers.sparse_encoder.l...
from __future__ import annotations import logging import os from datasets import load_dataset from sentence_transformers.sparse_encoder import ( SparseEncoder, ) from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator from sentence_transformers.sparse_encoder.l...
import numpy as np import pytest from keras.src import backend from keras.src import layers from keras.src import testing class DropoutTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_dropout_basics(self): self.run_layer_test( layers.Dropout, init_kwarg...
import numpy as np import pytest from keras.src import backend from keras.src import layers from keras.src import testing class DropoutTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_dropout_basics(self): self.run_layer_test( layers.Dropout, init_kwarg...
import pytest from whisper.tokenizer import get_tokenizer @pytest.mark.parametrize("multilingual", [True, False]) def test_tokenizer(multilingual): tokenizer = get_tokenizer(multilingual=False) assert tokenizer.sot in tokenizer.sot_sequence assert len(tokenizer.all_language_codes) == len(tokenizer.all_la...
from whisper.tokenizer import get_tokenizer def test_tokenizer(): gpt2_tokenizer = get_tokenizer(multilingual=False) multilingual_tokenizer = get_tokenizer(multilingual=True) text = "다람쥐 헌 쳇바퀴에 타고파" gpt2_tokens = gpt2_tokenizer.encode(text) multilingual_tokens = multilingual_tokenizer.encode(text...
import numpy as np import pytest from docarray import BaseDoc, DocArray from docarray.typing import NdArray @pytest.mark.parametrize('shuffle', [False, True]) @pytest.mark.parametrize('stack', [False, True]) @pytest.mark.parametrize('batch_size,n_batches', [(16, 7), (10, 10)]) def test_batch(shuffle, stack, batch_si...
import numpy as np import pytest from docarray import BaseDocument, DocumentArray from docarray.typing import NdArray @pytest.mark.parametrize('shuffle', [False, True]) @pytest.mark.parametrize('stack', [False, True]) @pytest.mark.parametrize('batch_size,n_batches', [(16, 7), (10, 10)]) def test_batch(shuffle, stack...
import pytest from docarray.utils._internal.misc import is_tf_available tf_available = is_tf_available() if tf_available: import tensorflow as tf from docarray.computation.tensorflow_backend import TensorFlowCompBackend from docarray.typing import TensorFlowTensor metrics = TensorFlowCompBackend.Met...
import pytest from docarray.utils.misc import is_tf_available tf_available = is_tf_available() if tf_available: import tensorflow as tf from docarray.computation.tensorflow_backend import TensorFlowCompBackend from docarray.typing import TensorFlowTensor metrics = TensorFlowCompBackend.Metrics else:...
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_doc import BaseDoc from docarray.typing import AnyEmbedding, AudioUrl from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typ...
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_doc import BaseDoc from docarray.typing import AnyEmbedding, AudioUrl from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typ...
from __future__ import annotations from .splade_callbacks import SchedulerType, SpladeLambdaSchedulerCallback __all__ = ["SpladeLambdaSchedulerCallback", "SchedulerType"]
from __future__ import annotations from sentence_transformers.sparse_encoder.callbacks.splade_callbacks import ( SchedulerType, SpladeLambdaSchedulerCallback, ) __all__ = ["SpladeLambdaSchedulerCallback", "SchedulerType"]
""" Example of training with Dask on GPU ==================================== """ import cupy as cp import dask_cudf from dask import array as da from dask import dataframe as dd from dask.distributed import Client from dask_cuda import LocalCUDACluster from xgboost import dask as dxgb from xgboost.dask import DaskDM...
""" Example of training with Dask on GPU ==================================== """ import cupy as cp import dask_cudf from dask import array as da from dask import dataframe as dd from dask.distributed import Client from dask_cuda import LocalCUDACluster from xgboost import dask as dxgb from xgboost.dask import DaskDMa...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import logging import os import os.path as osp from mmengine.config import Config, DictAction from mmengine.logging import print_log from mmengine.registry import RUNNERS from mmengine.runner import Runner from mmdet.utils import register_all_modules d...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import logging import os import os.path as osp from mmengine.config import Config, DictAction from mmengine.logging import print_log from mmengine.registry import RUNNERS from mmengine.runner import Runner from mmdet.utils import register_all_modules d...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras import activations as activations from keras import applications as applications from keras import callbacks as callbacks from keras import config as config from keras import constraints ...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.api import activations from keras.api import applications from keras.api import callbacks from keras.api import config from keras.api import constraints from keras.api import datasets fro...
# Copyright (c) OpenMMLab. All rights reserved. import warnings from typing import List, Sequence, Union import numpy as np import torch from .base_data_element import BaseDataElement class PixelData(BaseDataElement): """Data structure for pixel-level annotations or predictions. All data items in ``data_fi...
# Copyright (c) OpenMMLab. All rights reserved. import warnings from typing import List, Sequence, Union import numpy as np import torch from .base_data_element import BaseDataElement class PixelData(BaseDataElement): """Data structure for pixel-level annotations or predictions. All data items in ``data_f...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.config import ConfigDict from mmdet.registry import MODELS from mmdet.utils import OptConfigType, OptMultiConfig from .two_stage import TwoStageDetector @MODELS.register_module() class MaskRCNN(TwoStageDetector): """Implementation of `Mask R-CNN <http...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.config import ConfigDict from mmdet.core.utils import OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .two_stage import TwoStageDetector @MODELS.register_module() class MaskRCNN(TwoStageDetector): """Implementation of `Mask R-CNN ...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Dict, Iterable, Sequence import numpy as np import tensorflow as tf from jina import DocumentArray, Executor, requests from jina.logging.logger import JinaLogger from jina_commons.batching imp...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Dict, Iterable, List, Union import numpy as np import tensorflow as tf from jina import DocumentArray, Executor, requests from jina.logging.logger import JinaLogger from jina_commons.batching ...
from typing import Union from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray from docarray.utils._internal.misc import is_tf_available, is_torch_available torch_available = is_torch_available() if torch_available: from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor tf_ava...
from typing import Union from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray from docarray.utils.misc import is_tf_available, is_torch_available torch_available = is_torch_available() if torch_available: from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor tf_available = i...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Iterable, Optional import torch from jina import DocumentArray, Executor, requests from .audio_clip.model import AudioCLIP class AudioCLIPTextEncoder(Executor): """ Encode text data...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Iterable, Optional import torch from jina import DocumentArray, Executor, requests from .audio_clip.model import AudioCLIP class AudioCLIPTextEncoder(Executor): """ Encode text data...
"""Module for argparse for Client""" def mixin_comm_protocol_parser(parser): """Add the arguments for the protocol to the parser :param parser: the parser configure """ from jina.enums import GatewayProtocolType parser.add_argument( '--protocol', type=GatewayProtocolType.from_st...
"""Module for argparse for Client""" def mixin_comm_protocol_parser(parser): """Add the arguments for the protocol to the parser :param parser: the parser configure """ from jina.enums import GatewayProtocolType parser.add_argument( '--protocol', type=GatewayProtocolType.from_st...
"""Configure global settings and get information about the working environment.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # Machine learning module for Python # ================================== # # sklearn is a Python module integrating classical machine # learning algorithms...
"""Configure global settings and get information about the working environment.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # Machine learning module for Python # ================================== # # sklearn is a Python module integrating classical machine # learning algorithms...
""" This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64]. It generates sentence embeddings that can be compared using...
""" This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64]. It generates sentence embeddings that can be compared using...
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlite3 import sq...
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlite3 import sq...
from __future__ import annotations from dataclasses import dataclass from sentence_transformers.training_args import SentenceTransformerTrainingArguments @dataclass class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments): r""" SparseEncoderTrainingArguments extends :class:`~SentenceTransf...
from __future__ import annotations from dataclasses import dataclass from sentence_transformers.training_args import SentenceTransformerTrainingArguments @dataclass class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments): """ SparseEncoderTrainingArguments extends :class:`~SentenceTransfo...
# dataset settings dataset_type = 'RefCocoDataset' data_root = 'data/coco/' backend_args = None test_pipeline = [ dict(type='LoadImageFromFile', backend_args=backend_args), dict(type='Resize', scale=(1333, 800), keep_ratio=True), dict( type='LoadAnnotations', with_mask=True, with_b...
# dataset settings dataset_type = 'RefCOCODataset' data_root = 'data/refcoco/' backend_args = None train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='Resize', scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', prob=0.5), dict( type='PackDetInputs', meta_keys=('img_...
"""All minimum dependencies for scikit-learn.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import argparse from collections import defaultdict # scipy and cython should by in sync with pyproject.toml NUMPY_MIN_VERSION = "1.22.0" SCIPY_MIN_VERSION = "1.8.0" JOBLIB_MIN_VERSION = "1...
"""All minimum dependencies for scikit-learn.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import argparse from collections import defaultdict # scipy and cython should by in sync with pyproject.toml NUMPY_MIN_VERSION = "1.22.0" SCIPY_MIN_VERSION = "1.8.0" JOBLIB_MIN_VERSION = "1...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import pytest from ...simpleranker import SimpleRanker @pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']]) @pytest.mark.parametrize('ranking', ['min', 'max']) def test_ranking( documents_chunk, ...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import pytest from ...simpleranker import SimpleRanker @pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']]) @pytest.mark.parametrize('ranking', ['min', 'max']) def test_ranking(documents_chunk, docume...
import os from pathlib import Path from torchaudio.datasets.libritts import LIBRITTS from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase _UTTERANCE_IDS = [ [19, 198, "000000", "000000"], [26, 495, "000004", "000000"], ] _ORIGINAL_TEXT = "this ...
import os from pathlib import Path from torchaudio.datasets.libritts import LIBRITTS from torchaudio_unittest.common_utils import ( get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase, ) _UTTERANCE_IDS = [ [19, 198, "000000", "000000"], [26, 495, "000004", "000000"], ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.agent_toolkits.openapi.planner import ( RequestsDeleteToolWithParsing, RequestsGetToolWithParsing, RequestsPatchToolWithParsing, RequestsPostToolWithParsing, ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.agent_toolkits.openapi.planner import ( RequestsDeleteToolWithParsing, RequestsGetToolWithParsing, RequestsPatchToolWithParsing, RequestsPostToolWithParsing, ...
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py' norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( # use ResNeSt img_norm data_preprocessor=dict( mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], bgr_to_rgb=True), backbone=dict( type...
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( # use ResNeSt img_norm data_preprocessor=dict( mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], bgr_to_rgb=True), backbone=dict( type...
import numpy as np from absl.testing import parameterized from keras.src import backend from keras.src import testing from keras.src.utils import backend_utils class BackendUtilsTest(testing.TestCase): @parameterized.named_parameters( ("numpy", "numpy"), ("jax", "jax"), ("tensorflow", "te...
import numpy as np from absl.testing import parameterized from keras.src import backend from keras.src import testing from keras.src.utils import backend_utils class BackendUtilsTest(testing.TestCase, parameterized.TestCase): @parameterized.named_parameters( ("numpy", "numpy"), ("jax", "jax"), ...
"""Test HuggingFace API wrapper.""" from pathlib import Path import pytest from langchain_community.llms.huggingface_hub import HuggingFaceHub from langchain_community.llms.loading import load_llm from tests.integration_tests.llms.utils import assert_llm_equality def test_huggingface_text_generation() -> None: ...
"""Test HuggingFace API wrapper.""" from pathlib import Path import pytest from langchain_community.llms.huggingface_hub import HuggingFaceHub from langchain_community.llms.loading import load_llm from tests.integration_tests.llms.utils import assert_llm_equality def test_huggingface_text_generation() -> None: ...
from __future__ import annotations from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator from sentence_transformers.sparse_encoder.evaluation import ( SparseBinaryClassificationEvaluator, SparseEmbeddingSimilarityEvaluator, SparseInformationRetrievalEvaluator, SparseM...
from __future__ import annotations from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator from sentence_transformers.sparse_encoder.evaluation import ( SparseBinaryClassificationEvaluator, SparseEmbeddingSimilarityEvaluator, SparseInformationRetrievalEvaluator, SparseM...
# TODO: enable ruff qa on this file when we figure out why it thinks weaviate_client is # redefined in each test that uses that fixture # ruff: noqa import numpy as np import pytest import torch from pydantic import Field from docarray import BaseDoc from docarray.index.backends.weaviate import WeaviateDocumentIndex from ...
# TODO: enable ruff qa on this file when we figure out why it thinks weaviate_client is # redefined in each test that uses that fixture # ruff: noqa import numpy as np import pytest import torch from pydantic import Field from docarray import BaseDoc from docarray.index.backends.weaviate import WeaviateDocumentIndex from ...
from typing import Any, Dict, Optional from llama_index.core.base.llms.types import LLMMetadata from llama_index.core.bridge.pydantic import Field from llama_index.core.constants import ( DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE, ) from llama_index.core.base.llms.generic_utils impor...
from typing import Any, Dict, Optional from llama_index.core.base.llms.types import LLMMetadata from llama_index.core.bridge.pydantic import Field from llama_index.core.constants import ( DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE, ) from llama_index.core.base.llms.generic_utils impor...
import types from keras.src.activations.activations import celu from keras.src.activations.activations import elu from keras.src.activations.activations import exponential from keras.src.activations.activations import gelu from keras.src.activations.activations import hard_sigmoid from keras.src.activations.activation...
import types from keras.src.activations.activations import elu from keras.src.activations.activations import exponential from keras.src.activations.activations import gelu from keras.src.activations.activations import hard_sigmoid from keras.src.activations.activations import hard_silu from keras.src.activations.activ...
from typing import Any, Dict, Optional import httpx from llama_index.core.base.embeddings.base import ( DEFAULT_EMBED_BATCH_SIZE, ) from llama_index.core.bridge.pydantic import Field from llama_index.core.callbacks import CallbackManager from llama_index.embeddings.fireworks.utils import ( resolve_fireworks_cr...
from typing import Any, Dict, Optional import httpx from llama_index.core.base.embeddings.base import ( DEFAULT_EMBED_BATCH_SIZE, ) from llama_index.core.bridge.pydantic import Field from llama_index.core.callbacks import CallbackManager from llama_index.embeddings.fireworks.utils import ( resolve_fireworks_cr...
from docarray import BaseDoc from docarray.typing import AnyUrl def test_set_any_url(): class MyDocument(BaseDoc): any_url: AnyUrl d = MyDocument(any_url="https://jina.ai") assert isinstance(d.any_url, AnyUrl) assert d.any_url == "https://jina.ai"
from docarray import BaseDocument from docarray.typing import AnyUrl def test_set_any_url(): class MyDocument(BaseDocument): any_url: AnyUrl d = MyDocument(any_url="https://jina.ai") assert isinstance(d.any_url, AnyUrl) assert d.any_url == "https://jina.ai"
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class Translation: """`FeatureConnector` for translations with fixed languages per example. Here for ...
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class Translation: """`FeatureConnector` for translations with fixed languages per example. Here for ...
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# docstyle-ignore INSTALL_CONTENT = """ # Datasets installation ! pip install datasets transformers # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/datasets.git """ notebook_first_cells = [{"type": "code...
default_branch_name = "main" version_prefix = ""
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.backend.config import backend from keras.src.backend.config import disable_flash_attention from keras.src.backend.config import enable_flash_attention from keras.src.backend.config im...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.backend.config import backend from keras.src.backend.config import epsilon from keras.src.backend.config import floatx from keras.src.backend.config import image_data_format from kera...
import torch _TORCHFUNCTION_SUBCLASS = False class _ReturnTypeCM: def __init__(self, to_restore): self.to_restore = to_restore def __enter__(self): return self def __exit__(self, *args): global _TORCHFUNCTION_SUBCLASS _TORCHFUNCTION_SUBCLASS = self.to_restore def set_r...
import torch _TORCHFUNCTION_SUBCLASS = False class _ReturnTypeCM: def __init__(self, to_restore): self.to_restore = to_restore def __enter__(self): return self def __exit__(self, *args): global _TORCHFUNCTION_SUBCLASS _TORCHFUNCTION_SUBCLASS = self.to_restore def set_r...
import pathlib from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper from torchvision.datapoints import BoundingBox from torchvision.prototype.datapoints import Label from torchvision.prototype.datasets.utils import Dataset, Encod...
import pathlib from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper from torchvision.prototype.datapoints import BoundingBox, Label from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResou...
import os import re from pathlib import Path from typing import Optional, Tuple, Union import torch import torchaudio from torch.hub import download_url_to_file from torch.utils.data import Dataset from torchaudio.datasets.utils import extract_archive URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz" _C...
import os import re from pathlib import Path from typing import Optional, Tuple, Union import torch import torchaudio from torch.hub import download_url_to_file from torch.utils.data import Dataset from torchaudio.datasets.utils import extract_archive URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz" _C...
from typing import Any from llama_index.core.bridge.pydantic import Field, model_serializer from llama_index.core.tools import ToolSelection, ToolOutput from llama_index.core.llms import ChatMessage from llama_index.core.workflow import Event, StartEvent class AgentInput(Event): """LLM input.""" input: list...
from typing import Any from llama_index.core.bridge.pydantic import model_serializer from llama_index.core.tools import ToolSelection, ToolOutput from llama_index.core.llms import ChatMessage from llama_index.core.workflow import Event, StartEvent class AgentInput(Event): """LLM input.""" input: list[ChatMe...
import json import os import pytest from hubble.executor import HubExecutor from hubble.executor.hubio import HubIO from jina import __version__ from jina.orchestrate.deployments.config.helper import ( get_base_executor_version, get_image_name, to_compatible_name, ) @pytest.mark.parametrize('is_master',...
import json import os import pytest from jina import __version__ from jina.hubble import HubExecutor from jina.hubble.hubio import HubIO from jina.orchestrate.deployments.config.helper import ( get_base_executor_version, get_image_name, to_compatible_name, ) @pytest.mark.parametrize('is_master', (True, ...
"""Test chat model integration using standard integration tests.""" from typing import Type from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_ollama.chat_models import ChatOllama class TestChatOllama(ChatModelIntegrationTests): @property def chat_model_class(self) -> Ty...
"""Test chat model integration using standard integration tests.""" from typing import Type from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_ollama.chat_models import ChatOllama class TestChatOllama(ChatModelIntegrationTests): @property def chat_model_class(self) -> Ty...
import time from functools import partial from huggingface_hub import HfApi, hf_hub_url from huggingface_hub.hf_api import RepoFile from packaging import version from requests import ConnectionError, HTTPError from .. import config from . import logging logger = logging.get_logger(__name__) # Retry `preupload_lfs_...
import time from functools import partial from huggingface_hub import HfApi, hf_hub_url from packaging import version from requests import ConnectionError, HTTPError from .. import config from . import logging logger = logging.get_logger(__name__) # Retry `preupload_lfs_files` in `huggingface_hub<0.20.0` on the "5...
# Copyright (c) OpenMMLab. All rights reserved. from .base import BaseMOTModel from .bytetrack import ByteTrack from .deep_sort import DeepSORT from .qdtrack import QDTrack __all__ = ['BaseMOTModel', 'ByteTrack', 'QDTrack', 'DeepSORT']
# Copyright (c) OpenMMLab. All rights reserved. from .base import BaseMOTModel from .bytetrack import ByteTrack from .qdtrack import QDTrack __all__ = ['BaseMOTModel', 'ByteTrack', 'QDTrack']
import copy import importlib import os import sys from keras.src import backend as backend_module from keras.src.api_export import keras_export from keras.src.backend.common import global_state def in_tf_graph(): if global_state.get_global_attribute("in_tf_graph_scope", False): return True if "tenso...
import copy import importlib import os import sys from keras.src import backend as backend_module from keras.src.api_export import keras_export from keras.src.backend.common import global_state def in_tf_graph(): if global_state.get_global_attribute("in_tf_graph_scope", False): return True if "tenso...
# Copyright (c) OpenMMLab. All rights reserved. """MMEngine provides 20 root registries to support using modules across projects. More details can be found at https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. """ from .build_functions import (build_model_from_cfg, build_runner_from_cfg, ...
# Copyright (c) OpenMMLab. All rights reserved. """MMEngine provides 20 root registries to support using modules across projects. More details can be found at https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. """ from .build_functions import (build_model_from_cfg, build_runner_from_cfg, ...
import hashlib import json from typing import Tuple, TYPE_CHECKING import numpy as np if TYPE_CHECKING: # pragma: no cover from docarray.typing import T class FeatureHashMixin: """Provide helper functions for feature hashing.""" def embed_feature_hashing( self: 'T', n_dim: int = 256, ...
import hashlib import json from typing import Tuple, TYPE_CHECKING import numpy as np if TYPE_CHECKING: from docarray.typing import T class FeatureHashMixin: """Provide helper functions for feature hashing.""" def embed_feature_hashing( self: 'T', n_dim: int = 256, sparse: bool ...
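The rows above are input/output string pairs matching the column summary in the header. As a minimal sketch of how such a table might be loaded and the stringlengths summary re-derived, assuming it is published as a Hugging Face dataset with "input" and "output" columns (the repository id below is a hypothetical placeholder):

# Minimal sketch, assuming a Hugging Face dataset with "input"/"output" string
# columns; "your-org/code-pairs" is a hypothetical repository id.
from datasets import load_dataset

ds = load_dataset("your-org/code-pairs", split="train")
assert {"input", "output"} <= set(ds.column_names)

# Recompute the length ranges reported in the header (input 33 to 5k, output 32 to 5k).
input_lengths = [len(row["input"]) for row in ds]
output_lengths = [len(row["output"]) for row in ds]
print(f"input:  {min(input_lengths)}-{max(input_lengths)} chars")
print(f"output: {min(output_lengths)}-{max(output_lengths)} chars")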