Columns: input (string, length 33–5k) · output (string, length 32–5k)
from langchain_core.outputs import ( ChatGeneration, ChatGenerationChunk, ChatResult, Generation, GenerationChunk, LLMResult, RunInfo, ) __all__ = [ "ChatGeneration", "ChatGenerationChunk", "ChatResult", "Generation", "GenerationChunk", "LLMResult", "RunInfo", ]
from langchain_core.outputs import ( ChatGeneration, ChatGenerationChunk, ChatResult, Generation, GenerationChunk, LLMResult, RunInfo, ) __all__ = [ "Generation", "GenerationChunk", "ChatGeneration", "ChatGenerationChunk", "RunInfo", "ChatResult", "LLMResult", ]
from .common_utils import _get_id2label, _get_label2id, create_tsv from .feature_utils import dump_features from .kmeans import get_km_label, learn_kmeans __all__ = [ "create_tsv", "_get_id2label", "_get_label2id", "dump_features", "learn_kmeans", "get_km_label", ]
from .common_utils import create_tsv from .feature_utils import dump_features from .kmeans import get_km_label, learn_kmeans __all__ = [ "create_tsv", "dump_features", "learn_kmeans", "get_km_label", ]
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.datasets.cifar10 import load_data as load_data
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.datasets.cifar10 import load_data
""" This script contains an example how to perform semantic search with PyTorch. It performs exact nearest neighborh search. As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions (we only use about 100k): https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pai...
""" This script contains an example how to perform semantic search with PyTorch. It performs exact nearest neighborh search. As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions (we only use about 100k): https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pai...
import inspect import re from hashlib import sha256 from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from...
import inspect import re from hashlib import sha256 from typing import List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text...
# pants requires this import to recognize the dep import pytest_asyncio # noqa: F401 import pytest from llama_index.core.workflow.workflow import Workflow from llama_index.core.workflow.decorators import step from llama_index.core.workflow.events import StartEvent, StopEvent, Event from llama_index.core.bridge.pydan...
import pytest from llama_index.core.workflow.workflow import Workflow from llama_index.core.workflow.decorators import step from llama_index.core.workflow.events import StartEvent, StopEvent, Event from llama_index.core.bridge.pydantic import Field class OneTestEvent(Event): test_param: str = Field(default="test...
import random from collections import defaultdict from typing import Dict, Any, TYPE_CHECKING, Generator, List import numpy as np from docarray.helper import dunder_get if TYPE_CHECKING: # pragma: no cover from docarray import DocumentArray class GroupMixin: """These helpers yield groups of :class:`Docume...
import random from collections import defaultdict from typing import Dict, Any, TYPE_CHECKING, Generator, List import numpy as np from docarray.helper import dunder_get if TYPE_CHECKING: from docarray import DocumentArray class GroupMixin: """These helpers yield groups of :class:`DocumentArray` from a ...
import csv import logging import os from typing import List import numpy as np from sklearn.metrics import average_precision_score from sentence_transformers import InputExample from sentence_transformers.evaluation import BinaryClassificationEvaluator logger = logging.getLogger(__name__) class CEBinaryClassificat...
import logging from sklearn.metrics import average_precision_score from typing import List import numpy as np import os import csv from ... import InputExample from ...evaluation import BinaryClassificationEvaluator logger = logging.getLogger(__name__) class CEBinaryClassificationEvaluator: """ This evalua...
from ._label import Label, OneHotLabel
from ._bounding_box import BoundingBox, BoundingBoxFormat from ._datapoint import FillType, FillTypeJIT, InputType, InputTypeJIT from ._image import Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT from ._label import Label, OneHotLabel from ._mask import Mask from ._video import TensorVideoType, Ten...
"""GraphQL Reader.""" from typing import Dict, List, Optional import yaml from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class GraphQLReader(BaseReader): """ GraphQL reader. Combines all GraphQL results into the Document used by LlamaIndex. Args: ...
"""GraphQL Reader.""" from typing import Dict, List, Optional import yaml from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class GraphQLReader(BaseReader): """GraphQL reader. Combines all GraphQL results into the Document used by LlamaIndex. Args: ...
from typing import Dict, List, Tuple import pytest from llama_index.core.schema import Document from tests.mock_utils.mock_prompts import ( MOCK_INSERT_PROMPT, MOCK_QUERY_PROMPT, MOCK_REFINE_PROMPT, MOCK_SUMMARY_PROMPT, MOCK_TEXT_QA_PROMPT, ) @pytest.fixture() def documents() -> List[Document]: ...
from typing import Dict, List, Tuple import pytest from llama_index.core.schema import Document from tests.mock_utils.mock_prompts import ( MOCK_INSERT_PROMPT, MOCK_QUERY_PROMPT, MOCK_REFINE_PROMPT, MOCK_SUMMARY_PROMPT, MOCK_TEXT_QA_PROMPT, ) @pytest.fixture() def documents() -> List[Document]: ...
from __future__ import annotations import re from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from sentence_transformers.SentenceTransformer import SentenceTransformer class SentenceEvaluator: """ Base class for all evaluators Extend this class and implement __call__ for custom evaluators. ...
from __future__ import annotations import re from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from sentence_transformers.SentenceTransformer import SentenceTransformer class SentenceEvaluator: """ Base class for all evaluators Extend this class and implement __call__ for custom evaluators. ...
import asyncio import pytest from llama_index.graph_rag.cognee import CogneeGraphRAG @pytest.mark.asyncio() async def test_get_graph_url(monkeypatch): # Instantiate cognee GraphRAG cogneeRAG = CogneeGraphRAG( llm_api_key="", llm_provider="openai", llm_model="gpt-4o-mini", graph...
import asyncio import pytest from llama_index.graph_rag.cognee import CogneeGraphRAG @pytest.mark.asyncio() async def test_get_graph_url(monkeypatch): # Instantiate cognee GraphRAG cogneeRAG = CogneeGraphRAG( llm_api_key="", llm_provider="openai", llm_model="gpt-4o-mini", graph...
import os import pickle from typing import Optional, Iterable, Tuple from jina import Executor, requests, DocumentArray from jina.excepts import PretrainedModelFileDoesNotExist from jina_commons.batching import get_docs_batch_generator class TFIDFTextEncoder(Executor): """ Encode text into tf-idf sparse embe...
import os import pickle from typing import Optional, Iterable, Any, List, Tuple from jina import Executor, requests, DocumentArray from jina.excepts import PretrainedModelFileDoesNotExist from jina_commons.batching import get_docs_batch_generator class TFIDFTextEncoder(Executor): """ Encode text into tf-idf ...
import datasets from ..folder_based_builder import folder_based_builder logger = datasets.utils.logging.get_logger(__name__) class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig): """Builder Config for AudioFolder.""" drop_labels: bool = None drop_metadata: bool = None def __post...
from typing import List import datasets from ..folder_based_builder import folder_based_builder logger = datasets.utils.logging.get_logger(__name__) class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig): """Builder Config for AudioFolder.""" drop_labels: bool = None drop_metadata: bo...
from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve __all__ = ["add_noise", "barkscale_fbanks", "convolve", "fftconvolve"]
from .functional import add_noise, convolve, fftconvolve __all__ = ["add_noise", "convolve", "fftconvolve"]
import pytest from huggingface_hub import snapshot_download @pytest.fixture def dataset_dir(tmp_path): dataset_dir = tmp_path / "test_command_dataset_dir" snapshot_download("hf-internal-testing/ner-jsonl", repo_type="dataset", local_dir=dataset_dir) return str(dataset_dir)
import pytest DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__" DATASET_LOADING_SCRIPT_CODE = """ import json import os import datasets REPO_URL = "https://huggingface.co/datasets/hf-internal-testing/raw_jsonl/resolve/main/" URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-...
"""Init params.""" from llama_index.finetuning.openai.base import OpenAIFinetuneEngine __all__ = ["OpenAIFinetuneEngine"]
"""Init params.""" from llama_index.finetuning.openai.base import OpenAIFinetuneEngine __all__ = ["OpenAIFinetuneEngine"]
# Copyright (c) OpenMMLab. All rights reserved. import os from typing import Optional import torch try: import torch_npu # noqa: F401 import torch_npu.npu.utils as npu_utils # Enable operator support for dynamic shape and # binary operator support on the NPU. npu_jit_compile = bool(os.getenv('NP...
# Copyright (c) OpenMMLab. All rights reserved. import os from typing import Optional import torch try: import torch_npu # noqa: F401 import torch_npu.npu.utils as npu_utils # Enable operator support for dynamic shape and # binary operator support on the NPU. npu_jit_compile = bool(os.getenv('NP...
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appl...
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appl...
_base_ = './reppoints_moment_r50_fpn_1x_coco.py' norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg))
_base_ = './reppoints_moment_r50_fpn_1x_coco.py' norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg)) optimizer = dict(lr=0.01)
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np from mmengine.testing import assert_allclose from mmdet.structures.mask import BitmapMasks, PolygonMasks def create_random_bboxes(num_bboxes, img_w, img_h): bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2)) bboxes_right_bottom...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np from mmengine.testing import assert_allclose from mmdet.data_elements.mask import BitmapMasks, PolygonMasks def create_random_bboxes(num_bboxes, img_w, img_h): bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2)) bboxes_right_bot...
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init from mmdet.registry import MODELS from .anchor_head import AnchorHead @MODELS.register_module() class RetinaSepBNHead(AnchorHead): """RetinaHead with separate BN. In Retin...
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init from ..builder import HEADS from .anchor_head import AnchorHead @HEADS.register_module() class RetinaSepBNHead(AnchorHead): """RetinaHead with separate BN. In RetinaHead, ...
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
from __future__ import annotations import re from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from sentence_transformers.SentenceTransformer import SentenceTransformer class SentenceEvaluator: """ Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primar...
from __future__ import annotations import re from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from sentence_transformers.SentenceTransformer import SentenceTransformer class SentenceEvaluator: """ Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primar...
"""Module for helper functions for clients.""" from typing import Optional, Tuple from jina._docarray import Document, DocumentArray, docarray_v2 from jina.enums import DataInputType from jina.types.request.data import DataRequest if docarray_v2: from docarray import DocList def _new_data_request_from_batch( ...
"""Module for helper functions for clients.""" from typing import Optional, Tuple from jina._docarray import Document, DocumentArray, docarray_v2 from jina.enums import DataInputType from jina.types.request.data import DataRequest def _new_data_request_from_batch( batch, data_type: DataInputType, endpoin...
import json import os import subprocess import pytest from jina.checker import NetworkChecker from jina.jaml import JAML from jina.orchestrate.pods.factory import PodFactory from jina.parsers import set_deployment_parser, set_pod_parser from jina.parsers.ping import set_ping_parser from jina_cli.autocomplete import a...
import json import os import subprocess import pytest from jina.checker import NetworkChecker from jina.jaml import JAML from jina.orchestrate.pods.factory import PodFactory from jina.parsers import set_deployment_parser, set_pod_parser from jina.parsers.ping import set_ping_parser from jina_cli.autocomplete import a...
import logging from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser-ensembledistil") evaluator = SparseNanoBEIR...
import logging from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser-ensembledistil") evaluator = SparseNanoBEIR...
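Both rows truncate before the evaluator is used. As a hedged sketch of the usual continuation (the dataset_names argument is an assumed choice, not read from the rows): sentence-transformers evaluators are callable on a model and return a dict of metrics.

```python
import logging

from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator

logging.basicConfig(format="%(message)s", level=logging.INFO)

model = SparseEncoder("naver/splade-cocondenser-ensembledistil")

# dataset_names is an assumed argument choice; the rows cut off before it.
evaluator = SparseNanoBEIREvaluator(dataset_names=["msmarco", "nfcorpus"])
results = evaluator(model)  # returns a dict of metric name -> value
print(results[evaluator.primary_metric])
```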
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess from pathlib import Path import pytest @pytest.fixture(scope='session') def docker_image_name() -> str: return Path(__file__).parents[1].stem.lower() @pytest.fixture(scope='session') def bui...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import pytest TEST_DIR = os.path.dirname(os.path.abspath(__file__)) @pytest.fixture(scope='session', autouse=True) def create_model_weights(): path_to_embedding_array = os.path.join(TEST_DIR, 'unit', ...
_base_ = './queryinst_r50_fpn_ms-480-800-3x_coco.py' num_proposals = 300 model = dict( rpn_head=dict(num_proposals=num_proposals), test_cfg=dict( _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5))) # augmentation strategy originates from DETR. train_pipe...
_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py' num_proposals = 300 model = dict( rpn_head=dict(num_proposals=num_proposals), test_cfg=dict( _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5))) # augmentation strategy originates from DETR. train...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.graphs import NetworkxEntityGraph from langchain_community.graphs.networkx_graph import ( KnowledgeTriple, get_entities, parse_triples, ) # Create a way to d...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.graphs import NetworkxEntityGraph from langchain_community.graphs.networkx_graph import ( KnowledgeTriple, get_entities, parse_triples, ) # Create a way to d...
"""CIFAR100 small images classification dataset.""" import os import numpy as np from keras.src import backend from keras.src.api_export import keras_export from keras.src.datasets.cifar import load_batch from keras.src.utils.file_utils import get_file @keras_export("keras.datasets.cifar100.load_data") def load_da...
"""CIFAR100 small images classification dataset.""" import os import numpy as np from keras.src import backend from keras.src.api_export import keras_export from keras.src.datasets.cifar import load_batch from keras.src.utils.file_utils import get_file @keras_export("keras.datasets.cifar100.load_data") def load_da...
"""Test volc engine maas chat model.""" from langchain_core.callbacks import CallbackManager from langchain_core.messages import AIMessage, BaseMessage, HumanMessage from langchain_core.outputs import ChatGeneration, LLMResult from langchain_community.chat_models.volcengine_maas import VolcEngineMaasChat from tests.u...
"""Test volc engine maas chat model.""" from langchain_core.callbacks import CallbackManager from langchain_core.messages import AIMessage, BaseMessage, HumanMessage from langchain_core.outputs import ChatGeneration, LLMResult from langchain_community.chat_models.volcengine_maas import VolcEngineMaasChat from tests.u...
_base_ = '../cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)))
_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)))
"""Init file.""" from llama_index.readers.papers.arxiv.base import ArxivReader from llama_index.readers.papers.pubmed.base import PubmedReader __all__ = ["ArxivReader", "PubmedReader"]
"""Init file.""" from llama_index.readers.papers.arxiv.base import ArxivReader from llama_index.readers.papers.pubmed.base import PubmedReader __all__ = ["ArxivReader", "PubmedReader"]
# coding=utf-8 # Copyright 2025 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
# coding=utf-8 # Copyright 2025 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
from parameterized import parameterized from torchaudio._internal.module_utils import is_module_available from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase if is_module_available("unidecode") and is_module_available("inflect"): from pipeline_tacotron2.text.numbers import ( _ex...
from parameterized import parameterized from torchaudio._internal.module_utils import is_module_available from torchaudio_unittest.common_utils import TorchaudioTestCase, skipIfNoModule if is_module_available("unidecode") and is_module_available("inflect"): from pipeline_tacotron2.text.numbers import ( _re...
from functools import partial from inspect import isclass from typing import Any, Union, cast from pydantic import BaseModel from langchain_core.language_models import FakeListChatModel from langchain_core.load.dump import dumps from langchain_core.load.load import loads from langchain_core.messages import HumanMessa...
from functools import partial from inspect import isclass from typing import Any, Union, cast from pydantic import BaseModel from langchain_core.language_models import FakeListChatModel from langchain_core.load.dump import dumps from langchain_core.load.load import loads from langchain_core.messages import HumanMessa...
from docarray import BaseDocument, DocumentArray from docarray.documents import ImageDoc from docarray.typing import NdArray class MyDoc(BaseDocument): embedding: NdArray text: str image: ImageDoc def test_from_to_json(): da = DocumentArray[MyDoc]( [ MyDoc( embedd...
from docarray import BaseDocument, DocumentArray from docarray.documents import Image from docarray.typing import NdArray class MyDoc(BaseDocument): embedding: NdArray text: str image: Image def test_from_to_json(): da = DocumentArray[MyDoc]( [ MyDoc(embedding=[1, 2, 3, 4, 5], te...
_base_ = './cascade-mask-rcnn_r50-caffe_fpn_ms-3x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe')))
_base_ = './cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe')))
# pylint: disable=protected-access """Shared typing definition.""" import ctypes import os from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) # os.PathLike/string/numpy.array/scipy.sparse/pd.DataFrame/dt.Frame/ #...
# pylint: disable=protected-access """Shared typing definition.""" import ctypes import os from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Sequence, Type, TypeVar, Union, ) # os.PathLike/string/numpy.array/scipy.sparse/pd.DataFrame/dt.Frame/ # cudf.DataFrame/cupy.arra...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os from operator import itemgetter import pytest from jina import Executor, Document, DocumentArray import cv2 from ...yolov5_segmenter import YoloV5Segmenter cur_dir = os.path.dirname(os.path.abspath(_...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os from operator import itemgetter import pytest from jina import Executor, Document, DocumentArray import cv2 from yolov5_segmenter import YoloV5Segmenter cur_dir = os.path.dirname(os.path.abspath(__fi...
""" This script contains an example how to perform semantic search with Qdrant. You need Qdrant up and running locally: https://qdrant.tech/documentation/quickstart/ Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.: ``` pip install qdrant-client ``` This script was create...
""" This script contains an example how to perform semantic search with Qdrant. You need Qdrant up and running locally: https://qdrant.tech/documentation/quickstart/ Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.: ``` pip install qdrant-client ``` This script was create...
from __future__ import annotations import logging from datasets import load_dataset from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments from sentence_transformers.evaluation import SequentialEvaluator from sentence_transformers.models import Pooling, Transformer from...
from __future__ import annotations import logging from datasets import load_dataset from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments from sentence_transformers.evaluation import SequentialEvaluator from sentence_transformers.models import Pooling, Transformer from...
""" This example starts multiple processes (1 per GPU), which encode sentences in parallel. This gives a near linear speed-up when encoding large text collections. It also demonstrates how to stream data which is helpful in case you don't want to wait for an extremely large dataset to download, or if you want to limit ...
""" This example starts multiple processes (1 per GPU), which encode sentences in parallel. This gives a near linear speed-up when encoding large text collections. It also demonstrates how to stream data which is helpful in case you don't want to wait for an extremely large dataset to download, or if you want to limit ...
"""Init params.""" from llama_index.finetuning.rerankers.cohere_reranker import ( CohereRerankerFinetuneEngine, ) from llama_index.finetuning.rerankers.dataset_gen import CohereRerankerFinetuneDataset __all__ = ["CohereRerankerFinetuneEngine", "CohereRerankerFinetuneDataset"]
"""Init params.""" from llama_index.finetuning.rerankers.cohere_reranker import ( CohereRerankerFinetuneEngine, ) from llama_index.finetuning.rerankers.dataset_gen import CohereRerankerFinetuneDataset __all__ = ["CohereRerankerFinetuneEngine", "CohereRerankerFinetuneDataset"]
# Copyright (c) OpenMMLab. All rights reserved. import unittest from mmdet.datasets import CocoDataset class TestCocoDataset(unittest.TestCase): def test_coco_dataset(self): # test CocoDataset metainfo = dict(CLASSES=('bus', 'car'), task_name='new_task') dataset = CocoDataset( ...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from mmdet.datasets import CocoDataset class TestCocoDataset(unittest.TestCase): def test_coco_dataset(self): # test CocoDataset metainfo = dict(CLASSES=('bus', 'car'), task_name='new_task') dataset = CocoDataset( ...
import functools import warnings from collections import defaultdict from typing import Any, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union import torch from torchvision import datapoints from torchvision.transforms.v2 import Transform from torchvision.transforms.v2.utils import is_simple_tensor T = TypeVar...
import functools import warnings from collections import defaultdict from typing import Any, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union import torch from torchvision import datapoints from torchvision.transforms.v2 import Transform from torchvision.transforms.v2.utils import is_simple_tensor T = TypeVar...
from __future__ import annotations from .IDF import IDF from .MLMTransformer import MLMTransformer from .SparseAutoEncoder import SparseAutoEncoder from .SpladePooling import SpladePooling __all__ = ["SparseAutoEncoder", "MLMTransformer", "SpladePooling", "IDF"]
from __future__ import annotations from .CSRSparsity import CSRSparsity from .IDF import IDF from .MLMTransformer import MLMTransformer from .SpladePooling import SpladePooling __all__ = ["CSRSparsity", "MLMTransformer", "SpladePooling", "IDF"]
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
from functools import wraps from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast from backend.data.credit import get_user_credit_model from backend.data.execution import ( ExecutionResult, RedisExecutionEventBus, create_graph_execution, get_execution_results, get_incom...
from functools import wraps from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast from backend.data.credit import get_user_credit_model from backend.data.execution import ( ExecutionResult, RedisExecutionEventBus, create_graph_execution, get_execution_results, get_incom...
_base_ = './mask-rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
_base_ = './mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
"""Test VLite functionality.""" from langchain_core.documents import Document from langchain_community.embeddings import FakeEmbeddings from langchain_community.vectorstores import VLite def test_vlite() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] docsearch = VL...
"""Test VLite functionality.""" from langchain_core.documents import Document from langchain_community.embeddings import FakeEmbeddings from langchain_community.vectorstores import VLite def test_vlite() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] docsearch = VL...
__version__ = "2.6.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" from .datasets import SentencesDataset, ParallelSentencesDataset from .LoggingHandler import LoggingHandler from .SentenceTransformer import SentenceTransformer from .readers import InputExample from .cross_encoder.CrossEncoder import Cross...
__version__ = "2.5.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" from .datasets import SentencesDataset, ParallelSentencesDataset from .LoggingHandler import LoggingHandler from .SentenceTransformer import SentenceTransformer from .readers import InputExample from .cross_encoder.CrossEncoder import Cross...
from .postgres_indexer import PostgreSQLStorage
from .postgres_indexer import PostgreSQLStorage
# Copyright (c) OpenMMLab. All rights reserved. import os from typing import Optional import torch try: import torch_npu # noqa: F401 # Enable operator support for dynamic shape and # binary operator support on the NPU. npu_jit_compile = bool(os.getenv('NPUJITCompile', False)) torch.npu.set_comp...
# Copyright (c) OpenMMLab. All rights reserved. import os from typing import Optional import torch def get_max_cuda_memory(device: Optional[torch.device] = None) -> int: """Returns the maximum GPU memory occupied by tensors in megabytes (MB) for a given device. By default, this returns the peak allocated mem...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine.data import InstanceData from mmdet.models.dense_heads import FCOSHead class TestFCOSHead(TestCase): def test_fcos_head_loss(self): """Tests fcos head loss when truth is empty and non-empty.""" ...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine.data import InstanceData from mmdet.models.dense_heads import FCOSHead class TestFCOSHead(TestCase): def test_fcos_head_loss(self): """Tests fcos head loss when truth is empty and non-empty.""" ...
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from ..builder import HEADS from .anchor_head import AnchorHead @HEADS.register_module() class RetinaHead(AnchorHead): r"""An anchor-based head used in `RetinaNet <https://arxiv.org/pdf/1708.02002.pdf>`_. ...
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from ..builder import HEADS from .anchor_head import AnchorHead @HEADS.register_module() class RetinaHead(AnchorHead): r"""An anchor-based head used in `RetinaNet <https://arxiv.org/pdf/1708.02002.pdf>`_. ...
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, TypeVar from pydantic import create_model, create_model_from_typeddict from pydantic.config import BaseConfig from typing_extensions import TypedDict from docarray.utils._internal._typing import safe_issubclass from docarray import BaseDoc if TYPE_C...
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, TypeVar from pydantic import create_model, create_model_from_typeddict from pydantic.config import BaseConfig from typing_extensions import TypedDict from docarray import BaseDoc if TYPE_CHECKING: from pydantic.typing import AnyClassMethod ...
from langchain_core._api import warn_deprecated from pydantic.v1.main import * # noqa: F403 warn_deprecated( "0.3.0", removal="1.0.0", alternative="pydantic.v1 or pydantic", message=( "As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. " "The langchain.pydantic_v1 modul...
from langchain_core._api import warn_deprecated try: from pydantic.v1.main import * # noqa: F403 except ImportError: from pydantic.main import * # type: ignore # noqa: F403 warn_deprecated( "0.3.0", removal="1.0.0", alternative="pydantic.v1 or pydantic", message=( "As of langchain-co...
import numpy as np import pytest from sklearn.cluster._hdbscan._reachability import mutual_reachability_graph from sklearn.utils._testing import ( _convert_container, assert_allclose, ) def test_mutual_reachability_graph_error_sparse_format(): """Check that we raise an error if the sparse format is not C...
import numpy as np import pytest from sklearn.cluster._hdbscan._reachability import mutual_reachability_graph from sklearn.utils._testing import ( _convert_container, assert_allclose, ) def test_mutual_reachability_graph_error_sparse_format(): """Check that we raise an error if the sparse format is not C...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os.path as osp import numpy as np from mmengine.config import Config, DictAction from mmengine.utils import ProgressBar from mmdet.models.utils import mask2ndarray from mmdet.registry import DATASETS, VISUALIZERS from mmdet.structures.bbox import ...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os.path as osp import numpy as np from mmengine.config import Config, DictAction from mmengine.utils import ProgressBar from mmdet.models.utils import mask2ndarray from mmdet.registry import DATASETS, VISUALIZERS from mmdet.structures.bbox import ...
""" This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64]. It generates sentence embeddings that can be compared using...
""" This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64]. It generates sentence embeddings that can be compared using...
""" This example starts multiple processes (1 per GPU), which encode sentences in parallel. This gives a near linear speed-up when encoding large text collections. It also demonstrates how to stream data which is helpful in case you don't want to wait for an extremely large dataset to download, or if you want to limit ...
""" This example starts multiple processes (1 per GPU), which encode sentences in parallel. This gives a near linear speed-up when encoding large text collections. It also demonstrates how to stream data which is helpful in case you don't want to wait for an extremely large dataset to download, or if you want to limit ...
import pytest import torch from pydantic.tools import parse_obj_as, schema_json_of from docarray.base_document.io.json import orjson_dumps from docarray.typing import TorchEmbedding, TorchTensor def test_proto_tensor(): tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224)) tensor._to_node_protobuf()...
import pytest import torch from pydantic.tools import parse_obj_as, schema_json_of from docarray.base_document.io.json import orjson_dumps from docarray.typing import TorchEmbedding, TorchTensor def test_proto_tensor(): tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224)) tensor._to_node_protobuf()...
from typing import Optional, Any from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore from llama_index.storage.kvstore.tablestore import TablestoreKVStore class TablestoreIndexStore(KVIndexStore): """ Tablestore Index store. Args: tablestore_kvstore (TablestoreKVStor...
from typing import Optional, Any from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore from llama_index.storage.kvstore.tablestore import TablestoreKVStore class TablestoreIndexStore(KVIndexStore): """Tablestore Index store. Args: tablestore_kvstore (TablestoreKVStore): T...
"""Callback Handler that tracks AIMessage.usage_metadata.""" import threading from collections.abc import Generator from contextlib import contextmanager from contextvars import ContextVar from typing import Any, Optional from langchain_core._api import beta from langchain_core.callbacks import BaseCallbackHandler fr...
"""Callback Handler that tracks AIMessage.usage_metadata.""" import threading from collections.abc import Generator from contextlib import contextmanager from contextvars import ContextVar from typing import Any, Optional from langchain_core.callbacks import BaseCallbackHandler from langchain_core.messages import AIM...
_base_ = './tood_r50_fpn_1x_coco.py' max_epochs = 24 # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=max_epochs, by_epoch=True, milestones=[16, 22], g...
_base_ = './tood_r50_fpn_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24) # multi-scale training img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), ...
from __future__ import annotations from sentence_transformers.sparse_encoder.callbacks.splade_callbacks import ( SchedulerType, SpladeLambdaSchedulerCallback, ) from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator from sentence_transformers.sparse_encoder.evaluation import (...
from __future__ import annotations from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator from sentence_transformers.sparse_encoder.evaluation import ( SparseBinaryClassificationEvaluator, SparseEmbeddingSimilarityEvaluator, SparseInformationRetrievalEvaluator, SparseM...
import numpy as np import pytest from keras.src import layers from keras.src import models from keras.src import testing class MaskingTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_masking_basics(self): self.run_layer_test( layers.Masking, init_kwargs...
import numpy as np import pytest from keras.src import layers from keras.src import models from keras.src import testing class MaskingTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_masking_basics(self): self.run_layer_test( layers.Masking, init_kwargs...
import os import socket from typing import TYPE_CHECKING, Optional def get_docker_network(client) -> Optional[str]: """Do a best-effort guess at whether the caller is already in a docker network Check if `hostname` exists in the list of docker containers. If a container is found, check its network id :param cli...
import os import socket from typing import TYPE_CHECKING, Optional def get_docker_network(client) -> Optional[str]: """Do a best-effort guess at whether the caller is already in a docker network Check if `hostname` exists in the list of docker containers. If a container is found, check its network id :param cli...
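A hedged sketch of the check that docstring describes, written against docker-py (the inspect keys are standard Docker fields, but this body is an illustration, not the row's hidden implementation):

```python
import socket
from typing import Optional

import docker  # docker-py
from docker.errors import NotFound

def get_docker_network(client: docker.DockerClient) -> Optional[str]:
    """Best-effort: if this process runs inside a container, return the id
    of the first network that container is attached to."""
    try:
        container = client.containers.get(socket.gethostname())
    except NotFound:
        return None  # hostname is not a container id: probably not in Docker
    networks = container.attrs["NetworkSettings"]["Networks"]
    for net in networks.values():
        return net["NetworkID"]
    return None
```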
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/cityscapes_detection.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict(init_cfg=None), roi_head=dict( bbox_head=dict( type='Shared2FCBBoxHead', in_channels=256, fc_ou...
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/cityscapes_detection.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict(init_cfg=None), roi_head=dict( bbox_head=dict( type='Shared2FCBBoxHead', in_channels=256, fc_ou...
import pytest from langchain_core.utils.iter import batch_iterate @pytest.mark.parametrize( ("input_size", "input_iterable", "expected_output"), [ (2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]), (3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]), (1, [100, 200, 300], [[100], [200], [300...
import pytest from langchain_core.utils.iter import batch_iterate @pytest.mark.parametrize( "input_size, input_iterable, expected_output", [ (2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]), (3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]), (1, [100, 200, 300], [[100], [200], [300]]), ...
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3 from .source_separation_pipeline import CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB_PLUS, SourceSeparationBundle __all__ = [ "CONVTASNET_BASE_LIBRI2MIX", "EMFORMER_RNNT_BASE_MUSTC", "EMFORMER_RNNT_BASE_TEDLIUM3", "Source...
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3 from .source_separation_pipeline import CONVTASNET_BASE_LIBRI2MIX, SourceSeparationBundle __all__ = [ "CONVTASNET_BASE_LIBRI2MIX", "EMFORMER_RNNT_BASE_MUSTC", "EMFORMER_RNNT_BASE_TEDLIUM3", "SourceSeparationBundle", ]
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import numpy as np def palette_val(palette): """Convert palette to matplotlib palette. Args: palette (List[tuple]): A list of color tuples. Returns: List[tuple[float]]: A list of RGB matplotlib color tuples. """ new_palett...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import numpy as np import mmdet def palette_val(palette): """Convert palette to matplotlib palette. Args: palette (List[tuple]): A list of color tuples. Returns: List[tuple[float]]: A list of RGB matplotlib color tuples. """ ...
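Judging from that docstring, palette_val rescales 0-255 integer RGB tuples to the 0-1 floats matplotlib expects; a plausible one-line body (an inference, not necessarily the exact upstream code):

```python
def palette_val(palette):
    """Convert 0-255 RGB tuples to 0-1 matplotlib color tuples."""
    return [tuple(channel / 255 for channel in color) for color in palette]

print(palette_val([(255, 0, 0), (0, 128, 255)]))
# -> [(1.0, 0.0, 0.0), (0.0, 0.5019..., 1.0)]
```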
# Copyright (c) OpenMMLab. All rights reserved. from ._utils import (demo_mm_inputs, demo_mm_proposals, demo_mm_sampling_results, get_detector_cfg, get_roi_head_cfg) __all__ = [ 'demo_mm_inputs', 'get_detector_cfg', 'get_roi_head_cfg', 'demo_mm_proposals', 'demo_mm_sam...
# Copyright (c) OpenMMLab. All rights reserved. from ._utils import demo_mm_inputs, get_detector_cfg __all__ = ['demo_mm_inputs', 'get_detector_cfg']
from typing import Any, Dict, List, Optional, Sequence, Type, Union import PIL.Image import torch from torchvision import datapoints from torchvision.prototype.datapoints import Label, OneHotLabel from torchvision.transforms.v2 import functional as F, Transform from torchvision.transforms.v2._utils import _get_fill, ...
from typing import Any, Dict, List, Optional, Sequence, Type, Union import PIL.Image import torch from torchvision import datapoints from torchvision.prototype.datapoints import Label, OneHotLabel from torchvision.transforms.v2 import functional as F, Transform from torchvision.transforms.v2._utils import _setup_fill...
""" This example uses average word embeddings (for example from GloVe). It adds two fully-connected feed-forward layers (dense layers) to create a Deep Averaging Network (DAN). If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server. See https://public.ukp.informatik.tu-darmstadt.de/reimers/...
""" This example uses average word embeddings (for example from GloVe). It adds two fully-connected feed-forward layers (dense layers) to create a Deep Averaging Network (DAN). If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server. See https://public.ukp.informatik.tu-darmstadt.de/reimers/...
# dataset settings dataset_type = 'Objects365V1Dataset' data_root = 'data/Objects365/Obj365_v1/' # Example to use different file client # Method 1: simply set the data root and let the file I/O module # automatically infer from prefix (not support LMDB and Memcache yet) # data_root = 's3://openmmlab/datasets/detectio...
# dataset settings dataset_type = 'Objects365V1Dataset' data_root = 'data/Objects365/Obj365_v1/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = d...
import pickle from dataclasses import dataclass from io import BufferedIOBase from typing import Any import torch import torch._weights_only_unpickler as _weights_only_unpickler from torch.serialization import _load, _save, DEFAULT_PROTOCOL, MAP_LOCATION __all__: list[str] = [] @dataclass class _Entry: key: st...
import pickle from dataclasses import dataclass from io import BufferedIOBase from typing import Any import torch import torch._weights_only_unpickler as _weights_only_unpickler from torch.serialization import _load, _save, DEFAULT_PROTOCOL, MAP_LOCATION __all__: list[str] = [] @dataclass class _Entry: key: st...
# This example was adapted from https://github.com/muhrin/milad # It is licensed under the GPLv3 license. You can find a copy of it # here: https://www.gnu.org/licenses/gpl-3.0.en.html . import torch from torch import nn from torch.func import jacrev, vmap from torch.nn.functional import mse_loss sigma = 0.5 epsilon...
# This example was adapted from https://github.com/muhrin/milad # It is licensed under the GPLv3 license. You can find a copy of it # here: https://www.gnu.org/licenses/gpl-3.0.en.html . import torch from torch import nn from torch.func import jacrev, vmap from torch.nn.functional import mse_loss sigma = 0.5 epsilo...
# Copyright (c) OpenMMLab. All rights reserved. import warnings from abc import ABCMeta, abstractmethod from typing import Any, List, Optional, Sequence, Union from mmengine.dist import (broadcast_object_list, collect_results, is_main_process) class BaseMetric(metaclass=ABCMeta): """Ba...
# Copyright (c) OpenMMLab. All rights reserved. import warnings from abc import ABCMeta, abstractmethod from typing import Any, List, Optional, Sequence, Tuple, Union from mmengine.dist import (broadcast_object_list, collect_results, is_main_process) class BaseMetric(metaclass=ABCMeta): ...
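For context, a minimal sketch of the contract BaseMetric defines: subclasses implement process() to accumulate per-batch results and compute_metrics() to reduce the gathered list (the Accuracy subclass is hypothetical):

```python
from mmengine.evaluator import BaseMetric

class Accuracy(BaseMetric):  # hypothetical subclass for illustration
    def process(self, data_batch, data_samples):
        # Append per-sample results; BaseMetric gathers self.results
        # across processes (broadcast/collect) before reduction.
        for sample in data_samples:
            self.results.append(sample["pred"] == sample["gt"])

    def compute_metrics(self, results):
        # Called once with the list collected from every rank.
        return {"accuracy": sum(results) / len(results)}
```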
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.activations import deserialize from keras.src.activations import get from keras.src.activations import serialize from keras.src.activations.activations import celu from keras.src.acti...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.activations import deserialize from keras.src.activations import get from keras.src.activations import serialize from keras.src.activations.activations import celu from keras.src.acti...
from __future__ import annotations from langchain_core.language_models import LanguageModelLike from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import BasePromptTemplate from langchain_core.retrievers import RetrieverLike, RetrieverOutputLike from langchain_core.runnables import R...
from __future__ import annotations from langchain_core.language_models import LanguageModelLike from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import BasePromptTemplate from langchain_core.retrievers import RetrieverLike, RetrieverOutputLike from langchain_core.runnables import R...
from . import InputExample import csv import os class TripletReader(object): """Reads in a Triplet Dataset: Each line contains (at least) 3 columns, one anchor column (s1), one positive example (s2) and one negative example (s3) """ def __init__( self, dataset_folder, s1_c...
from . import InputExample import csv import os class TripletReader(object): """ Reads in a Triplet Dataset: Each line contains (at least) 3 columns, one anchor column (s1), one positive example (s2) and one negative example (s3) """ def __init__( self, dataset_folder, ...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
from typing import TypeVar from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode T = TypeVar('T', bound='ImageTorchTensor') @_register_proto(proto_typ...
import numpy as np import torch import torchaudio.prototype.functional as F from parameterized import parameterized from scipy import signal from torchaudio_unittest.common_utils import nested_params, TestBaseMixin class FunctionalTestImpl(TestBaseMixin): @nested_params( [(10, 4), (4, 3, 1, 2), (2,), ()],...
import numpy as np import torch import torchaudio.prototype.functional as F from parameterized import parameterized from scipy import signal from torchaudio_unittest.common_utils import nested_params, TestBaseMixin class FunctionalTestImpl(TestBaseMixin): @nested_params( [(10, 4), (4, 3, 1, 2), (2,), ()],...
from typing_extensions import TYPE_CHECKING from docarray.typing.bytes import AudioBytes, ImageBytes, VideoBytes from docarray.typing.id import ID from docarray.typing.tensor import ImageNdArray, ImageTensor from docarray.typing.tensor.audio import AudioNdArray, AudioTensor from docarray.typing.tensor.embedding.embedd...
from typing_extensions import TYPE_CHECKING from docarray.typing.bytes import AudioBytes, ImageBytes, VideoBytes from docarray.typing.id import ID from docarray.typing.tensor import ImageNdArray, ImageTensor from docarray.typing.tensor.audio import AudioNdArray from docarray.typing.tensor.embedding.embedding import An...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch.nn as nn import torch.nn.functional as F from ..builder import LOSSES from .utils import weighted_loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def knowledge_distillation_kl_div_loss(pred, so...
import mmcv import torch.nn as nn import torch.nn.functional as F from ..builder import LOSSES from .utils import weighted_loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def knowledge_distillation_kl_div_loss(pred, soft_label, ...
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.' __license__ = 'Apache-2.0' from pathlib import Path import pytest import librosa import numpy as np from jina import Document, DocumentArray, Executor from jina.excepts import BadDocType from ...audio_clip_encoder import AudioCLIPEncode...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from pathlib import Path import librosa import numpy as np from jina import Document, DocumentArray, Executor from ...audio_clip_encoder import AudioCLIPEncoder def test_config(): ex = Executor.load_conf...
from __future__ import annotations import re from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from sentence_transformers.SentenceTransformer import SentenceTransformer class SentenceEvaluator: """ Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primar...
from __future__ import annotations import re from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from sentence_transformers.SentenceTransformer import SentenceTransformer class SentenceEvaluator: """ Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primar...
_base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( random_size_range=(10, 20), backbone=dict(deepen_factor=0.33, widen_factor=0.375), neck=dict(in_channels=[96, 192, 384], out_channels=96), bbox_head=dict(in_channels=96, feat_channels=96)) img_scale = (640, 640) # height, width tra...
_base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( random_size_range=(10, 20), backbone=dict(deepen_factor=0.33, widen_factor=0.375), neck=dict(in_channels=[96, 192, 384], out_channels=96), bbox_head=dict(in_channels=96, feat_channels=96)) img_scale = (640, 640) # height, width tra...
import os import re from pathlib import Path from typing import Optional, Tuple, Union import torch import torchaudio from torch.hub import download_url_to_file from torch.utils.data import Dataset from torchaudio.datasets.utils import extract_archive URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz" _C...
import os import re from pathlib import Path from typing import Optional, Tuple, Union import torch import torchaudio from torch.hub import download_url_to_file from torch.utils.data import Dataset from torchaudio.datasets.utils import extract_archive URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz" _C...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
import os import time import uuid import pytest import qdrant_client from docarray.index import QdrantDocumentIndex cur_dir = os.path.dirname(os.path.abspath(__file__)) qdrant_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml')) @pytest.fixture(scope='session', autouse=True) def start_storage(): ...
import sys import pytest from hypothesis import given, settings, strategies from xgboost.testing import no_cupy from xgboost.testing.updater import check_extmem_qdm, check_quantile_loss_extmem sys.path.append("tests/python") from test_data_iterator import run_data_iterator from test_data_iterator import test_single_...
import sys import pytest from hypothesis import given, settings, strategies from xgboost.testing import no_cupy from xgboost.testing.updater import check_extmem_qdm, check_quantile_loss_extmem sys.path.append("tests/python") from test_data_iterator import run_data_iterator from test_data_iterator import test_single_...
from __future__ import annotations from sentence_transformers import util from sentence_transformers.losses.CoSENTLoss import CoSENTLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseCoSENTLoss(CoSENTLoss): def __init__(self, model: SparseEncoder, scale: float = 20.0, s...
from __future__ import annotations from sentence_transformers import util from sentence_transformers.losses.CoSENTLoss import CoSENTLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseCoSENTLoss(CoSENTLoss): def __init__(self, model: SparseEncoder, scale: float = 20.0, s...
"""Copyright 2024, XGBoost contributors""" import pytest from dask_cuda import LocalCUDACluster from distributed import Client from xgboost.testing.dask import check_external_memory, get_rabit_args @pytest.mark.parametrize("is_qdm", [True, False]) def test_external_memory(is_qdm: bool) -> None: n_workers = 2 ...
"""Copyright 2024, XGBoost contributors""" import pytest from dask_cuda import LocalCUDACluster from distributed import Client import xgboost as xgb from xgboost import dask as dxgb from xgboost.testing.dask import check_external_memory @pytest.mark.parametrize("is_qdm", [True, False]) def test_external_memory(is_q...
""" Computes embeddings """ import numpy as np from sentence_transformers import SentenceTransformer def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None: """ Test that encode(output_value='token_embeddings') works """ model = paraphrase_distilroberta...
""" Computes embeddings """ import unittest from sentence_transformers import SentenceTransformer import numpy as np class ComputeEmbeddingsTest(unittest.TestCase): def setUp(self): self.model = SentenceTransformer('paraphrase-distilroberta-base-v1') def test_encode_token_embeddings(self): ...