| input (string, 33 to 5k chars) | output (string, 32 to 5k chars) |
|---|---|
"""Configure global settings and get information about the working environment."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Machine learning module for Python
# ==================================
#
# sklearn is a Python module integrating classical machine
# learning algorithms... | """Configure global settings and get information about the working environment."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Machine learning module for Python
# ==================================
#
# sklearn is a Python module integrating classical machine
# learning algorithms... |
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from ...clip_image import CLIPImageEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(blob=... | import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from ...clip_image import CLIPImageEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(blob=np.random.randint(0, ... |
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
from tests.index.elastic.fixture import start_storage_v8 # noqa: F401
file_to_skip = ['fastAPI', 'jina', 'index', 'first_steps.md']
def check_raw_file_full(raw, lang="python", keyword_... | import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
from docarray.utils._internal.pydantic import is_pydantic_v2
from tests.index.elastic.fixture import start_storage_v8 # noqa: F401
file_to_skip = ['fastAPI', 'jina', 'index', 'first_step... |
from typing import Optional, Union
from langchain.agents import AgentOutputParser
from langchain_core.agents import AgentAction, AgentFinish
def extract_action_details(text: str) -> tuple[Optional[str], Optional[str]]:
# Split the text into lines and strip whitespace
lines = [line.strip() for line in text.st... | from typing import Optional, Tuple, Union
from langchain.agents import AgentOutputParser
from langchain_core.agents import AgentAction, AgentFinish
def extract_action_details(text: str) -> Tuple[Optional[str], Optional[str]]:
# Split the text into lines and strip whitespace
lines = [line.strip() for line in ... |
import sys
from jina.parsers import set_gateway_parser
from jina.parsers.helper import _update_gateway_args
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
def run(*args, **kwargs):
runtime_args = set_gateway_parser().pars... | import sys
from jina.parsers import set_gateway_parser
from jina.parsers.helper import _update_gateway_args
from jina.serve.runtimes.gateway import GatewayRuntime
def run(*args, **kwargs):
runtime_cls = GatewayRuntime
print(f' args {args}')
runtime_args = set_gateway_parser().parse_args(args)
print(f... |
import unittest
import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from ..test_pipelines_common import PipelineTesterMixin
class Lumina2Text2ImgP... | import unittest
import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from ..test_pipelines_common import PipelineTesterMixin
class Lumina2Text2ImgP... |
import time
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_faiss
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text f... | import time
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_faiss
from datasets import load_dataset
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text fo... |
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim):
"""
This cla... | import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim):
"""
This cla... |
"""**Load** module helps with serialization and deserialization."""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import loads
from langchain_core.load.serializable import Serializable
... | """**Load** module helps with serialization and deserialization."""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import loads
from langchain_core.load.serializable import Serializable
... |
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
... |
"""Fake Embedding class for testing purposes."""
import math
from langchain_core.embeddings import Embeddings
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""R... | """Fake Embedding class for testing purposes."""
import math
from langchain_core.embeddings import Embeddings
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""R... |
import json
import os
import pytest
from hubble.executor import HubExecutor
from hubble.executor.hubio import HubIO
from jina import __version__
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master',... | import json
import os
import pytest
from hubble.executor import HubExecutor
from hubble.executor.hubio import HubIO
from jina import __version__
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master',... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class Tes... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class Tes... |
"""Different ways to combine documents."""
from langchain.chains.combine_documents.reduce import (
acollapse_docs,
collapse_docs,
split_list_of_docs,
)
from langchain.chains.combine_documents.stuff import create_stuff_documents_chain
__all__ = [
"acollapse_docs",
"collapse_docs",
"create_stuff... | """Different ways to combine documents."""
from langchain.chains.combine_documents.reduce import (
acollapse_docs,
collapse_docs,
split_list_of_docs,
)
from langchain.chains.combine_documents.stuff import create_stuff_documents_chain
__all__ = [
"acollapse_docs",
"collapse_docs",
"split_list_o... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize as deserialize
from keras.src.activations import get as get
from keras.src.activations import serialize as serialize
from keras.src.activations.activati... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.acti... |
import logging
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLLM
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts imp... | import logging
from typing import List
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLLM
from langchain_core.output_parsers import StrOutputParser
from la... |
"""**Load** module helps with serialization and deserialization."""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import loads
from langchain_core.load.serializable im... | """**Load** module helps with serialization and deserialization."""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import loads
from langchain_core.load.serializable im... |
# pyre-strict
# mypy: allow-untyped-defs
import abc
import os
from concurrent.futures import Future
from typing import Optional, Union
import torch.distributed as dist
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
from torch.distributed.checkpoint.planner import SavePlanner
from torch.distributed.c... | # pyre-strict
# mypy: allow-untyped-defs
import abc
import os
from concurrent.futures import Future
from typing import Optional, Union
import torch.distributed as dist
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
from torch.distributed.checkpoint.planner import SavePlanner
from torch.distributed.c... |
#!/usr/bin/env python3
"""The demo script for testing the pre-trained Emformer RNNT pipelines.
Example:
python pipeline_demo.py --model-type librispeech --dataset-path ./datasets/librispeech
"""
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
from dataclasses import dataclass
fr... | #!/usr/bin/env python3
"""The demo script for testing the pre-trained Emformer RNNT pipelines.
Example:
python pipeline_demo.py --model-type librispeech --dataset-path ./datasets/librispeech
"""
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
from dataclasses import dataclass
fr... |
_base_ = 'ssd300_coco.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
... | _base_ = 'ssd300_coco.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
... |
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py']
# Use ClassAwareSampler
train_dataloader = dict(
sampler=dict(_delete_=True, type='ClassAwareSampler', num_sample_class=1))
| _base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py']
# Use ClassAwareSampler
data = dict(
train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1)))
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.base_document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class AbstractType(... | from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.base_document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class AbstractType(BaseNode):... |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
from mmdet.datasets.pipelines import (LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_clas... | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
from mmdet.datasets.pipelines import (LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_clas... |
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_ltx import AutoencoderKLLTXVideo
from .a... | from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .au... |
from abc import abstractmethod
from typing import Iterable, Type
from docarray.document import BaseDocument
class AbstractDocumentArray(Iterable):
document_type: Type[BaseDocument]
@abstractmethod
def __init__(self, docs: Iterable[BaseDocument]):
...
@abstractmethod
def __class_getitem... | from abc import abstractmethod
from typing import Iterable, Type
from docarray.document import BaseDocument
from docarray.document.abstract_document import AbstractDocument
class AbstractDocumentArray(Iterable):
document_type: Type[BaseDocument]
@abstractmethod
def __init__(self, docs: Iterable[Abstrac... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledis... | import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseTripletEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE... |
import torch
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseDistillKLDivLoss,
SparseEncoder,
SparseEncoderTrainer,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
... | import torch
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseDistillKLDivLoss,
SparseEncoder,
SparseEncoderTrainer,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
... |
import time
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_faiss
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text f... | import time
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_faiss
from datasets import load_dataset
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text fo... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
Consider the case of r... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
Consider the case of r... |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from .generation import Llama, Dialog
from .model import ModelArgs, Transformer
from .tokenizer import Tokenizer
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from .generation import Llama
from .model import ModelArgs, Transformer
from .tokenizer import Tokenizer
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class EmptyC... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class EmptyC... |
from typing import TYPE_CHECKING, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.mesh.vertices_... | from typing import NamedTuple, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
T = TypeVar('T', bound='Mesh3DUrl')
class Mesh3DLoadResult(Na... |
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
... | import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
... |
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICE... | # ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICE... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel
from torch.nn.parallel.distributed import DistributedDataParallel
from mmengine.model.wrappers import (MM... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from mmengine.model.wrappers import (MMDataParallel, MMDistributedDataParallel,
is_model_wrapper)
from mmengine... |
from typing import Any
import torch
__all__ = [
"LSTM",
]
class LSTM(torch.ao.nn.quantizable.LSTM):
r"""A quantized long short-term memory (LSTM).
For the description and the argument types, please, refer to :class:`~torch.nn.LSTM`
Attributes:
layers : instances of the `_LSTMLayer`
.... | # mypy: allow-untyped-defs
import torch
__all__ = [
"LSTM",
]
class LSTM(torch.ao.nn.quantizable.LSTM):
r"""A quantized long short-term memory (LSTM).
For the description and the argument types, please, refer to :class:`~torch.nn.LSTM`
Attributes:
layers : instances of the `_LSTMLayer`
... |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmpretrain
# import mmpretrain.models to trigger register_module in mmpretrain
custom_imports = dict(
imports=['mmpretrain.... | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in m... |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path
import pytest
from mmengine.config.utils import (_get_external_cfg_base_path,
_get_package_and_cfg_path)
def test_get_external_cfg_base_path(tmp_path):
package_path = tmp_path
rel_cfg_path = os.path.join('cfg_d... | # Copyright (c) OpenMMLab. All rights reserved.
import os.path
import pytest
from mmengine.config.utils import (_get_external_cfg_base_path,
_get_package_and_cfg_path)
def test_get_external_cfg_base_path(tmp_path):
package_path = tmp_path
rel_cfg_path = 'cfg_dir/cfg_file'
... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from laser_encoder import LaserEncoder
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='session')
... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from laser_encoder import LaserEncoder
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='session')
... |
"""
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
import csv
import gzip
import os
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,... | """
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
import csv
import gzip
import os
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
... |
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
bbox_head=dict(
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
# learning policy
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
... | _base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
bbox_head=dict(
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
optimizer_config = dict(
_delete_=True, ... |
import pathlib
from typing import Any, Union
import torch
from torchdata.datapipes.iter import CSVParser, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision... | import pathlib
from typing import Any, Dict, List, Tuple, Union
import torch
from torchdata.datapipes.iter import CSVParser, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffli... |
__version__ = '0.13.30'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
| __version__ = '0.13.29'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
"""Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with TF and JAX automated device placement,
we are... | """Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with TF and JAX automated device placement,
we are... |
from parameterized import parameterized
from torchaudio.io import AudioEffector
from torchaudio_unittest.common_utils import get_sinusoid, skipIfNoFFmpeg, TorchaudioTestCase
from .common import lt42
@skipIfNoFFmpeg
class EffectorTest(TorchaudioTestCase):
def test_null(self):
"""No effect and codec will ... | from parameterized import parameterized
from torchaudio.io import AudioEffector
from torchaudio_unittest.common_utils import get_sinusoid, skipIfNoFFmpeg, TorchaudioTestCase
from .common import lt42
@skipIfNoFFmpeg
class EffectorTest(TorchaudioTestCase):
def test_null(self):
"""No effect and codec will ... |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool... | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_d... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import NonLocal2d
from mmengine.model import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class BFP(BaseModule):
"""BFP (Balanced Feature Pyramids)
B... | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import NonLocal2d
from mmcv.runner import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class BFP(BaseModule):
"""BFP (Balanced Feature Pyramids)
BFP ... |
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint, update_data_root
from .setup_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest... | # Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
from .misc import find_latest_checkpoint
from .setup_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'setup_multi_processes'
]
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import ... | import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import ... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads.autoassign_head import AutoAssignHead
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_autoassign_head_loss():
"""Tests autoassign head loss when truth is empty and non-empty."""
s =... | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads.autoassign_head import AutoAssignHead
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_autoassign_head_loss():
"""Tests autoassign head loss when truth is empty and non-empty."""
s =... |
import os
import pickle
from pathlib import Path
from typing import Optional, Tuple
from jina import DocumentArray, Executor, requests
from jina.excepts import PretrainedModelFileDoesNotExist
from jina_commons.batching import get_docs_batch_generator
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-... | import os
import pickle
from pathlib import Path
from typing import Optional, Tuple
from jina import DocumentArray, Executor, requests
from jina.excepts import PretrainedModelFileDoesNotExist
from jina_commons.batching import get_docs_batch_generator
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple, Dict, List
import numpy as np
from jina import Executor, requests, DocumentArray, Document
from jina_commons import get_logger
from jina_commons.indexers.dump import import_vectors
class ... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple, Dict, List
import numpy as np
from jina import Executor, requests, DocumentArray, Document
from jina_commons import get_logger
from jina_commons.indexers.dump import import_vectors
class ... |
import fastapi
from .config import Settings
from .middleware import auth_middleware
from .models import DEFAULT_USER_ID, User
def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
return verify_user(payload, admin_only=False)
def requires_admin_user(
payload: dict = fastapi.Depends(a... | import fastapi
from .middleware import auth_middleware
from .models import User, DEFAULT_USER_ID, DEFAULT_EMAIL
from .config import Settings
def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
return verify_user(payload, admin_only=False)
def requires_admin_user(
payload: dict = fa... |
import numpy as np
import pytest
from typing import Dict, List
from docarray import DocList
from docarray.base_doc import AnyDoc, BaseDoc
from docarray.typing import NdArray
def test_any_doc():
class InnerDocument(BaseDoc):
text: str
tensor: NdArray
class CustomDoc(BaseDoc):
inner: I... | import numpy as np
from docarray.base_doc import AnyDoc, BaseDoc
from docarray.typing import NdArray
def test_any_doc():
class InnerDocument(BaseDoc):
text: str
tensor: NdArray
class CustomDoc(BaseDoc):
inner: InnerDocument
text: str
doc = CustomDoc(
text='bye', ... |
from collections import Counter
from typing import Callable, List
from llama_index.core.bridge.pydantic import Field
from llama_index.core.base.embeddings.base_sparse import (
BaseSparseEmbedding,
SparseEmbedding,
)
def get_default_tokenizer() -> Callable:
"""
Get default tokenizer.
NOTE: taken ... | from collections import Counter
from packaging import version
from typing import Any, Callable, List
from llama_index.core.bridge.pydantic import Field
from llama_index.core.base.embeddings.base_sparse import (
BaseSparseEmbedding,
SparseEmbedding,
)
def get_default_tokenizer() -> Callable:
"""
Get d... |
"""Test utils."""
from typing import List, Annotated
from llama_index.core.bridge.pydantic import Field
from llama_index.core.tools.utils import create_schema_from_function
def test_create_schema_from_function() -> None:
"""Test create schema from function."""
def test_fn(x: int, y: int, z: List[str]) -> N... | """Test utils."""
from typing import List, Annotated
from llama_index.core.bridge.pydantic import Field
from llama_index.core.tools.utils import create_schema_from_function
def test_create_schema_from_function() -> None:
"""Test create schema from function."""
def test_fn(x: int, y: int, z: List[str]) -> N... |
import os
from itertools import cycle
from pathlib import Path
import pytest
from doc_cache import DocCache
from jina import Document, DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.mark.parametrize('cache_fields', ['[content_hash]', '[id]'])
@pytest.mark.parametrize('value', [['a'... | import os
from itertools import cycle
from pathlib import Path
import pytest
from jina import Document, DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.mark.parametrize('cache_fields', ['[content_hash]', '[id]'])
@pytest.mark.parametrize('value', [['a'], ['a', 'b']])
def test_cache(... |
import enum
from typing import Any, Callable, Dict, List, Tuple, Type, Union
import PIL.Image
import torch
from torch import nn
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision.prototype.transforms.utils import check_type
from torchvision.utils import _log_api_usage_once
class Transform(... | import enum
from typing import Any, Callable, Dict, List, Tuple, Type, Union
import PIL.Image
import torch
from torch import nn
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision.prototype.transforms._utils import _isinstance
from torchvision.utils import _log_api_usage_once
class Transfor... |
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
from docarray.utils._internal.pydantic import is_pydantic_v2
from tests.index.elastic.fixture import start_storage_v8 # noqa: F401
file_to_skip = ['fastAPI', 'jina', 'index', 'first_step... | import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
from tests.index.elastic.fixture import start_storage_v8 # noqa: F401
file_to_skip = ['fastAPI', 'jina', 'index', 'first_steps.md']
def check_raw_file_full(raw, lang="python", keyword_... |
"""Default prompt selectors."""
from llama_index.core.prompts import SelectorPromptTemplate
from llama_index.core.prompts.chat_prompts import (
CHAT_REFINE_PROMPT,
CHAT_REFINE_TABLE_CONTEXT_PROMPT,
CHAT_TEXT_QA_PROMPT,
CHAT_TREE_SUMMARIZE_PROMPT,
)
from llama_index.core.prompts.default_prompts import (... | """Default prompt selectors."""
from llama_index.core.prompts import SelectorPromptTemplate
from llama_index.core.prompts.chat_prompts import (
CHAT_REFINE_PROMPT,
CHAT_REFINE_TABLE_CONTEXT_PROMPT,
CHAT_TEXT_QA_PROMPT,
CHAT_TREE_SUMMARIZE_PROMPT,
)
from llama_index.core.prompts.default_prompts import (
... |
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
... | import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
... |
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1... | """All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1... |
import json
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.slack.base import SlackBaseTool
class SlackGetMessageSchema(BaseModel):
"""Input schema for SlackGetMessages."""
c... | import json
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.slack.base import SlackBaseTool
class SlackGetMessageSchema(BaseModel):
"""Input schema for SlackGetMessages."""
c... |
from dataclasses import dataclass
DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
DEFAULT_EMAIL = "default@example.com"
# Using dataclass here to avoid adding dependency on pydantic
@dataclass(frozen=True)
class User:
user_id: str
email: str
phone_number: str
role: str
@classmethod
... | from dataclasses import dataclass
# Using dataclass here to avoid adding dependency on pydantic
@dataclass(frozen=True)
class User:
user_id: str
email: str
phone_number: str
role: str
@classmethod
def from_payload(cls, payload):
return cls(
user_id=payload["sub"],
... |
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class StringLookupTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = laye... | import numpy as np
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class StringLookupTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.StringLooku... |
# coding: utf-8
"""Script for generating files with NuGet package metadata."""
import datetime
import sys
from pathlib import Path
from shutil import copyfile
if __name__ == "__main__":
source = Path(sys.argv[1])
current_dir = Path(__file__).absolute().parent
linux_folder_path = current_dir / "runtimes" /... | # coding: utf-8
"""Script for generating files with NuGet package metadata."""
import datetime
import sys
from pathlib import Path
from shutil import copyfile
if __name__ == "__main__":
source = Path(sys.argv[1])
current_dir = Path(__file__).absolute().parent
linux_folder_path = current_dir / "runtimes" / ... |
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_800mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_gr... | _base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_800mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_gr... |
"""
The pre-trained models produce embeddings of size 512 - 1024. However, when storing a large
number of embeddings, this requires quite a lot of memory / storage.
In this example, we reduce the dimensionality of the embeddings to e.g. 128 dimensions. This significantly
reduces the required memory / storage while mai... | """
The pre-trained models produce embeddings of size 512 - 1024. However, when storing a large
number of embeddings, this requires quite a lot of memory / storage.
In this example, we reduce the dimensionality of the embeddings to e.g. 128 dimensions. This significantly
reduces the required memory / storage while mai... |
import torch
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import Datapoint
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._video import Video
if _... | import torch
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import Datapoint
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._video import Video
if _... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.saving.file_editor import KerasFileEditor
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope a... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_reg... |
import os.path
from pathlib import Path
from typing import Any, Callable, Optional, Union
import numpy as np
from PIL import Image
from .utils import check_integrity, download_url, verify_str_arg
from .vision import VisionDataset
class SVHN(VisionDataset):
"""`SVHN <http://ufldl.stanford.edu/housenumbers/>`_ Da... | import os.path
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import numpy as np
from PIL import Image
from .utils import check_integrity, download_url, verify_str_arg
from .vision import VisionDataset
class SVHN(VisionDataset):
"""`SVHN <http://ufldl.stanford.edu/housenumbers... |
import click
from .bump import bump
from .cmd_exec import cmd_exec
from .info import info
@click.group(short_help="Manage packages in the monorepo")
def pkg():
pass # pragma: no cover
pkg.add_command(info)
pkg.add_command(cmd_exec, name="exec")
pkg.add_command(bump)
| import click
from .cmd_exec import cmd_exec
from .info import info
@click.group(short_help="Manage packages in the monorepo")
def pkg():
pass # pragma: no cover
pkg.add_command(info)
pkg.add_command(cmd_exec, name="exec")
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from typing import Tuple
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUA... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from typing import Tuple
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUA... |
_base_ = './cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| _base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
from typing import Dict, TYPE_CHECKING, Optional
if TYPE_CHECKING:
from docarray import Document
from docarray.array.queryset.lookup import Q, LookupNode, LookupLeaf
LOGICAL_OPERATORS = {'$and': 'and', '$or': 'or', '$not': True}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'... | from typing import Dict, TYPE_CHECKING, Optional
if TYPE_CHECKING:
from docarray import Document
from docarray.array.queryset.lookup import Q, LookupNode, LookupLeaf
LOGICAL_OPERATORS = {'$and': 'and', '$or': 'or', '$not': True}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wra... | # Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wra... |
"""
Checkpoint functionality for machine learning models.
This module provides classes for saving and loading model checkpoints in a distributed
training environment. It includes functionality for coordinating checkpoint operations
across multiple processes and customizing the checkpoint process through hooks.
Key co... | """
Checkpoint functionality for machine learning models.
This module provides classes for saving and loading model checkpoints in a distributed
training environment. It includes functionality for coordinating checkpoint operations
across multiple processes and customizing the checkpoint process through hooks.
Key co... |
_base_ = [
'./yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py', # noqa: E501
]
dataset_type = 'MOTChallengeDataset'
detector = _base_.model
detector.pop('data_preprocessor')
del _base_.model
model = dict(
type='StrongSORT',
data_preprocessor=dict(
type='TrackDataPreprocessor',
... | _base_ = [
'./yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py', # noqa: E501
]
dataset_type = 'MOTChallengeDataset'
detector = _base_.model
detector.pop('data_preprocessor')
del _base_.model
model = dict(
type='StrongSORT',
data_preprocessor=dict(
type='TrackDataPreprocessor',
... |
"""
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client):
# generate some random data for demonstration
m = 100000
... | """
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client):
# generate some random data for demonstration
m = 100000
... |
"""Test the TextEmbed class."""
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.textembed import TextEmbedEmbedding
def test_textembed_class():
"""Check if BaseEmbedding is one of the base classes of TextEmbedEmbedding."""
assert issubclass(TextEmbedEmbedding, Base... | """Test the TextEmbed class."""
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.textembed import TextEmbedEmbedding
def test_textembed_class():
"""Check if BaseEmbedding is one of the base classes of TextEmbedEmbedding."""
assert issubclass(
TextEmbedEmbedd... |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If ex... | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If ex... |
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
tra... | # dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
tra... |
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.l... | from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers import SimilarityFunction
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SpladePooling,
)
from sentence_transformers.sparse_encoder.evaluation.Sparse... |
from __future__ import annotations
from collections.abc import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__... | from __future__ import annotations
from collections.abc import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__... |
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.csp_darknet import CSPDarknet
from .utils import check_norm_state, is_norm
def test_csp_darknet_backbone():
with pytest.raises(ValueError):
# frozen_sta... | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.csp_darknet import CSPDarknet
from .utils import check_norm_state, is_norm
def test_csp_darknet_backbone():
with pytest.raises(ValueError):
# frozen_sta... |
import pytest
import pytest_socket
import requests
def test_socket_disabled() -> None:
"""This test should fail."""
with pytest.raises(pytest_socket.SocketBlockedError):
# Ignore S113 since we don't need a timeout here as the request
# should fail immediately
requests.get("https://www.... | import pytest
import pytest_socket
import requests
def test_socket_disabled() -> None:
"""This test should fail."""
with pytest.raises(pytest_socket.SocketBlockedError):
# noqa since we don't need a timeout here as the request should fail immediately
requests.get("https://www.example.com") # ... |
"""Tool for agent to sleep."""
from asyncio import sleep as asleep
from time import sleep
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
clas... | """Tool for agent to sleep."""
from asyncio import sleep as asleep
from time import sleep
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
clas... |
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .hooks import *
from .logging import *
from .registry import *
from .runner import *
from .utils import *
| # Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .hooks import *
from .logging import *
from .registry import *
from .utils import *
|
from __future__ import annotations
import csv
import logging
import os
from typing import TYPE_CHECKING
import torch
from torch.utils.data import DataLoader
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.util import batch_to_device
if TYPE_CHECKING:
f... | from __future__ import annotations
import csv
import logging
import os
from typing import TYPE_CHECKING
import torch
from torch.utils.data import DataLoader
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.util import batch_to_device
if TYPE_CHECKING:
f... |
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from ...typing import T
class MeshDataMixin:
"""Provide helper functions for :class:`Document` to support 3D mesh data and point cloud."""
def load_uri_to_point_cloud_tensor(
self: 'T', samples: int, as_chunks: bool = False
... | from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ...typing import T
class MeshDataMixin:
"""Provide helper functions for :class:`Document` to support 3D mesh data and point cloud."""
def load_uri_to_point_cloud_tensor(
self: 'T', samples: int, as_chunks: bool = False
) -> 'T':
... |
import importlib
class LazyModule:
def __init__(self, name, pip_name=None):
self.name = name
pip_name = pip_name or name
self.pip_name = pip_name
self.module = None
self._available = None
@property
def available(self):
if self._available is None:
... | import importlib
class LazyModule:
def __init__(self, name, pip_name=None):
self.name = name
pip_name = pip_name or name
self.pip_name = pip_name
self.module = None
self._available = None
@property
def available(self):
if self._available is None:
... |
import fastapi
from .config import Settings
from .middleware import auth_middleware
from .models import DEFAULT_USER_ID, User
def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
return verify_user(payload, admin_only=False)
def requires_admin_user(
payload: dict = fastapi.Depends(a... | import fastapi
from .middleware import auth_middleware
from .models import User, DEFAULT_USER_ID, DEFAULT_EMAIL
from .config import Settings
def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
return verify_user(payload, admin_only=False)
def requires_admin_user(
payload: dict = fa... |
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
if TYPE_CHECKING:
from pydantic.fields import ModelField
from pydantic import BaseConfig
from docarray.document.base_node import BaseNode
from docarray.proto import NdArrayProto, NodeProto
T = TypeVar('T', bound='Tensor')
... | from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
if TYPE_CHECKING:
from pydantic.fields import ModelField
from pydantic import BaseConfig
from docarray.document.base_node import BaseNode
from docarray.proto import NdArrayProto, NodeProto
T = TypeVar('T', bound='Tensor')
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_roi_head import BaseRoIHead
from .bbox_heads import (BBoxHead, ConvFCBBoxHead, DIIHead,
DoubleConvFCBBoxHead, SABLHead, SCNetBBoxHead,
Shared2FCBBoxHead, Shared4Conv1FCBBoxHead)
from .cascade_roi_head import Cas... | # Copyright (c) OpenMMLab. All rights reserved.
from .base_roi_head import BaseRoIHead
from .bbox_heads import (BBoxHead, ConvFCBBoxHead, DIIHead,
DoubleConvFCBBoxHead, SABLHead, SCNetBBoxHead,
Shared2FCBBoxHead, Shared4Conv1FCBBoxHead)
from .cascade_roi_head import Cas... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .quasi_dense_tracker import QuasiDenseTracker
__all__ = ['BaseTracker', 'ByteTracker', 'QuasiDenseTracker']
| # Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
__all__ = ['BaseTracker', 'ByteTracker']
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for ite... | import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for ite... |
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.19.5"
SCIPY_MIN_VERSION = "1.6.0"
JOBLIB_MIN_VERSION = "1... | """All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.19.5"
SCIPY_MIN_VERSION = "1.6.0"
JOBLIB_MIN_VERSION = "1... |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
import shutil
import tempfile
import unittest
from transformers import Owlv2Processor
from transformers.testing_utils import require_scipy
from ...test_processing_common import ProcessorTesterMixin
@require_scipy
class Owlv2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = Owlv2Processor... | import shutil
import tempfile
import unittest
import pytest
from transformers import Owlv2Processor
from transformers.testing_utils import require_scipy
from ...test_processing_common import ProcessorTesterMixin
@require_scipy
class Owlv2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class =... |
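Each row above pairs an `input` snippet with its edited `output` counterpart (both plain string columns, truncated by the viewer). A minimal sketch, using only the Python standard library, of how one such pair can be inspected as a unified diff; the pair below is copied from the llama import row above:

```python
import difflib

# One input/output pair from the table above (the llama __init__ row).
input_code = """\
from .generation import Llama, Dialog
from .model import ModelArgs, Transformer
from .tokenizer import Tokenizer
"""
output_code = """\
from .generation import Llama
from .model import ModelArgs, Transformer
from .tokenizer import Tokenizer
"""

# Render the edit as a unified diff, one hunk per changed region.
diff = difflib.unified_diff(
    input_code.splitlines(keepends=True),
    output_code.splitlines(keepends=True),
    fromfile="input",
    tofile="output",
)
print("".join(diff))
```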