| input | output |
|---|---|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import RegNet
regnet_test_data = [
('regnetx_400mf',
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
bot_mul=1.0), [32, 64, 160, 384]),
('regnetx_800mf',
dict(w0=56, wa=35.73, wm=2.2... | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import RegNet
regnet_test_data = [
('regnetx_400mf',
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
bot_mul=1.0), [32, 64, 160, 384]),
('regnetx_800mf',
dict(w0=56, wa=35.73, wm=2.2... |
import pytest
from hubble.executor.hubio import HubIO
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_pod_parser
@pytest.mark.skip('jinahub not available')
@pytest.mark.parametrize('uses', ['jinaai+docker://jina-ai/DummyExecutor'])
def test_container_pod(mocker, monkeypatch, uses):
... | import pytest
from hubble.executor.hubio import HubIO
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_pod_parser
@pytest.mark.parametrize('uses', ['jinaai+docker://jina-ai/DummyExecutor'])
def test_container_pod(mocker, monkeypatch, uses):
mock = mocker.Mock()
def _mock_pul... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestGLIP(TestCas... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestGLIP(TestCas... |
from typing import List
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request impor... | from typing import List
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request impor... |
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussia... | # Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussia... |
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class CanaryLayer(layers.Layer):
def __init__(self):
super().__init__()
self.training = None
self.received_mask = False
def... | import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class CanaryLayer(layers.Layer):
def __init__(self):
super().__init__()
self.training = None
self.received_mask = False
def... |
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natura... | """
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natura... |
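The truncated pair above is a sentence-transformers semantic-search example. A minimal sketch of the underlying idea, assuming the standard `SentenceTransformer` / `util.semantic_search` API (the model name and corpus are illustrative, not taken from the truncated sample):

```python
# Minimal semantic-search sketch; model name and corpus are illustrative.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")
corpus = ["Go is a statically typed language.", "Keras is a deep learning API."]
corpus_embeddings = model.encode(corpus, convert_to_tensor=True)

query_embedding = model.encode("typed programming languages", convert_to_tensor=True)
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=2)[0]
for hit in hits:
    print(corpus[hit["corpus_id"]], hit["score"])
```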
"""Test Fireworks API wrapper.
In order to run this test, you need to have a Fireworks API key.
You can get it by registering for free at https://api.fireworks.ai/.
A test key can be found at https://api.fireworks.ai/settings/api-keys
You'll then need to set the FIREWORKS_API_KEY environment variable to your API key.
""... | """Test Fireworks API wrapper.
In order to run this test, you need to have a Fireworks API key.
You can get it by registering for free at https://api.fireworks.ai/.
A test key can be found at https://api.fireworks.ai/settings/api-keys
You'll then need to set the FIREWORKS_API_KEY environment variable to your API key.
""... |
from typing import Any # noqa: F401
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.torch_tensor import TorchTensor
torch_base = type(TorchTensor) # type: Any
embedding_base = type(EmbeddingMixin) # t... | from typing import Any # noqa: F401
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.torch_tensor import TorchTensor
torch_base = type(TorchTensor) # type: Any
embedding_base = type(EmbeddingMixin) # t... |
from typing import TYPE_CHECKING, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic.networks import Parts
from docarray.proto import NodeProto
T = TypeVar('T', bound='AnyUr... | from typing import TYPE_CHECKING, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
from docarray.proto import NodeProto
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic.networks import Parts
T = TypeVar('T', bound='AnyUrl')
... |
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.huggingface import HuggingFaceLLM
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in HuggingFaceLLM.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
| from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.huggingface import HuggingFaceInferenceAPI, HuggingFaceLLM
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in HuggingFaceInferenceAPI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
names_of_base_cla... |
import numpy as np
from docarray import Image
from docarray.typing import Tensor
def test_image():
image = Image(uri='http://jina.ai')
image.tensor = image.uri.load()
assert isinstance(image.tensor, np.ndarray)
| from docarray import Image
from docarray.typing import Tensor
def test_image():
image = Image(uri='http://jina.ai')
image.tensor = image.uri.load()
assert isinstance(image.tensor, Tensor)
|
from llama_index_instrumentation.span_handlers.base import BaseSpanHandler, T # noqa
| import inspect
import threading
from abc import abstractmethod
from typing import Any, Dict, List, Generic, Optional, TypeVar
from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr, ConfigDict
from llama_index.core.instrumentation.span.base import BaseSpan
T = TypeVar("T", bound=BaseSpan)
class ... |
from typing import Union, TextIO, BinaryIO, TYPE_CHECKING, Type
if TYPE_CHECKING:
from docarray.typing import T
class CommonIOMixin:
"""The common IO helper function for arrays."""
def save(
self,
file: Union[str, TextIO, BinaryIO],
file_format: str = 'binary',
encoding: ... | from typing import Union, TextIO, BinaryIO, TYPE_CHECKING, Type
if TYPE_CHECKING:
from ....typing import T
class CommonIOMixin:
"""The common IO helper function for arrays."""
def save(
self,
file: Union[str, TextIO, BinaryIO],
file_format: str = 'binary',
encoding: str =... |
"""
This example computes the score between a query and all possible
sentences in a corpus using a Cross-Encoder for semantic textual similarity (STS).
It then outputs the most similar sentences for the given query.
"""
import numpy as np
from sentence_transformers.cross_encoder import CrossEncoder
# Pre-trained cros... | """
This example computes the score between a query and all possible
sentences in a corpus using a Cross-Encoder for semantic textual similarity (STS).
It then outputs the most similar sentences for the given query.
"""
from sentence_transformers.cross_encoder import CrossEncoder
import numpy as np
# Pre-trained cross... |
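A minimal sketch of the Cross-Encoder ranking the docstring above describes, assuming the standard `CrossEncoder.predict` API (the model name and sentences are illustrative):

```python
# Minimal cross-encoder STS sketch; model name and sentences are illustrative.
from sentence_transformers.cross_encoder import CrossEncoder

model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
query = "A man is eating pasta."
corpus = ["A man is eating food.", "The girl is carrying a baby."]
scores = model.predict([(query, sentence) for sentence in corpus])
# Rank sentences by predicted similarity, highest first.
for score, sentence in sorted(zip(scores, corpus), key=lambda x: x[0], reverse=True):
    print(f"{score:.4f}\t{sentence}")
```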
"""Tests related to the `DataIter` interface."""
from typing import Callable, Optional
import numpy as np
from xgboost import testing as tm
from ..compat import import_cupy
from ..core import DataIter, DMatrix, ExtMemQuantileDMatrix, QuantileDMatrix
def run_mixed_sparsity(device: str) -> None:
"""Check QDM wi... | """Tests related to the `DataIter` interface."""
from typing import Callable, Optional
import numpy as np
from xgboost import testing as tm
from ..compat import import_cupy
from ..core import DataIter, DMatrix, ExtMemQuantileDMatrix, QuantileDMatrix
def run_mixed_sparsity(device: str) -> None:
"""Check QDM wi... |
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.layer import Layer
from keras.src.ops import operation_utils
@keras_export("keras.layers.Reshape")
class Reshape(Layer):
"""Layer that reshapes inputs into th... | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.layer import Layer
from keras.src.ops import operation_utils
@keras_export("keras.layers.Reshape")
class Reshape(Layer):
"""Layer that reshapes inputs into th... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType
from ..util... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.core import ConfigType, OptConfigType, SampleList
from mmdet.registry import MODELS
from ..utils.misc import unpack_gt_instance... |
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.models import conv_tasnet_base, hdemucs_high
@dataclass
class SourceSeparationBundle:
"""torchaudio.pipelines.SourceSeparationBundle()
Dataclass that bundles components... | from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.models import conv_tasnet_base
@dataclass
class SourceSeparationBundle:
"""torchaudio.pipelines.SourceSeparationBundle()
Dataclass that bundles components for performin... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.msword import MsWordParser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling opt... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.msword import MsWordParser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling opt... |
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
image_size = (640, 640)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
model =... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
neck=dict(
type='F... |
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_A... | import json
import re
from typing import Pattern, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final... |
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseM... | from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseM... |
"""Gemini embeddings file."""
import deprecated
from typing import Any, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
imp... | """Gemini embeddings file."""
from typing import Any, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
import google.generat... |
import gzip
from os import PathLike
from pathlib import Path
from typing import Union
import pytest
import yaml
from vcr import VCR
from vcr.persisters.filesystem import CassetteNotFoundError
from vcr.request import Request
class CustomSerializer:
"""Custom serializer for VCR cassettes using YAML and gzip.
... | import base64
import gzip
import pytest
from vcr import VCR # type: ignore[import-untyped]
from vcr.serializers import yamlserializer # type: ignore[import-untyped]
class YamlGzipSerializer:
@staticmethod
def serialize(cassette_dict: dict) -> str:
raw = yamlserializer.serialize(cassette_dict).encod... |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
from __future__ import annotations
from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, TransformersTokenizerWrapper, WordTokenizer
__all__ = [
"WordTokenizer",
"WhitespaceTokenizer",
"PhraseTokenizer",
"ENGLIS... | from __future__ import annotations
from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
__all__ = ["WordTokenizer", "WhitespaceTokenizer", "PhraseTokenizer", "ENGLISH_STOP_WORDS"]
|
from collections import OrderedDict
from typing import Any, Dict, Optional, Type, cast
import pytest
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.response_synthesizers import Refine
from llama_index.core.response_synthesizers.refine import StructuredRefineResponse
from llama_index.core.... | from collections import OrderedDict
from typing import Any, Dict, Optional, Type, cast
import pytest
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.response_synthesizers import Refine
from llama_index.core.response_synthesizers.refine import StructuredRefineResponse
from llama_index.core.... |
from typing import Any, Dict, Optional, Sequence
from llama_index.core.base.base_selector import (
BaseSelector,
SelectorResult,
SingleSelection,
)
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.indices.query.embedding_utils import get_top_k_embeddings
from llama_inde... | from typing import Any, Dict, Optional, Sequence
from llama_index.core.base.base_selector import (
BaseSelector,
SelectorResult,
SingleSelection,
)
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.indices.query.embedding_utils import get_top_k_embeddings
from llama_inde... |
"""Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from ... | """Pass input through a moderation endpoint."""
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_vali... |
from __future__ import annotations
import json
import logging
import os
import torch
from torch import Tensor, nn
logger = logging.getLogger(__name__)
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: list[str], word_weights:... | from __future__ import annotations
import json
import logging
import os
import torch
from torch import Tensor, nn
logger = logging.getLogger(__name__)
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: list[str], word_weights:... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...transformer_tf_text_encode import TransformerTFTextEncoder
target_dim = 768
@pytest.fixture... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import numpy as np
import pytest
from jina import Document, DocumentArray
from ...transformer_tf_text_encode import TransformerTFTextEncoder
target_dim = 768
@pytest.fixture()
def docs_generator():
return ... |
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_bounding_box_format,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
... | from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions... |
import io
import json
import struct
from dataclasses import dataclass
from typing import Any, Optional
import torch
_metadata_fn: str = "model.safetensors.index.json"
FILE_NAME = "model-{cpt_idx}-of-{num_files}"
SHARDED_FILE_NAME = "shard-{shard_idx}-model-{cpt_idx}-of-{num_files}"
SUFFIX = ".safetensors"
# metada... | import io
import json
import struct
from dataclasses import dataclass
from typing import Any, Optional
import torch
_metadata_fn: str = "model.safetensors.index.json"
FILE_NAME = "model-{cpt_idx}-of-{num_files}"
SHARDED_FILE_NAME = "shard-{shard_idx}-model-{cpt_idx}-of-{num_files}"
SUFFIX = ".safetensors"
# metada... |
from abc import ABC
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
import trimesh
from pydantic import BaseConfig
from pydantic.fields impo... | from abc import ABC
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
import trimesh
from pydantic import BaseConfig
from pydantic.fields import ModelField
MESH_FILE_FORMATS = ('obj', 'glb', 'ply')
T... |
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .fileio import *
from .utils import *
| # Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .utils import *
|
"""
This file contains some utility functions used to find parallel sentences
in two monolingual corpora.
Code in this file has been adapted from the LASER repository:
https://github.com/facebookresearch/LASER
"""
import gzip
import lzma
import time
import faiss
import numpy as np
######## Functions to find and... | """
This file contains some utility functions used to find parallel sentences
in two monolingual corpora.
Code in this file has been adapted from the LASER repository:
https://github.com/facebookresearch/LASER
"""
import faiss
import numpy as np
import time
import gzip
import lzma
######## Functions to find and s... |
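A minimal sketch of the faiss nearest-neighbor step such mining utilities build on (dimensions and data are illustrative):

```python
# Minimal faiss k-NN sketch in the spirit of LASER-style mining;
# dimensions and data are illustrative placeholders.
import faiss
import numpy as np

dim = 1024
source = np.random.rand(1000, dim).astype("float32")
target = np.random.rand(1000, dim).astype("float32")
faiss.normalize_L2(source)
faiss.normalize_L2(target)

index = faiss.IndexFlatIP(dim)  # inner product == cosine after L2 normalization
index.add(target)
scores, neighbors = index.search(source, 4)  # 4 nearest targets per source row
```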
from jina.orchestrate.pods.factory import PodFactory
from tests.helper import _generate_pod_args
def test_pod_instantiate_start_same_context():
arg = _generate_pod_args()
pod_args = [arg, arg]
for args in pod_args:
pod = PodFactory.build_pod(args)
with pod:
pass
def test_pod... | from jina.parsers import set_pod_parser
from jina.orchestrate.pods.factory import PodFactory
def test_pod_instantiate_start_same_context():
arg = set_pod_parser().parse_args([])
pod_args = [arg, arg]
for args in pod_args:
pod = PodFactory.build_pod(args)
with pod:
pass
def t... |
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .CosineSimilarityLoss import CosineSimilarityLoss
from .SoftmaxLoss import SoftmaxLoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .TripletLoss i... | from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .CosineSimilarityLoss import CosineSimilarityLoss
from .SoftmaxLoss import SoftmaxLoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .TripletLoss i... |
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .yolox_mode_switch_hoo... | # Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .memory_profiler_hook import MemoryProfilerHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOX... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
@MODELS.register_module()
class ... | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class ChannelMapper(BaseModule):
r"""Channel Mapper to reduce/increase channels of backbone features.
This i... |
from __future__ import annotations
from enum import Enum
from typing import Any, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCL... | from __future__ import annotations
from enum import Enum
from typing import Any, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCL... |
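For reference, the contrastive loss these distance metrics plug into can be sketched like this (margin value is illustrative; the library's exact scaling may differ):

```python
# Contrastive-loss sketch: label 1.0 = similar pair, 0.0 = dissimilar pair.
import torch.nn.functional as F

def contrastive_loss(emb_a, emb_b, label, margin=0.5):
    d = F.pairwise_distance(emb_a, emb_b, p=2)  # Euclidean distance per pair
    # Pull similar pairs together; push dissimilar pairs beyond the margin.
    return (label * d.pow(2) + (1 - label) * F.relu(margin - d).pow(2)).mean()
```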
from typing import Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
i... | from typing import Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
i... |
from docarray.documents.audio import Audio
from docarray.documents.image import Image
from docarray.documents.mesh import Mesh3D
from docarray.documents.point_cloud import PointCloud3D
from docarray.documents.text import Text
from docarray.documents.video import Video
__all__ = ['Text', 'Image', 'Audio', 'Mesh3D', 'Po... | from docarray.documents.audio import Audio
from docarray.documents.image import Image
from docarray.documents.mesh import Mesh3D
from docarray.documents.point_cloud import PointCloud3D
from docarray.documents.text import Text
__all__ = ['Text', 'Image', 'Audio', 'Mesh3D', 'PointCloud3D']
|
from os.path import join
from pathlib import Path
from typing import Any, Callable, Optional, Union
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, list_dir, list_files
from .vision import VisionDataset
class Omniglot(VisionDataset):
"""`Omniglot <https://github.com/brend... | from os.path import join
from pathlib import Path
from typing import Any, Callable, List, Optional, Tuple, Union
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, list_dir, list_files
from .vision import VisionDataset
class Omniglot(VisionDataset):
"""`Omniglot <https://git... |
_base_ = '../nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
neck=dict(
_delete_=True,
type='FPG',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
inter_channels=256,
num_outs=5,
add_extra_c... | _base_ = '../nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
neck=dict(
_delete_=True,
type='FPG',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
inter_channels=256,
num_outs=5,
add_extra_c... |
"""
Tatoeba (https://tatoeba.org/) is a collection of sentences and translations, mainly aimed at language learning.
It is available for more than 300 languages.
This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like
"""
import gzip
import os
import tarfile
impo... | """
Tatoeba (https://tatoeba.org/) is a collection of sentences and translations, mainly aimed at language learning.
It is available for more than 300 languages.
This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like
"""
import gzip
import os
import tarfile
impo... |
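A stdlib-only sketch of the download-and-extract step the docstring describes; the export URL here is an assumption, not taken from the truncated script:

```python
# Download-and-extract sketch; the URL is a hypothetical Tatoeba export path.
import os
import tarfile
import urllib.request

url = "https://downloads.tatoeba.org/exports/sentences.tar.bz2"  # assumed path
archive = "sentences.tar.bz2"
if not os.path.exists(archive):
    urllib.request.urlretrieve(url, archive)
with tarfile.open(archive, "r:bz2") as tar:
    tar.extractall("tatoeba")
```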
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = d... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = d... |
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from torch import nn
from sentence_transformers.models.Module import Module
class CNN(Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
con... | from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone,... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone,... |
"""
This script translates the queries in the MS MARCO dataset to the defined target languages.
For machine translation, we use EasyNMT: https://github.com/UKPLab/EasyNMT
You can install it via: pip install easynmt
Usage:
python translate_queries [target_language]
"""
import logging
import os
import sys
import tarfi... | """
This script translates the queries in the MS MARCO dataset to the defined target languages.
For machine translation, we use EasyNMT: https://github.com/UKPLab/EasyNMT
You can install it via: pip install easynmt
Usage:
python translate_queries [target_language]
"""
import logging
import os
import sys
import tarfi... |
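A minimal EasyNMT sketch matching the usage the docstring describes (model choice and language code are illustrative):

```python
# EasyNMT translation sketch; model name and target language are illustrative.
from easynmt import EasyNMT

model = EasyNMT("opus-mt")
queries = ["what is the capital of france", "how to bake bread"]
translated = model.translate(queries, target_lang="de")
print(translated)
```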
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import RerankingEvaluator
from sentence_transformers.util import cos_sim
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse... | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import RerankingEvaluator
from sentence_transformers.util import cos_sim
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse... |
import os
from typing import Tuple
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing.basic_models import run_custom_objective
class TestGPUBasicModels:
def run_cls(self, X: np.ndarray, y: np.ndarray) -> Tuple[int, int]:
cls = xgb.XGBClassifier(... | import os
import sys
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
sys.path.append("tests/python")
import test_basic_models as test_bm
# Don't import the test class, otherwise they will run twice.
import test_callback as test_cb # noqa
rng = np.random.RandomState(1994)
... |
from typing import Any, Dict, Union
import torch
from torchvision import datapoints, transforms as _transforms
from torchvision.transforms.v2 import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
"""[BETA] Convert bounding box coordinates to the given ... | from typing import Any, Dict, Union
import torch
from torchvision import datapoints, transforms as _transforms
from torchvision.transforms.v2 import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (datapoints.BoundingBox,)
def ... |
from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image_tensor(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> features.Image:
if isinstance(inpt, np.ndarray)... | from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image_tensor(image: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> features.Image:
if isinstance(image, np.ndarra... |
"""LLMResult class."""
from __future__ import annotations
from copy import deepcopy
from typing import Literal, Optional, Union
from pydantic import BaseModel
from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk
from langchain_core.outputs.generation import Generation, GenerationCh... | """LLMResult class."""
from __future__ import annotations
from copy import deepcopy
from typing import Literal, Optional, Union
from pydantic import BaseModel
from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk
from langchain_core.outputs.generation import Generation, GenerationCh... |
from typing import Any, Dict, List, Optional, Union
from huggingface_hub.utils import get_session
from .. import config
from ..exceptions import DatasetsError
from .file_utils import (
get_authentication_headers_for_url,
)
from .logging import get_logger
logger = get_logger(__name__)
class DatasetViewerError(... | from typing import Any, Dict, List, Optional, Union
from .. import config
from ..exceptions import DatasetsError
from .file_utils import (
get_authentication_headers_for_url,
http_get,
)
from .logging import get_logger
logger = get_logger(__name__)
class DatasetViewerError(DatasetsError):
"""Dataset vi... |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k... | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k... |
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export("keras.optimizers.SGD")
class SGD(optimizer.Optimizer):
"""Gradient descent (with momentum) optimizer.
Update rule for parameter `w` with gradient `g` when `momentum` is 0:
``... | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export("keras.optimizers.SGD")
class SGD(optimizer.Optimizer):
"""Gradient descent (with momentum) optimizer.
Update rule for parameter `w` with gradient `g` when `momentum` is 0:
``... |
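Written out in plain Python, the update rule the SGD docstring states (a sketch of the rule itself, not Keras internals):

```python
# SGD update for parameter w with gradient g.
# momentum == 0:  w = w - learning_rate * g
# momentum > 0:   velocity = momentum * velocity - learning_rate * g
#                 w = w + velocity
def sgd_step(w, g, velocity, learning_rate=0.01, momentum=0.9):
    velocity = momentum * velocity - learning_rate * g
    return w + velocity, velocity
```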
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_c... | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_c... |
import torch
from torch import Tensor
class ImageList:
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size,
and storing in a field the original sizes of each image
Args:
tensors (tensor): Tensor co... | from typing import List, Tuple
import torch
from torch import Tensor
class ImageList:
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size,
and storing in a field the original sizes of each image
Args:
... |
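A minimal sketch of the padding scheme the `ImageList` docstring describes (the helper name is hypothetical):

```python
# Batch variable-size images into one zero-padded tensor and remember
# each image's original (height, width), as the docstring describes.
import torch

def build_image_list(images):  # images: list of C x H x W tensors
    max_h = max(img.shape[-2] for img in images)
    max_w = max(img.shape[-1] for img in images)
    batch = images[0].new_zeros((len(images), images[0].shape[0], max_h, max_w))
    for pad, img in zip(batch, images):
        pad[..., : img.shape[-2], : img.shape[-1]].copy_(img)
    sizes = [(img.shape[-2], img.shape[-1]) for img in images]
    return batch, sizes
```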
from typing import Optional, List, Dict, Any, TYPE_CHECKING, Union
from pydantic import BaseModel, validator
from docarray.math.ndarray import to_list
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import ArrayType
# this order must be preserved: https://pydantic-docs.helpmanual.io/usage/types/#unio... | from typing import Optional, List, Dict, Any, TYPE_CHECKING, Union
from pydantic import BaseModel, validator
from docarray.math.ndarray import to_list
if TYPE_CHECKING:
from docarray.typing import ArrayType
# this order must be preserved: https://pydantic-docs.helpmanual.io/usage/types/#unions
_ProtoValueType =... |
_base_ = 'fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model setting
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
init_cfg=dict(
... | _base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model setting
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
init_cfg=dict(
... |
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 20 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry, build_runner_from_cfg
# manage all kinds of runners lik... | # Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry, build_runner_from_cfg
# manage all kinds of runners lik... |
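A minimal sketch of how such root registries are used, assuming the standard MMEngine `Registry` API (the toy class and config are illustrative):

```python
# Registry usage sketch; the toy detector and config are illustrative.
from mmengine.registry import Registry

MODELS = Registry("model")

@MODELS.register_module()
class ToyDetector:
    def __init__(self, num_classes: int = 80):
        self.num_classes = num_classes

# Build an instance from a config dict whose 'type' names a registered class.
detector = MODELS.build(dict(type="ToyDetector", num_classes=20))
```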
from ._transforms import BarkScale, BarkSpectrogram, ChromaScale, ChromaSpectrogram, InverseBarkScale
__all__ = [
"BarkScale",
"BarkSpectrogram",
"ChromaScale",
"ChromaSpectrogram",
"InverseBarkScale",
]
| from ._transforms import BarkScale, BarkSpectrogram, InverseBarkScale
__all__ = [
"BarkScale",
"BarkSpectrogram",
"InverseBarkScale",
]
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseTranslationEvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but hope to see some on the hub soon
model = SparseEncoder("naver/spl... | import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseTranslationEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SP... |
"""Tool for interacting with a single API with natural language definition."""
from __future__ import annotations
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_community.chains.openapi.chain import OpenAPIEndpointCh... | """Tool for interacting with a single API with natural language definition."""
from __future__ import annotations
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_community.chains.openapi.chain import OpenAPIEndpointCh... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser... |
import os
from unittest.mock import MagicMock, patch
import pytest
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.openai.base import ChatMessage, MessageRole
from llama_index.llms.asi import ASI
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in ASI.__mro__]
... | import os
from unittest.mock import MagicMock, patch
import pytest
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.openai.base import ChatMessage, MessageRole
from llama_index.llms.asi import ASI
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in ASI.__mro__]
... |
from functools import wraps
from typing import TYPE_CHECKING, List
from jina.excepts import FlowBuildLevelError
# noinspection PyUnreachableCode
if TYPE_CHECKING: # pragma: no cover
from jina.enums import FlowBuildLevel
from jina.orchestrate.flow.base import Flow
def allowed_levels(levels: List['FlowBuildL... | from functools import wraps
from typing import TYPE_CHECKING, List
from jina.excepts import FlowBuildLevelError
# noinspection PyUnreachableCode
if TYPE_CHECKING: # pragma: no cover
from jina.enums import FlowBuildLevel
from jina.orchestrate.flow.base import Flow
def allowed_levels(levels: List['FlowBuildLe... |
from llama_index_instrumentation.span_handlers.simple import SimpleSpanHandler # noqa
| import inspect
from typing import Any, Dict, cast, List, Optional, TYPE_CHECKING
from llama_index.core.instrumentation.span.simple import SimpleSpan
from llama_index.core.instrumentation.span_handlers.base import BaseSpanHandler
from datetime import datetime
from functools import reduce
import warnings
if TYPE_CHECKIN... |
from __future__ import annotations
__version__ = "5.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
import warnings
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_mode... | from __future__ import annotations
__version__ = "4.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
import warnings
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_mode... |
from jina import DocumentArray, Flow
from ...clip_text import CLIPTextEncoder
def test_no_documents():
test_docs = DocumentArray()
f = Flow().add(uses=CLIPTextEncoder)
with f:
f.search(test_docs, {})
assert len(test_docs) == 0 # SUCCESS
| from jina import DocumentArray, Flow
from jinahub.encoder.clip_text import CLIPTextEncoder
def test_no_documents():
test_docs = DocumentArray()
f = Flow().add(uses=CLIPTextEncoder)
with f:
f.search(test_docs, {})
assert len(test_docs) == 0 # SUCCESS
|
import inspect
import re
from hashlib import sha256
from typing import List
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_... | import inspect
import re
from hashlib import sha256
from typing import List
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .text import text
def _hash_python_lines(lines: List[str]) -> ... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection webcam demo')
parser.add_argument('c... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='MMDetec... |
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseMSEEvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("nav... | import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseMSEEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE mod... |
__all__ = ['filter_docs']
import json
from typing import Dict, List, Union
from docarray.array.any_array import AnyDocArray
from docarray.array.doc_list.doc_list import DocList
def filter_docs(
docs: AnyDocArray,
query: Union[str, Dict, List[Dict]],
) -> AnyDocArray:
"""
Filter the Documents in the... | __all__ = ['filter_docs']
import json
from typing import Dict, List, Union
from docarray.array.any_array import AnyDocArray
from docarray.array.doc_list.doc_list import DocList
def filter_docs(
docs: AnyDocArray,
query: Union[str, Dict, List[Dict]],
) -> AnyDocArray:
"""
Filter the Documents in the ... |
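A minimal `filter_docs` usage sketch, assuming docarray's MongoDB-style query operators (the schema and `$lte` query are illustrative):

```python
# filter_docs sketch with a MongoDB-style query; schema is illustrative.
from docarray import BaseDoc, DocList
from docarray.utils.filter import filter_docs

class Book(BaseDoc):
    title: str
    price: int

books = DocList[Book]([Book(title="a", price=5), Book(title="b", price=50)])
cheap = filter_docs(books, {"price": {"$lte": 10}})
assert len(cheap) == 1
```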
import os
from pathlib import Path
import pytest
from jina import Document, DocumentArray, Executor
def test_config():
ranker = Executor.load_config(
str(Path(__file__).parents[2] / 'config.yml'),
override_with={
'query_features': ['query'],
'match_features': ['match'],
... | import os
from pathlib import Path
import pytest
from jina import Executor
def test_config():
ranker = Executor.load_config(
str(Path(__file__).parents[2] / 'config.yml'),
override_with={
'query_features': ['query'],
'match_features': ['match'],
'relevance_labe... |
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._functorch
from torch._dynamo.precompile_context import PrecompileContext
from torch._functorch import config as functorch_config
from torch._functorch._aot_autograd.autograd_cache import (
BundledAOTAutogr... | # Owner(s): ["module: dynamo"]
import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._functorch
from torch._dynamo.precompile_context import PrecompileContext
from torch._functorch import config as functorch_config
from torch._functorch._aot_autograd.autograd_cache import (
BundledAOTAutogr... |
_base_ = './gfl_r50_fpn_ms-2x_coco.py'
model = dict(
type='GFL',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_... | _base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
type='GFL',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
... |
"""Argparser module for WorkerRuntime"""
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.runtime import mixin_base_runtime_parser
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:par... | """Argparser module for WorkerRuntime"""
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.runtime import mixin_base_runtime_parser
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:par... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Document, Flow
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(text='it is a good day! the dog sits on the floor.')
yield doc
def test_use_in_flow()... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Flow, Document
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(
text='it is a good day! the dog sits on the floor.')
yield doc
def test_... |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from mmdet.registry import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
... | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Con... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.export.saved_model import ExportArchive as ExportArchive
| """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.export.saved_model import ExportArchive
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
"""Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_openai import AzureChatOpenAI
class TestOpenAIStandard(ChatModelUnitTests):
@property... | """Standard LangChain interface tests"""
from typing import Tuple, Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_openai import AzureChatOpenAI
class TestOpenAIStandard(Ch... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.device import (get_device, is_cuda_available, is_mlu_available,
is_mps_available, is_musa_available,
is_npu_available)
def test_get_device():
device = get_device()
if is_npu_available():
... | # Copyright (c) OpenMMLab. All rights reserved.
from mmengine.device import (get_device, is_cuda_available, is_mlu_available,
is_mps_available, is_npu_available)
def test_get_device():
device = get_device()
if is_npu_available():
assert device == 'npu'
elif is_cuda_ava... |
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import (get_device, get_max_cuda_memory, is_cuda_available,
is_dipu_available, is_mlu_available, is_mps_available,
is_npu_available, is_npu_support_full_precision)
__all__ = [
'get_max_cuda_memory', 'get_device', 'i... | # Copyright (c) OpenMMLab. All rights reserved.
from .utils import (get_device, get_max_cuda_memory, is_cuda_available,
is_mlu_available, is_mps_available, is_npu_available,
is_npu_support_full_precision)
__all__ = [
'get_max_cuda_memory', 'get_device', 'is_cuda_available',
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import BeautifulSoupTransformer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling o... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import BeautifulSoupTransformer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling o... |
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import AspectRatioBatchSampler
from .class_aware_sampler import ClassAwareSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
__all__ = [
'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',
'... | # Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import AspectRatioBatchSampler
from .class_aware_sampler import ClassAwareSampler
__all__ = ['ClassAwareSampler', 'AspectRatioBatchSampler']
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adamax"])
class Adamax(optimizer.Optimizer):
"""Optimizer that implements the Adamax algorithm.
Adamax, a variant of Adam based on the infinity norm, is a first-... | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adamax"])
class Adamax(optimizer.Optimizer):
"""Optimizer that implements the Adamax algorithm.
Adamax, a variant of Adam based on the infinity norm, is a first-... |
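The Adamax update the docstring summarizes, written out as a sketch (defaults and epsilon are illustrative):

```python
# Adamax step sketch: first moment m, infinity-norm second moment u.
import numpy as np

def adamax_step(w, g, m, u, t, lr=0.001, beta_1=0.9, beta_2=0.999, eps=1e-7):
    m = beta_1 * m + (1 - beta_1) * g          # first-moment estimate
    u = np.maximum(beta_2 * u, np.abs(g))      # infinity-norm second moment
    w = w - (lr / (1 - beta_1 ** (t + 1))) * m / (u + eps)
    return w, m, u
```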
import os
import time
import pytest
import requests as general_requests
from jina import Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def executor_images_built():
import docker
client = docker.from_env()
client.images.build(path=os.path.join(cur_dir, 'executor1'), tag='enc... | import os
import time
import pytest
import requests as general_requests
from jina import Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def executor_images_built():
import docker
client = docker.from_env()
client.images.build(path=os.path.join(cur_dir, 'executor1'), tag='enc... |
from torchaudio._internal import module_utils as _mod_utils
from . import (
sox_utils,
)
from .download import download_asset
if _mod_utils.is_sox_available():
sox_utils.set_verbosity(1)
__all__ = [
"download_asset",
"sox_utils",
]
| from torchaudio._internal import module_utils as _mod_utils
from . import (
sox_utils,
)
if _mod_utils.is_sox_available():
sox_utils.set_verbosity(1)
|
from typing import Any, Dict
from pydantic.tools import parse_obj_as
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import ID, AnyUrl, Embedding, ImageUrl, Tensor
class ProtoMi... | from typing import Any, Dict, Type
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import Tensor
from ..abstract_document import AbstractDocument
from ..base_node import BaseNode
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def _get_nested_document_class(cls, field: st... |
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, TypeVar
from pydantic import create_model, create_model_from_typeddict
from pydantic.config import BaseConfig
from typing_extensions import TypedDict
from docarray import BaseDoc
if TYPE_CHECKING:
from pydantic.typing import AnyClassMethod
... | from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, TypeVar
from pydantic import create_model, create_model_from_typeddict
from pydantic.config import BaseConfig
from typing_extensions import TypedDict
from docarray import BaseDoc
if TYPE_CHECKING:
from pydantic.typing import AnyClassMethod
... |
import json
from typing import Any, Callable, Optional, Union
from langchain_core.utils.json import parse_json_markdown
from langchain.evaluation.schema import StringEvaluator
class JsonEditDistanceEvaluator(StringEvaluator):
"""
An evaluator that calculates the edit distance between JSON strings.
This... | import json
from typing import Any, Callable, Optional, Union
from langchain_core.utils.json import parse_json_markdown
from langchain.evaluation.schema import StringEvaluator
class JsonEditDistanceEvaluator(StringEvaluator):
"""
An evaluator that calculates the edit distance between JSON strings.
This... |
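A dependency-free sketch of the evaluator's idea: canonicalize both JSON strings, then take a plain Levenshtein distance (the real class may differ in details):

```python
# Canonicalize both JSON strings, then compute a DP Levenshtein distance.
import json

def json_edit_distance(a: str, b: str) -> int:
    x = json.dumps(json.loads(a), sort_keys=True, separators=(",", ":"))
    y = json.dumps(json.loads(b), sort_keys=True, separators=(",", ":"))
    prev = list(range(len(y) + 1))
    for i, ca in enumerate(x, 1):
        cur = [i]
        for j, cb in enumerate(y, 1):
            cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
        prev = cur
    return prev[-1]
```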
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
emb... | _base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
em... |
__version__ = '0.13.23'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
| __version__ = '0.13.22'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information including virtual
memory, swap memory, and the memory of the current process.
... | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information including virtual
memory, swap memory, and the memory of the current process.
Args:
interval (int... |