| input | output |
|---|---|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_panoptic.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dic... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_panoptic.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
model = dict(
type='PanopticFPN',
img_n... |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytes... | import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pi... |
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 20 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from mmengine.registry import build_model_from_cfg, build_runner_from_cfg
from .registry... | # Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 20 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry, build_runner_from_cfg
# manage all kinds of runners lik... |
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
class MSELoss(nn.Module):
def __init__(self, model):
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new l... | import torch
from torch import nn, Tensor
from typing import Iterable, Dict
class MSELoss(nn.Module):
def __init__(self, model):
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new la... |
from langchain_core.utils.json_schema import (
_dereference_refs_helper,
_infer_skip_keys,
_retrieve_ref,
dereference_refs,
)
__all__ = [
"_dereference_refs_helper",
"_infer_skip_keys",
"_retrieve_ref",
"dereference_refs",
]
| from langchain_core.utils.json_schema import (
_dereference_refs_helper,
_infer_skip_keys,
_retrieve_ref,
dereference_refs,
)
__all__ = [
"_retrieve_ref",
"_dereference_refs_helper",
"_infer_skip_keys",
"dereference_refs",
]
|
from __future__ import annotations
from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CrossEntropyLoss import CrossEntropyLoss
from .LambdaLoss import (
LambdaLoss,
LambdaRankScheme,
NDCGLoss1Scheme,
NDCGLo... | from __future__ import annotations
from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CrossEntropyLoss import CrossEntropyLoss
from .LambdaLoss import (
LambdaLoss,
LambdaRankScheme,
NDCGLoss1Scheme,
NDCGLo... |
_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
... | _base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
"""Language models.
**Language Model** is a type of model that can generate text or complete
text prompts.
LangChain has two main classes to work with language models: **Chat Models**
and "old-fashioned" **LLMs**.
**Chat Models**
Language models that use a sequence of messages as inputs and return chat messages
as ... | """**Language Model** is a type of model that can generate text or complete
text prompts.
LangChain has two main classes to work with language models: **Chat Models**
and "old-fashioned" **LLMs**.
**Chat Models**
Language models that use a sequence of messages as inputs and return chat messages
as outputs (as oppose... |
# Copyright (c) OpenMMLab. All rights reserved.
from .evaluator import Evaluator
from .metric import BaseMetric, DumpResults
from .utils import get_metric_value
__all__ = ['BaseMetric', 'Evaluator', 'get_metric_value', 'DumpResults']
| # Copyright (c) OpenMMLab. All rights reserved.
from .evaluator import Evaluator
from .metric import BaseMetric
from .utils import get_metric_value
__all__ = ['BaseMetric', 'Evaluator', 'get_metric_value']
|
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdArrayEmbedding',
]
try:
import torch # noqa: F401
except Import... | from docarray.typing.tensor.embedding import Embedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'Embedding',
'NdArrayEmbedding',
]
try:
import torch # noqa: F401
except ImportError:... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import ChatGPTLoader
from langchain_community.document_loaders.chatgpt import concatenate_rows
# Create a way to dynamically look up deprecated imports.
# Used to conso... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import ChatGPTLoader
from langchain_community.document_loaders.chatgpt import concatenate_rows
# Create a way to dynamically look up deprecated imports.
# Used to conso... |
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import MSEEvaluatorFromDataFrame
if TYPE_CHECKING:
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class Spa... | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import MSEEvaluatorFromDataFrame
if TYPE_CHECKING:
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class Spa... |
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
test_cfg=dict(
rcnn=dict(
score_thr=0.05,
nms=dict(type='soft_nms', iou_threshold=0.5),
... | _base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
test_cfg=dict(
rcnn=dict(
score_thr=0.05,
nms=dict(type='soft_nms', iou_threshold=0.5),
... |
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
preprocess_cfg=pre... | _base_ = './mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',... |
import sys
import uuid
from typing import Any, Optional
from uuid import UUID
import pytest
from langchain_core.callbacks import AsyncCallbackHandler, BaseCallbackHandler
from langchain_core.callbacks.manager import (
adispatch_custom_event,
dispatch_custom_event,
)
from langchain_core.runnables import Runnab... | import sys
import uuid
from typing import Any, Optional
from uuid import UUID
import pytest
from langchain_core.callbacks import AsyncCallbackHandler, BaseCallbackHandler
from langchain_core.callbacks.manager import (
adispatch_custom_event,
dispatch_custom_event,
)
from langchain_core.runnables import Runnab... |
import subprocess
import sys
import time
def wait_for_postgres(max_retries=5, delay=5):
for _ in range(max_retries):
try:
result = subprocess.run(
[
"docker",
"compose",
"-f",
"docker-compose.test.y... | import subprocess
import sys
import time
def wait_for_postgres(max_retries=5, delay=5):
for _ in range(max_retries):
try:
result = subprocess.run(
[
"docker",
"compose",
"-f",
"docker-compose.test.y... |
"""Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
class TestHuggingFa... | """Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
class TestHuggingFa... |
from typing import Iterable, Dict
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ... | from typing import Iterable, Dict
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ... |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to... | import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (:obj:`str` or :obj:`Path`, optional): Specify a cache directory to save the file to (ov... |
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` an... | # Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` an... |
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import torch
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
R... | # Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
... |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... | """
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | import numpy as np
import pytest
from docarray.proto import DocProto, NodeProto
from docarray.typing import NdArray
@pytest.mark.proto
def test_ndarray():
original_ndarray = np.zeros((3, 224, 224))
custom_ndarray = NdArray._docarray_from_native(original_ndarray)
tensor = NdArray.from_protobuf(custom_nd... |
"""
Example of training with Dask on GPU
====================================
"""
import dask_cudf
from dask import array as da
from dask import dataframe as dd
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def using_d... | """
Example of training with Dask on GPU
====================================
"""
import dask_cudf
from dask import array as da
from dask import dataframe as dd
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
import xgboost as xgb
from xgboost import dask as dxgb
from xgboost.dask import Das... |
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) for the STSbenchmark from scratch.
It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using... | """
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) for the STSbenchmark from scratch.
It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.jaxarray import JaxArray, metaJax
T = TypeVar('T', bound='AudioJaxArray')
@_register_proto(proto_type_name='audio_jaxar... |
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from .folder import default_loader, find_classes, make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class Imagenette(VisionDataset):
"""`Imagenette <https://github.com/fa... | from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from PIL import Image
from .folder import find_classes, make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class Imagenette(VisionDataset):
"""`Imagenette <https://github... |
"""**sys_info** prints information about the system and langchain packages for debugging purposes.""" # noqa: E501
from collections.abc import Sequence
def _get_sub_deps(packages: Sequence[str]) -> list[str]:
"""Get any specified sub-dependencies."""
from importlib import metadata
sub_deps = set()
... | """**sys_info** prints information about the system and langchain packages for debugging purposes.""" # noqa: E501
from collections.abc import Sequence
def _get_sub_deps(packages: Sequence[str]) -> list[str]:
"""Get any specified sub-dependencies."""
from importlib import metadata
sub_deps = set()
... |
from typing import Any
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_tests.integration_tests import RetrieversIntegrationTests
class ParrotRetriever(BaseRetriever):
parrot_name: str
k: int = 3
def _get_relevant_documents(self, query: st... | from typing import Any, Type
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_tests.integration_tests import RetrieversIntegrationTests
class ParrotRetriever(BaseRetriever):
parrot_name: str
k: int = 3
def _get_relevant_documents(self, que... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_stream... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_streaming
from ...faiss... |
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writ... | # Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writ... |
import itertools
import torch
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
get_asset_path,
skipIfNoCtcDecoder,
TempDirMixin,
TorchaudioTestCase,
)
NUM_TOKENS = 8
@skipIfNoCtcDecoder
class CTCDecoderTest(TempDirMixin, TorchaudioTestCase):
def _get_decoder... | import itertools
import torch
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
get_asset_path,
skipIfNoCtcDecoder,
TempDirMixin,
TorchaudioTestCase,
)
NUM_TOKENS = 8
@skipIfNoCtcDecoder
class CTCDecoderTest(TempDirMixin, TorchaudioTestCase):
def _get_decode... |
"""Module to change the configuration of libsox, which is used by I/O functions like
:py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`.
.. warning::
Starting with version 2.8, we are refactoring TorchAudio to transition it
into a maintenance phase. As a result:
- Some APIs... | """Module to change the configuration of libsox, which is used by I/O functions like
:py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`.
"""
from typing import Dict, List
import torchaudio
sox_ext = torchaudio._extension.lazy_import_sox_ext()
from torchaudio._internal.module_utils im... |
from typing import Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import (
ID,
AnyUrl,
Embedding,
ImageUrl,
Tensor,
TextUrl,
T... | from typing import Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import ID, AnyUrl, Embedding, ImageUrl, Tensor, TorchTensor
T = TypeVar('T', bound='Pro... |
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 8, 27, 3],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b4.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimi... | _base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 8, 27, 3],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b4.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimi... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class CascadeRCNN(TwoStageDetector):
r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
Detection <https://arxiv.org/abs/1906.09756>`_"""
... | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class CascadeRCNN(TwoStageDetector):
r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
Detection <https://arxiv.org/abs/1906.09756>`_"""
... |
"""Test ZhipuAI Chat Model."""
from langchain_core.callbacks import CallbackManager
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_core.tools import tool
from langchain_community.chat_models.zhipuai impo... | """Test ZhipuAI Chat Model."""
from langchain_core.callbacks import CallbackManager
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_core.tools import tool
from langchain_community.chat_models.zhipuai impo... |
import logging
import os
from abc import abstractmethod
from typing import TYPE_CHECKING, Optional
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
if TYPE_CHECKING:
from fastapi import FastAPI
class FastAPIBaseGateway(BaseGateway):
"""Base FastAPI gateway. Implement thi... | import logging
import os
from abc import abstractmethod
from typing import Optional
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
class FastAPIBaseGateway(BaseGateway):
"""Base FastAPI gateway. Implement this abstract class in-case you want to build a fastapi-based Gateway... |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import os
from logging import getLogger
from typing import List
from sentencepiece import SentencePieceProcessor
logger = getLogger()
class Tokenizer:... | # Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from sentencepiece import SentencePieceProcessor
from logging import getLogger
from typing import List
import os
logger = getLogger()
class Tokenizer:... |
from pydantic import AnyUrl as BaseAnyUrl
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
class AnyUrl(BaseAnyUrl, BaseNode):
def _to_nested_item_protobuf(self) -> 'NodeProto':
"""Convert Document into a nested item protobuf message. This function should
be c... | from pydantic import AnyUrl as BaseAnyUrl
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
class AnyUrl(BaseAnyUrl, BaseNode):
def _to_nested_item_protobuf(self) -> 'NodeProto':
"""Convert Document into a nested item protobuf message. This function should be called wh... |
"""
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client: Client) -> None:
# generate some random data for demonstration
... | """
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client: Client) -> None:
# generate some random data for demonstration
... |
import torch
from torchvision.transforms.functional import InterpolationMode
def get_module(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.transforms.v2
return torchvision.transforms.v2
else:
import torchvision.t... | import torch
from torchvision.transforms.functional import InterpolationMode
def get_module(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.transforms.v2
return torchvision.transforms.v2
else:
import torchvision.t... |
from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image_tensor(image: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> features.Image:
if isinstance(image, np.ndarra... | from typing import Any, Dict, Tuple, Union
import numpy as np
import PIL.Image
import torch
from torchvision.io.video import read_video
from torchvision.prototype import features
from torchvision.prototype.utils._internal import ReadOnlyTensorBuffer
from torchvision.transforms import functional as _F
@torch.jit.unus... |
_base_ = 'faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_400mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=... | _base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_400mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init... |
from jina import DocumentArray, Executor, Flow, requests
def test_needs_docs_map():
class TestMergeDictDocMatrixExecutor(Executor):
@requests()
def foo(self, docs_map, **kwargs):
assert {'exec0', 'exec1'} == set(docs_map.keys())
f = (
Flow()
.add(name='exec0')
... | from jina import Flow, Executor, requests, DocumentArray
def test_needs_docs_map():
class TestMergeDictDocMatrixExecutor(Executor):
@requests()
def foo(self, docs_map, **kwargs):
assert {'exec0', 'exec1'} == set(docs_map.keys())
f = Flow().add(name='exec0'). \
add(name='e... |
from __future__ import annotations
import time
import torch
from torch._dynamo import device_interface # noqa: PLC2701 import-private-name
class DeviceProperties:
def __init__(self) -> None:
self.major = 8 # TODO: bypass check for H100 in triton_heuristics.py
self.max_threads_per_multi_process... | from __future__ import annotations
import time
import torch
from torch._dynamo import device_interface # noqa: PLC2701 import-private-name
class DeviceProperties:
def __init__(self) -> None:
self.major = 8 # TODO: bypass check for H100 in triton_heuristics.py
self.max_threads_per_multi_process... |
"""This file should contain all tests that need access to the internet (apart
from the ones in test_datasets_download.py)
We want to bundle all internet-related tests in one file, so the file can be
cleanly ignored in FB internal test infra.
"""
import os
import pathlib
from urllib.error import URLError
import pytes... | """This file should contain all tests that need access to the internet (apart
from the ones in test_datasets_download.py)
We want to bundle all internet-related tests in one file, so the file can be
cleanly ignored in FB internal test infra.
"""
import os
from urllib.error import URLError
import pytest
import torchv... |
"""Arg pack components."""
from typing import Any, Callable, Dict, Optional
from llama_index.core.base.query_pipeline.query import (
InputKeys,
OutputKeys,
QueryComponent,
)
from llama_index.core.bridge.pydantic import Field
class ArgPackComponent(QueryComponent):
"""
Arg pack component.
Pa... | """Arg pack components."""
from typing import Any, Callable, Dict, Optional
from llama_index.core.base.query_pipeline.query import (
InputKeys,
OutputKeys,
QueryComponent,
)
from llama_index.core.bridge.pydantic import Field
class ArgPackComponent(QueryComponent):
"""Arg pack component.
Packs a... |
_base_ = 'faster-rcnn_r50_fpg_crop640-50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
neck=dict(out_channels=128, inter_channels=128),
rpn_head=dict(in_channels=128),
roi_head=dict(
bbox_roi_extractor=dict(out_channels=128),
bbox_head=dict(in_channels=128)))
| _base_ = 'faster-rcnn_r50_fpn_crop640-50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
neck=dict(out_channels=128, inter_channels=128),
rpn_head=dict(in_channels=128),
roi_head=dict(
bbox_roi_extractor=dict(out_channels=128),
bbox_head=dict(in_channels=128)))
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.ops import point_sample
from torch import Tensor
def get_uncertainty(mask_pred: Tensor, labels: Tensor) -> Tensor:
"""Estimate uncertainty based on pred logits.
We estimate uncertainty as L1 distance between 0.0 and the logits
predict... | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.ops import point_sample
def get_uncertainty(mask_pred, labels):
"""Estimate uncertainty based on pred logits.
We estimate uncertainty as L1 distance between 0.0 and the logits
prediction in 'mask_pred' for the foreground class in `cla... |
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_batch_norm, has_method, import_modules_from_strings,
is_list_of, is_method_overr... | # Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_batch_norm, has_method,
import_modules_from_strings, is_... |
from . import assert_when_ready
def test_text_search(simple_index_with_docs): # noqa: F811
simple_index, docs = simple_index_with_docs
query_string = "Python is a valuable skill"
expected_text = docs[0].text
def pred():
docs, scores = simple_index.text_search(
query=query_string... | from . import assert_when_ready
def test_text_search(simple_index_with_docs): # noqa: F811
simple_index, docs = simple_index_with_docs
query_string = "Python is a valuable skill"
expected_text = docs[0].text
def pred():
docs, scores = simple_index.text_search(
query=query_string... |
import os
from typing import Union
from .filesystem import FileSystemReader, FileSystemWriter
from .storage import StorageReader, StorageWriter
def _storage_setup(
storage: Union[StorageReader, StorageWriter, None],
checkpoint_id: Union[str, os.PathLike, None],
reader: bool = False,
) -> Union[None, Stor... | import os
from typing import Union
from .filesystem import FileSystemReader, FileSystemWriter
from .storage import StorageReader, StorageWriter
def _storage_setup(
storage: Union[StorageReader, StorageWriter, None],
checkpoint_id: Union[str, os.PathLike, None],
reader: bool = False,
) -> Union[None, Stor... |
import asyncio
import logging
import os
import threading
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
... | import asyncio
import logging
import os
import threading
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
... |
import unittest
import torch
import torch.nn.functional as F
from diffusers import VQDiffusionScheduler
from .test_schedulers import SchedulerCommonTest
class VQDiffusionSchedulerTest(SchedulerCommonTest):
scheduler_classes = (VQDiffusionScheduler,)
def get_scheduler_config(self, **kwargs):
config... | import torch
import torch.nn.functional as F
from diffusers import VQDiffusionScheduler
from .test_schedulers import SchedulerCommonTest
class VQDiffusionSchedulerTest(SchedulerCommonTest):
scheduler_classes = (VQDiffusionScheduler,)
def get_scheduler_config(self, **kwargs):
config = {
... |
"""Argparser module for WorkerRuntime"""
from jina.parsers.helper import KVAppendAction
def mixin_base_runtime_parser(arg_group):
"""Mixing in arguments required by any class that extends :class:`AsynNewLoopRuntime` into the given parser.
:param arg_group: the parser instance to which we add arguments
""... | """Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction
def mixin_base_runtime_parser(arg_group):
"""Mixing in arguments required by any class that extends :class:`AsynNewLoopRuntime` into the given parser.
:param arg_group: the parser... |
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import ops
from keras.src import testing
class AutoContrastTest(testing.TestCase, parameterized.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_t... | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import ops
from keras.src import testing
class AutoContrastTest(testing.TestCase, parameterized.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_t... |
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imp... | from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imp... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestCornerNet(Tes... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestCornerNet(Tes... |
import logging
import os
import torch
from torchaudio._internal import download_url_to_file, module_utils as _mod_utils
def _get_chars():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"a... | import logging
import os
import torch
from torchaudio._internal import download_url_to_file, module_utils as _mod_utils
def _get_chars():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"a... |
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.base_doc import AnyDoc
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDoc):
text: str
tensor: NdArray
da ... | import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.base_doc import AnyDoc
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDoc):
text: str
tensor: NdArray
da ... |
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import trimesh
T = TypeVar('T', bound='Url... | from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, Type, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import trimesh
from pydantic impo... |
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
fro... | from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
fro... |
import pytest
import subprocess
import os
from typing import Generator
from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.storage.index_store.gel import (
GelIndexStore,
)
from llama_index.storage.kvstore.gel import GelKVStore
try:
import gel # noqa
no_packages = False
exc... | import pytest
import subprocess
from typing import Generator
from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.storage.index_store.gel import (
GelIndexStore,
)
from llama_index.storage.kvstore.gel import GelKVStore
try:
import gel # noqa
no_packages = False
except Import... |
import requests
from docarray import DocumentArray
def test_weaviate_hnsw(start_storage):
da = DocumentArray(
storage='weaviate',
config={
'n_dim': 100,
'ef': 100,
'ef_construction': 100,
'max_connections': 16,
'dynamic_ef_min': 50,
... | import requests
from docarray import DocumentArray
def test_weaviate_hnsw(start_storage):
da = DocumentArray(
storage='weaviate',
config={
'n_dim': 100,
'ef': 100,
'ef_construction': 100,
'max_connections': 16,
'dynamic_ef_min': 50,
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wra... | # Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashi... |
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T =... | from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T =... |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_typ... | from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_typ... |
import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]... | import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import (
LlamaIndexGraphRetriever,
LlamaIndexRetriever,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising de... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import (
LlamaIndexGraphRetriever,
LlamaIndexRetriever,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising de... |
from llama_index.core.storage.chat_store.base import BaseChatStore
from llama_index.core.storage.chat_store.simple_chat_store import SimpleChatStore
RECOGNIZED_CHAT_STORES = {
SimpleChatStore.class_name(): SimpleChatStore,
}
def load_chat_store(data: dict) -> BaseChatStore:
"""Load a chat store from a dict."... | from llama_index.core.storage.chat_store.base import BaseChatStore
from llama_index.core.storage.chat_store.simple_chat_store import SimpleChatStore
RECOGNIZED_CHAT_STORES = {
SimpleChatStore.class_name(): SimpleChatStore,
}
def load_chat_store(data: dict) -> BaseChatStore:
"""Load a chat store from a dict."... |
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from unittest.mock import Mock, patch
from mmengine.hooks import CheckpointHook
class MockPetrel:
_allow_symlink = False
def __init__(self):
pass
@property
def name(self):
return self.__class__.__name__... | # Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from unittest.mock import Mock, patch
from mmengine.hooks import CheckpointHook
class MockPetrel:
_allow_symlink = False
def __init__(self):
pass
@property
def name(self):
return self.__class__.__name__... |
import os
import sys
import pkg_resources
from setuptools import find_packages, setup
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux"):
triton_requirement... | import os
import sys
import pkg_resources
from setuptools import setup, find_packages
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux"):
triton_requirement... |
from jina import Executor, requests
from docarray import DocList
from docarray.documents import TextDoc
class MyExecutor(Executor):
@requests
def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[TextDoc]:
docs[0].text = 'hello, world!'
docs[1].text = 'goodbye, world!'
return docs... | from jina import Executor, requests
from docarray import DocList
from docarray.documents import TextDoc
class MyExecutor(Executor):
@requests
def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[TextDoc]:
docs[0].text = 'hello, world!'
docs[1].text = 'goodbye, world!'
return docs |
from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
from backend.integrations.providers import ProviderName
ExaCredentials = APIKeyCredentials
ExaCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.EXA],
... | from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
ExaCredentials = APIKeyCredentials
ExaCredentialsInput = CredentialsMetaInput[
Literal["exa"],
Literal["api_key"],
]
TEST_CREDENTIALS = APIKeyCredentials(
id... |
from docarray import BaseDocument
from docarray.typing import AnyUrl
def test_set_any_url():
class MyDocument(BaseDocument):
any_url: AnyUrl
d = MyDocument(any_url="https://jina.ai")
assert isinstance(d.any_url, AnyUrl)
assert d.any_url == "https://jina.ai"
| from docarray import Document
from docarray.typing import AnyUrl
def test_set_any_url():
class MyDocument(Document):
any_url: AnyUrl
d = MyDocument(any_url="https://jina.ai")
assert isinstance(d.any_url, AnyUrl)
assert d.any_url == "https://jina.ai"
|
import csv
import os
from pathlib import Path
from torchaudio.datasets import ljspeech
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
_TRANSCRIPTS = [
"Test transcript 1",
"Test transcript 2",
"Test transcript 3",
"In 1465 Sweynhe... | import csv
import os
from pathlib import Path
from torchaudio.datasets import ljspeech
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
_TRANSCRIPTS = [
"Test transcript 1",
"Test transcript 2",
"Test transcript 3",
"In 1465 Sweynhe... |
from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def splade_bert_tiny_model_reused() -> SparseEnc... | from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def splade_bert_tiny_model_reused() -> SparseEnc... |
"""
this test checks the docstring of all of our public API. It does so
by checking the `__all__` of each of our namespaces.
to add a new namespace you need to
* import it
* add it to the `SUB_MODULE_TO_CHECK` list
"""
import pytest
from mktestdocs import check_docstring, get_codeblock_members
import docarray.data
imp... | """
this test checks the docstring of all of our public API. It does so
by checking the `__all__` of each of our namespaces.
to add a new namespace you need to
* import it
* add it to the `SUB_MODULE_TO_CHECK` list
"""
import pytest
from mktestdocs import check_docstring, get_codeblock_members
import docarray.data
imp... |
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.computation import AbstractComputationalBackend
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import Mo... | import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.computation import AbstractComputationalBackend
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import Mo... |
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = No... | checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = No... |
from __future__ import annotations
from .CSRLoss import CSRLoss
from .CSRReconstructionLoss import CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import S... | from __future__ import annotations
from .CSRLoss import CSRLoss
from .CSRReconstructionLoss import CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import S... |
from textwrap import dedent
from types import SimpleNamespace
from unittest.mock import patch
from urllib.parse import quote
import pytest
from huggingface_hub import CommitOperationAdd, CommitOperationDelete
import datasets
from datasets.config import METADATA_CONFIGS_FIELD
from datasets.hub import delete_from_hub
f... | from textwrap import dedent
from types import SimpleNamespace
from unittest.mock import patch
from urllib.parse import quote
import pytest
from huggingface_hub import CommitOperationAdd, CommitOperationDelete
import datasets
from datasets.config import METADATA_CONFIGS_FIELD
from datasets.hub import delete_from_hub
f... |
from typing import Any, Optional
class ServiceContext:
"""
Service Context container.
NOTE: Deprecated, use llama_index.settings.Settings instead or pass in
modules to local functions/methods/interfaces.
"""
def __init__(self, **kwargs: Any) -> None:
raise ValueError(
"S... | from typing import Any, Optional
class ServiceContext:
"""Service Context container.
NOTE: Deprecated, use llama_index.settings.Settings instead or pass in
modules to local functions/methods/interfaces.
"""
def __init__(self, **kwargs: Any) -> None:
raise ValueError(
"Servic... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Iterator, List, Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.registry import EVALUATOR, METRICS
from .metric import BaseMetric
@EVALUATOR.register_module()
class Evaluator:
"""Wrapper class to compose multiple... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Iterator, List, Optional, Sequence, Union
from mmengine.data import BaseDataElement
from ..registry.root import EVALUATOR, METRICS
from .metric import BaseMetric
@EVALUATOR.register_module()
class Evaluator:
"""Wrapper class to compose multiple :... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, List, Union, Dict
import numpy as np
from annoy import AnnoyIndex
from jina import Executor, requests, DocumentArray, Document
from jina_commons import get_logger
from jina_commons.index... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, List, Union, Dict
import numpy as np
from annoy import AnnoyIndex
from jina import Executor, requests, DocumentArray, Document
from jina_commons import get_logger
from jina_commons.index... |
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import CSRReconstructionLoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipl... | from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses.ReconstructionLoss import ReconstructionLoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegat... |
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.1... | _base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
... |
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
... | from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, s... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc0'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.0.0'
mmengi... | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc0'
mmcv_maximum_version = '2.0.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.0.0'
mmengi... |
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"... | from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"... |
import os
import os.path as osp
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from mmdet.evaluation import CityScapesMetric
try:
import cityscapesscripts
except ImportError:
cityscapesscripts = None
class TestCityScapesMetric(unittest.TestCase):
def setUp(self):... | import os
import os.path as osp
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from mmdet.evaluation import CityScapesMetric
try:
import cityscapesscripts
except ImportError:
cityscapesscripts = None
class TestCityScapesMetric(unittest.TestCase):
def setUp(self):... |
from __future__ import annotations
from typing import Any, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float... | from __future__ import annotations
from typing import Any, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float... |
from qdrant_client.http.models.models import Distance
DISTANCES = {
'cosine': Distance.COSINE,
'euclidean': Distance.EUCLID,
'dot': Distance.DOT,
}
| from qdrant_openapi_client.models.models import Distance
DISTANCES = {
'cosine': Distance.COSINE,
'euclidean': Distance.EUCLID,
'dot': Distance.DOT,
}
|
"""Cloudflare embeddings file."""
from typing import Any, List, Optional
import requests
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
fro... | """Cloudflare embeddings file."""
from typing import Any, List, Optional
import requests
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
fro... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
import torch.nn as nn
from torch.optim import SGD
from mmengine.model import BaseDataPreprocessor, BaseModel
from mmengine.optim import OptimWrapper
from mmengine.registry import MODELS
from mmengine.testing imp... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
import torch.nn as nn
from torch.optim import SGD
from mmengine.model import BaseDataPreprocessor, BaseModel
from mmengine.optim import OptimWrapper
from mmengine.registry import MODELS
from mmengine.testing imp... |
import gzip
import os
from . import InputExample
class NLIDataReader(object):
"""Reads in the Stanford NLI dataset and the MultiGenre NLI dataset"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples=0):
"""
dat... | from . import InputExample
import csv
import gzip
import os
class NLIDataReader(object):
"""
Reads in the Stanford NLI dataset and the MultiGenre NLI dataset
"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples=0):
... |