| input (string, 33–5k chars) | output (string, 32–5k chars) |
|---|---|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class PAA(SingleStageDetector):
"""Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pd... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class PAA(SingleStageDetector):
"""Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf... |
from abc import ABC
from typing import Any, Optional, Tuple, Type, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.torch_tensor import TorchTensor
T = TypeVar('T', bound='Embedding')
class EmbeddingMixin(... | from typing import TypeVar
from docarray.proto import NodeProto
from docarray.typing.tensor import NdArray
T = TypeVar('T', bound='Embedding')
class Embedding(NdArray):
def _to_node_protobuf(self: T, field: str = 'tensor') -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This functio... |
import PIL.Image
import pytest
import torch
import torchvision.prototype.transforms.utils
from prototype_common_utils import make_bounding_box, make_detection_mask, make_image
from torchvision.prototype import datapoints
from torchvision.prototype.transforms.functional import to_image_pil
from torchvision.prototype.... | import PIL.Image
import pytest
import torch
import torchvision.prototype.transforms.utils
from prototype_common_utils import make_bounding_box, make_detection_mask, make_image
from torchvision.prototype import datapoints
from torchvision.prototype.transforms.functional import to_image_pil
from torchvision.prototype.... |
"""Base tool spec class."""
import asyncio
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import ... | """Base tool spec class."""
import asyncio
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import ... |
"""
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Authors: The scikit-learn developers
# SPDX-Li... | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Authors: The scikit-learn developers
# SPDX-Li... |
import os
from functools import partial
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
"""Read contents of compressed file as a filesystem with one file inside."""
root_marker = ""
protocol: st... | import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
"""Read contents of compressed file as a filesystem with one file inside."""
root_marker = ""
protocol: str = (
None # protocol... |
from pathlib import Path
from typing import List
import pytest
from flair_text import FlairTextEncoder
from jina import Document, DocumentArray, Executor
_EMBEDDING_DIM = 100
@pytest.fixture(scope='session')
def basic_encoder() -> FlairTextEncoder:
return FlairTextEncoder()
def test_config():
ex = Executo... | from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from ...flair_text import FlairTextEncoder
_EMBEDDING_DIM = 100
@pytest.fixture(scope='session')
def basic_encoder() -> FlairTextEncoder:
return FlairTextEncoder()
def test_config():
ex = Exe... |
# Copyright (c) OpenMMLab. All rights reserved.
third_part_libs = [
'pip install -r ../requirements/albu.txt',
'pip install instaboostfast',
'pip install git+https://github.com/cocodataset/panopticapi.git',
'pip install timm',
'pip install mmpretrain',
'pip install git+https://github.com/lvis-d... | # Copyright (c) OpenMMLab. All rights reserved.
third_part_libs = [
'pip install -r ../requirements/albu.txt',
'pip install instaboostfast',
'pip install git+https://github.com/cocodataset/panopticapi.git',
'pip install timm',
'pip install mmcls>=1.0.0rc0',
'pip install git+https://github.com/l... |
from typing import Optional
import numpy as np
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.typing import AnyTensor, ImageUrl
from jina import Deployment, Executor, Flow, requests
def test_different_document_schema():
class Image(BaseDocument):
tens... | from typing import Optional
import numpy as np
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.typing import AnyTensor, ImageUrl
from jina import Deployment, Executor, Flow, requests
def test_different_document_schema():
class Image(BaseDocument):
tens... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class VFNet(SingleStageDetector):
"""Implementation of `VarifocalNet
(VFNet).<https://arxiv.org/abs/2008.13367>`_"""
def __init__(self,
... | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class VFNet(SingleStageDetector):
"""Implementation of `VarifocalNet
(VFNet).<https://arxiv.org/abs/2008.13367>`_"""
def __init__(self,
... |
import inspect
import re
import warnings
from operator import itemgetter
from typing import Optional, Tuple, List
from jina import Document
def get_properties(cls) -> List[Tuple[str, Optional[str], Optional[str]]]:
src = inspect.getsource(cls)
members = dict(inspect.getmembers(cls))
setters = re.findall(... | import inspect
import re
import warnings
from operator import itemgetter
from typing import Optional, Tuple, List
from jina import Document
def get_properties(cls) -> List[Tuple[str, Optional[str], Optional[str]]]:
src = inspect.getsource(cls)
members = dict(inspect.getmembers(cls))
setters = re.findall(... |
"""LLM Chain for generating examples for question answering."""
from __future__ import annotations
from typing import Any
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseLLMOutputParser
from pydantic import Field
from langchain.chains.llm import LLMChain
fr... | """LLM Chain for generating examples for question answering."""
from __future__ import annotations
from typing import Any
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseLLMOutputParser
from pydantic import Field
from langchain.chains.llm import LLMChain
fr... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/sp... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/sp... |
"""Test Base Schema of documents."""
from collections.abc import Iterator
from langchain_core.document_loaders import BaseBlobParser, Blob
from langchain_core.documents import Document
def test_base_blob_parser() -> None:
"""Verify that the eager method is hooked up to the lazy method by default."""
class ... | """Test Base Schema of documents."""
from typing import Iterator
from langchain_core.document_loaders import BaseBlobParser, Blob
from langchain_core.documents import Document
def test_base_blob_parser() -> None:
"""Verify that the eager method is hooked up to the lazy method by default."""
class MyParser(... |
import random
import asyncio
import time
import aiohttp
import grpc
def _raise_last_attempt(err, attempt):
if isinstance(err, asyncio.CancelledError):
trailing_metadata = grpc.aio.Metadata()
trailing_metadata.add('jina-client-attempts', str(attempt))
raise grpc.aio.AioRpcError(
... | import asyncio
import random
import aiohttp
import grpc
async def wait_or_raise_err(
attempt: int,
err: Exception,
max_attempts: float,
backoff_multiplier: float,
initial_backoff: float,
max_backoff: float,
):
"""
Accepts retry parameters and the underlying. The error is raised if the... |
"""Module to test base parser implementations."""
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import (
BaseGenerat... | """Module to test base parser implementations."""
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import (
BaseGenerationOutputParser,
BaseTransformOutput... |
from llama_index.core.indices.managed.base import BaseManagedIndex
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.indices.managed.vertexai import VertexAIIndex
from llama_index.indices.managed.vertexai import VertexAIRetriever
def test_class():
names_of_base_classes = [b.__name__ ... | from llama_index.core.indices.managed.base import BaseManagedIndex
from llama_index.indices.managed.vertexai import VertexAIIndex
def test_class():
names_of_base_classes = [b.__name__ for b in VertexAIIndex.__mro__]
assert BaseManagedIndex.__name__ in names_of_base_classes
|
from typing import Optional
from rich.progress import (
BarColumn,
MofNCompleteColumn,
Progress,
SpinnerColumn,
Text,
TextColumn,
TimeElapsedColumn,
TimeRemainingColumn,
)
class QPSColumn(TextColumn):
def render(self, task) -> Text:
if task.speed:
_text = f'{ta... | from rich.progress import (
Progress,
BarColumn,
SpinnerColumn,
MofNCompleteColumn,
TextColumn,
TimeRemainingColumn,
Text,
)
class QPSColumn(TextColumn):
def render(self, task) -> Text:
if task.speed:
_text = f'{task.speed:.0f} QPS'
else:
_text =... |
from __future__ import annotations
import difflib
from pathlib import Path
import pytest
from typer.testing import CliRunner
from langchain_cli.cli import app
from tests.unit_tests.migrate.cli_runner.cases import before, expected
from tests.unit_tests.migrate.cli_runner.folder import Folder
pytest.importorskip("gri... | # ruff: noqa: E402
from __future__ import annotations
import pytest
pytest.importorskip("gritql")
import difflib
from pathlib import Path
from typer.testing import CliRunner
from langchain_cli.cli import app
from tests.unit_tests.migrate.cli_runner.cases import before, expected
from tests.unit_tests.migrate.cli_ru... |
import warnings
from typing import Any, List, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
call = ", num_output_channe... | import warnings
from typing import Any, List, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
call = ", num_output_channe... |
__version__ = '0.17.0'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
| __version__ = '0.16.6'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .build_functions import (build_from_cfg, build_model_from_cfg,
build_runner_from_cfg, build_scheduler_from_cfg)
from .default_scope import DefaultScope
from .registry import Registry
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR,... | # Copyright (c) OpenMMLab. All rights reserved.
from .build_functions import (build_from_cfg, build_model_from_cfg,
build_runner_from_cfg, build_scheduler_from_cfg)
from .default_scope import DefaultScope
from .registry import Registry
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR,... |
import requests
from packaging import version
from typing import Sequence, Union, List, Optional
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
)
from tgi.types import (
Message,
)
def resolve_tgi_function_call(url: str) -> bool:
url = f"{url}/info"
model_info = dict(req... | import requests
from packaging import version
from typing import Sequence, Union, List, Optional
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
)
from tgi.types import (
Message,
)
def resolve_tgi_function_call(url: str) -> bool:
url = f"{url}/info"
model_info = dict(req... |
from __future__ import annotations
import concurrent.futures
from pathlib import Path
from typing import Iterator, Literal, Optional, Sequence, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loade... | from __future__ import annotations
import concurrent.futures
from pathlib import Path
from typing import Iterator, Literal, Optional, Sequence, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loade... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import numpy as np
import torch
from mmcv.transforms import Compose
from torchvision.transforms import functional as F
from mmdet.apis import init_detector
try:
import ffmpegcv
except ImportError:
raise ImportError(
... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import numpy as np
import torch
from torchvision.transforms import functional as F
from mmdet.apis import init_detector
from mmdet.datasets.pipelines import Compose
try:
import ffmpegcv
except ImportError:
raise ImportErro... |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... |
_base_ = '../cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResN... | _base_ = '../cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResN... |
from pathlib import Path
from typing import Union, Tuple, List
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""Create the *LibriMix* [:footcite:`cosentino2020librimix`] dataset.
Args:
root (st... | from pathlib import Path
from typing import Union, Tuple, List
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""Create the LibriMix dataset.
Args:
root (str or Path): The path to the directory ... |
import requests
from docarray import DocumentArray
def test_weaviate_hnsw(start_storage):
da = DocumentArray(
storage='weaviate',
config={
'n_dim': 100,
'ef': 100,
'ef_construction': 100,
'max_connections': 16,
'dynamic_ef_min': 50,
... | import requests
from docarray import DocumentArray
def test_weaviate_hnsw(start_storage):
da = DocumentArray(
storage='weaviate',
config={'n_dim': 100, 'ef': 100, 'ef_construction': 100, 'max_connections': 16},
)
result = requests.get('http://localhost:8080/v1/schema').json()
classe... |
from __future__ import annotations
import gzip
import os
from . import InputExample
class NLIDataReader:
"""Reads in the Stanford NLI dataset and the MultiGenre NLI dataset"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples... | from __future__ import annotations
import gzip
import os
from . import InputExample
class NLIDataReader(object):
"""Reads in the Stanford NLI dataset and the MultiGenre NLI dataset"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_... |
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc ... | from typing import Union
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.embedding.torch import TorchEmbedding
tf_available =... |
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Imag... | import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help=... |
"""
Computes embeddings
"""
from typing import Optional
import numpy as np
import pytest
from sentence_transformers import SentenceTransformer
@pytest.mark.parametrize("normalize_embeddings", (False, True))
@pytest.mark.parametrize("prompt_name", (None, "retrieval"))
def test_encode_multi_process(
stsb_bert_ti... | """
Computes embeddings
"""
import numpy as np
import pytest
from typing import Optional
from sentence_transformers import SentenceTransformer
@pytest.mark.parametrize("normalize_embeddings", (False, True))
@pytest.mark.parametrize("prompt_name", (None, "retrieval"))
def test_encode_multi_process(
stsb_bert_tin... |
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.evaluator import Evaluator
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.runner import Runner
from t... | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.evaluator import Evaluator
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.runner import Runner
from t... |
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.json import json
class StepThroughItemsBlock(Block):
class Input(BlockSchema):
items: list = SchemaField(
advanced=False,
... | from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class StepThroughItemsBlock(Block):
class Input(BlockSchema):
items: list | dict = SchemaField(
description="The list or dictionary of items to itera... |
from torio.io import CodecConfig, StreamingMediaDecoder as StreamReader, StreamingMediaEncoder as StreamWriter
from ._effector import AudioEffector
from ._playback import play_audio
__all__ = [
"AudioEffector",
"StreamReader",
"StreamWriter",
"CodecConfig",
"play_audio",
]
| from ._effector import AudioEffector
from ._playback import play_audio
from ._stream_reader import StreamReader
from ._stream_writer import CodecConfig, StreamWriter
__all__ = [
"AudioEffector",
"StreamReader",
"StreamWriter",
"CodecConfig",
"play_audio",
]
|
import asyncio
import os
from jina import __default_host__
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
__all__ = ['HTTPGatewayRuntime']
from jina.serve.runtimes.gateway.http.gateway import HTTPGateway
class HTTPGatewayRuntime(GatewayRuntim... | import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
__all__ = ['HTTPGatewayRuntime']
class HTTPGatewayRuntime(GatewayRuntime):
... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_ar... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
parser = ... |
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain.agents.agent imp... | """Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain.agents.agent imp... |
"""
Official evaluation script for ReCoRD v1.0.
(Some functions are adopted from the SQuAD evaluation script.)
"""
import argparse
import json
import re
import string
import sys
from collections import Counter
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
... | """
Official evaluation script for ReCoRD v1.0.
(Some functions are adopted from the SQuAD evaluation script.)
"""
import argparse
import json
import re
import string
import sys
from collections import Counter
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
... |
from ._optical_flow import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel
from ._stereo_matching import (
CarlaStereo,
CREStereo,
ETH3DStereo,
FallingThingsStereo,
InStereo2k,
Kitti2012Stereo,
Kitti2015Stereo,
Middlebury2014Stereo,
SceneFlowStereo,
SintelStereo,
)
from .ca... | from ._optical_flow import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel
from ._stereo_matching import (
CarlaStereo,
CREStereo,
ETH3DStereo,
FallingThingsStereo,
InStereo2k,
Kitti2012Stereo,
Kitti2015Stereo,
Middlebury2014Stereo,
SceneFlowStereo,
SintelStereo,
)
from .ca... |
"""Product extraction pack."""
import asyncio
from typing import Any, Dict
from llama_index.core import SimpleDirectoryReader
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.program.multi_modal_llm_program import (
Mu... | """Product extraction pack."""
import asyncio
from typing import Any, Dict
from llama_index.core import SimpleDirectoryReader
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.program.multi_modal_llm_program import (
Mu... |
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of N... | from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
MAX_INT_16 = 2**15
T = TypeVar('T', bound='AudioNdArray')
@_register_proto(proto_type_name='aud... |
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="2.7.0.dev0",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embe... | from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="2.6.0.dev0",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embe... |
from typing import List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class TrafilaturaWebReader(BasePydanticReader):
"""
Trafilatura web page reader.
Reads pages from the web.
Requires the `trafilatura` package.
"""
is_remote: bo... | from typing import List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class TrafilaturaWebReader(BasePydanticReader):
"""Trafilatura web page reader.
Reads pages from the web.
Requires the `trafilatura` package.
"""
is_remote: bool = ... |
from workflows.resource import Resource, ResourceDefinition, ResourceManager # noqa
| import inspect
from typing import (
Callable,
Generic,
TypeVar,
Union,
Awaitable,
Dict,
Any,
cast,
)
from pydantic import (
BaseModel,
ConfigDict,
)
T = TypeVar("T")
class _Resource(Generic[T]):
def __init__(
self, factory: Callable[..., Union[... |
# mypy: allow-untyped-defs
from collections import OrderedDict
__all__ = ["raises", "expand_tuples", "reverse_dict", "groupby", "typename"]
def raises(err, lamda): # codespell:ignore lamda
try:
lamda() # codespell:ignore lamda
return False
except err:
return True
def expand_tuple... | # mypy: allow-untyped-defs
from collections import OrderedDict
__all__ = ["raises", "expand_tuples", "reverse_dict", "groupby", "typename"]
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def expand_tuples(L):
"""
>>> expand_tuples([1, (2, 3)])
... |
"""
This script trains sentence transformers with a triplet loss function.
As corpus, we use the wikipedia sections dataset that was described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
import logging
import traceback
from datetime import datetime
from datasets import load_da... | """
This script trains sentence transformers with a triplet loss function.
As corpus, we use the wikipedia sections dataset that was described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
import traceback
from sentence_transformers import SentenceTransformer
from sentence_transf... |
from typing import Union, Iterable, MutableSequence, Iterator
from docarray.array.storage.memory.backend import needs_id2offset_rebuild
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like m... | from typing import Union, Iterable, MutableSequence, Iterator
from ..memory.backend import needs_id2offset_rebuild
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
@needs_id2offset_rebuild
de... |
import copy
import warnings
from collections.abc import Mapping, Sequence
from typing import Any, TypeVar, Union
from torch.utils.data.datapipes.datapipe import MapDataPipe
_T = TypeVar("_T")
__all__ = ["SequenceWrapperMapDataPipe"]
class SequenceWrapperMapDataPipe(MapDataPipe[_T]):
r"""
Wraps a sequence ... | # mypy: allow-untyped-defs
import copy
import warnings
from torch.utils.data.datapipes.datapipe import MapDataPipe
__all__ = ["SequenceWrapperMapDataPipe"]
class SequenceWrapperMapDataPipe(MapDataPipe):
r"""
Wraps a sequence object into a MapDataPipe.
Args:
sequence: Sequence object to be wrap... |
from __future__ import annotations
import json
import logging
import re
from re import Pattern
from typing import Optional, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from pyd... | from __future__ import annotations
import json
import logging
import re
from re import Pattern
from typing import Optional, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from pyd... |
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .model_test_impl import Tacotron2DecoderTests, Tacotron2EncoderTests, Tacotron2Tests
class TestTacotron2EncoderFloat32CPU(Tacotron2EncoderTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTaco... | import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .model_test_impl import (
Tacotron2DecoderTests,
Tacotron2EncoderTests,
Tacotron2Tests,
)
class TestTacotron2EncoderFloat32CPU(Tacotron2EncoderTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")... |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... | """
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... |
from keras.src import backend
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
class BaseGlobalPooling(Layer):
"""Base global pooling layer."""
def __init__(
self, pool_dimensions, data_format=None, keepdims=False, **kwargs
):
super().__init__(**k... | from keras.src import backend
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
class BaseGlobalPooling(Layer):
"""Base global pooling layer."""
def __init__(
self, pool_dimensions, data_format=None, keepdims=False, **kwargs
):
super().__init__(**k... |
import logging
from typing import List, Optional
from llama_index.core.schema import Document
from llama_index.readers.box.BoxAPI.box_api import (
box_check_connection,
get_box_files_details,
get_box_folder_files_details,
get_text_representation,
)
from llama_index.readers.box.BoxAPI.box_llama_adaptors... | import logging
from typing import List, Optional
from llama_index.core.schema import Document
from llama_index.readers.box.BoxAPI.box_api import (
box_check_connection,
get_box_files_details,
get_box_folder_files_details,
get_text_representation,
)
from llama_index.readers.box.BoxAPI.box_llama_adaptors... |
import os
from llama_index.core.tools.function_tool import FunctionTool
import pytest
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole
from llama_index.llms.gemini import Gemini
from llama_index.llms.gemini.utils import chat_message_t... | import os
import pytest
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole
from llama_index.llms.gemini import Gemini
from llama_index.llms.gemini.utils import chat_message_to_gemini
def test_embedding_class():
names_of_base_class... |
try:
from docarray import BaseDoc as Document
from docarray import DocList as DocumentArray
docarray_v2 = True
except ImportError:
from docarray import Document, DocumentArray
docarray_v2 = False
| try:
from docarray import BaseDoc as Document
from docarray import DocArray as DocumentArray
docarray_v2 = True
except ImportError:
from docarray import Document, DocumentArray
docarray_v2 = False
|
from collections.abc import AsyncIterator
import pytest
from langchain_core.utils.aiter import abatch_iterate
@pytest.mark.parametrize(
("input_size", "input_iterable", "expected_output"),
[
(2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]),
(3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]),
... | from collections.abc import AsyncIterator
import pytest
from langchain_core.utils.aiter import abatch_iterate
@pytest.mark.parametrize(
("input_size", "input_iterable", "expected_output"),
[
(2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]),
(3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]),
... |
from llama_index_instrumentation.event_handlers.base import BaseEventHandler # noqa
| from typing import Any
from abc import abstractmethod
from llama_index.core.bridge.pydantic import BaseModel, ConfigDict
from llama_index.core.instrumentation.events.base import BaseEvent
class BaseEventHandler(BaseModel):
"""Base callback handler that can be used to track event starts and ends."""
model_con... |
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvisi... | """Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvisi... |
"""Module for Jina Requests."""
from typing import (
TYPE_CHECKING,
AsyncIterable,
Dict,
Iterable,
Iterator,
Optional,
Tuple,
Union,
)
from jina._docarray import Document
from jina.clients.request.helper import _new_data_request, _new_data_request_from_batch
from jina.enums import Data... | """Module for Jina Requests."""
from typing import (
TYPE_CHECKING,
AsyncIterable,
Dict,
Iterable,
Iterator,
Optional,
Tuple,
Union,
)
from jina._docarray import Document
from jina.clients.request.helper import _new_data_request, _new_data_request_from_batch
from jina.enums import Data... |
from typing import Any
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
def test_parse() -> None:
"""Test parsing structured output."""
response_schemas = [
ResponseSchema(name="name", description="desc"),
... | from typing import Any, Dict
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
def test_parse() -> None:
"""Test parsing structured output."""
response_schemas = [
ResponseSchema(name="name", description="desc"),
... |
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.l... | from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.l... |
from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from torchvision.tv_tensors._tv_tensor import TVTensor
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(TVTensor):
categories: Optional[Sequence[str]]... | from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from torchvision.tv_tensors._tv_tensor import TVTensor
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(TVTensor):
categories: Optional[Sequence[str]]... |
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],... | import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],... |
import copy
import warnings
from dataclasses import InitVar, dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*... | import copy
import warnings
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
... |
"""Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from ... | """Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from ... |
import requests
from typing import List, Dict
DEFAULT_GITBOOK_API_URL = "https://api.gitbook.com/v1"
class GitbookClient:
"""
Gitbook Restful API Client.
Helper Class to invoke gitbook restful api & parse result
Args:
api_token (str): Gitbook API Token.
api_url (str): Gitbook API En... | import requests
from typing import List, Dict
DEFAULT_GITBOOK_API_URL = "https://api.gitbook.com/v1"
class GitbookClient:
"""Gitbook Restful API Client.
Helper Class to invoke gitbook restful api & parse result
Args:
api_token (str): Gitbook API Token.
api_url (str): Gitbook API Endpoin... |
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this cl... | from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this cl... |
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
import torch.nn.functional as F
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.uti... | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
import torch.nn.functional as F
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils import ConfigType, OptMultiConfig, SampleList
from mmdet.registry imp... |
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
class HubSpotContactBlock(Bl... | from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HubSpotContactBlock(Bl... |
import os
import fsspec
import pytest
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
assert "mock" in _fsspec... | import os
import fsspec
import pytest
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
... |
from typing import Optional
import pytest
import torch
from docarray import BaseDocument, DocumentArray, Text
from docarray.array.abstract_array import AnyDocumentArray
from docarray.typing import TorchTensor
num_docs = 5
num_sub_docs = 2
num_sub_sub_docs = 3
@pytest.fixture
def multi_model_docs():
class SubSu... | from typing import Optional
import pytest
import torch
from docarray import Document, DocumentArray, Text
from docarray.array.abstract_array import AnyDocumentArray
from docarray.typing import TorchTensor
num_docs = 5
num_sub_docs = 2
num_sub_sub_docs = 3
@pytest.fixture
def multi_model_docs():
class SubSubDoc... |
import numpy as np
import pytest
from docarray.proto import DocProto, NodeProto
from docarray.typing import NdArray
@pytest.mark.proto
def test_ndarray():
original_ndarray = np.zeros((3, 224, 224))
custom_ndarray = NdArray._docarray_from_native(original_ndarray)
tensor = NdArray.from_protobuf(custom_n... | import numpy as np
import pytest
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import NdArray
@pytest.mark.proto
def test_ndarray():
original_ndarray = np.zeros((3, 224, 224))
custom_ndarray = NdArray._docarray_from_native(original_ndarray)
tensor = NdArray.from_protobuf(cus... |
"""Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from collections import defaultdict
from time import time
import numpy as np
from scipy.linalg import svd
from sklearn.datasets import make_low_rank_matrix
from sklearn.utils.e... | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from collections import defaultdict
from time import time
import numpy as np
from scipy.linalg import svd
from sklearn.datasets import make_low_rank_matrix
from sklearn.utils.e... |
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
from tests.utils import is_ci
if not is_datasets_available():
pytest.skip(
... | from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
from tests.utils import is_ci
if not is_datasets_available():
pytest.skip(
... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledis... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledis... |
import os
import shutil
import pytest
import torch
import torchaudio
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank: int = 0):
super().__init__()
self.blank = blank
self.labels = labels
def forward(self, logits: torch.Tensor) -> str:
"""Given a sequ... | import os
import shutil
import pytest
import torch
import torchaudio
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank: int = 0):
super().__init__()
self.blank = blank
self.labels = labels
def forward(self, logits: torch.Tensor) -> str:
"""Given a sequ... |
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
from typing import Dict, Optional
from mmengine.model import is_model_wrapper
from mmengine.registry import HOOKS, MODELS
from .hook import DATA_BATCH, Hook
@HOOKS.register_module()
class EMAHook(Hook):
"""A Hook to apply Exponential Moving Average... | # Copyright (c) OpenMMLab. All rights reserved.
import itertools
from typing import Dict, Optional
from mmengine.model import is_model_wrapper
from mmengine.registry import HOOKS, MODELS
from .hook import DATA_BATCH, Hook
@HOOKS.register_module()
class EMAHook(Hook):
"""A Hook to apply Exponential Moving Average... |
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from langchain_community.utilities.polygon import PolygonAPIWrapper
class Inputs(BaseModel):
"""Inputs for Polygon's Financials API"""
qu... | from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from langchain_community.utilities.polygon import PolygonAPIWrapper
class Inputs(BaseModel):
"""Inputs for Polygon's Financials API"""
qu... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_stream... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_stream... |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmdet.models.dense_heads.fcos_head import FCOSHead
from ..builder import HEADS
@HEADS.register_module()
class NASFCOSHead(FCOSHead):
"""Anchor-free head used in `NASFCOS <https://arxiv.o... | import copy
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmdet.models.dense_heads.fcos_head import FCOSHead
from ..builder import HEADS
@HEADS.register_module()
class NASFCOSHead(FCOSHead):
"""Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.
It is quite similar w... |
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = ... | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = ... |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import time
from contextlib import contextmanager
from typing import Generator, Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current ... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
Consider the case of r... |
from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoModule,
skipIfNoQengine,
skipIfNoSox,
skipIfPy310,
skip... | from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoModule,
skipIfNoQengine,
skipIfNoSox,
skipIfPy310,
skip... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import GoogleTranslateTransformer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import GoogleTranslateTransformer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... |
from typing import Optional
from opentelemetry.context.context import Context
from jina import DocumentArray, Executor, requests
class ExecutorTestWithTracing(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.meter:
self.request_counter = self.... | from typing import Optional
from opentelemetry.context.context import Context
from jina import Executor, requests, DocumentArray
class ExecutorTestWithTracing(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.meter:
self.request_counter = self.m... |
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOO... | # Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOO... |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
from mmdet.datasets.pipelines import (LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_clas... | import copy
import os.path as osp
import mmcv
import numpy as np
from mmdet.datasets.pipelines import (LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.d... |
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
... | _base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm... |
_base_ = './ms-rcnn_r101-caffe_fpn_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
... | _base_ = './ms_rcnn_r101_caffe_fpn_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
... |
import csv
import logging
import os
from typing import TYPE_CHECKING, Dict
import torch
from torch.utils.data import DataLoader
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.util import batch_to_device
if TYPE_CHECKING:
from sentence_transformers.Sent... | from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
import torch
from torch.utils.data import DataLoader
import logging
from ..util import batch_to_device
import os
import csv
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate... |
import pytest
from langchain_core.documents import Document
from langchain_core.indexing.api import _HashedDocument
def test_hashed_document_hashing() -> None:
hashed_document = _HashedDocument( # type: ignore[call-arg]
uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
... | import pytest
from langchain_core.documents import Document
from langchain_core.indexing.api import _HashedDocument
def test_hashed_document_hashing() -> None:
hashed_document = _HashedDocument( # type: ignore[call-arg]
uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
... |
import csv
import os
from pathlib import Path
from typing import Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform
SAMPLE_RATE = 16000
class FluentSpeechCommands(Dataset):
"""*Fluent Speech Commands* :cite:`fluent` dataset
Args:
... | import csv
import os
from pathlib import Path
from typing import Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform
SAMPLE_RATE = 16000
class FluentSpeechCommands(Dataset):
"""Create *Fluent Speech Commands* :cite:`fluent` Dataset
... |
"""Argparser module for the export API"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import _chf
def set_export_parser(parser=None):
"""Set the parser for exporting
:param parser: the parser configure
:return: the parser
"""
if not parser:
parser = set_base_pa... | """Argparser module for the export API"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import _chf
def set_export_parser(parser=None):
"""Set the parser for exporting
:param parser: the parser configure
:return: the parser
"""
if not parser:
parser = set_base_pa... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from simpleranker import SimpleRanker
@pytest.mark.parametrize('traversal_paths', [['r'], ['c']])
@pytest.mark.parametrize('ranking', ['min', 'max'])
def test_ranking(documents_chunk, documents_chunk_c... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from simpleranker import SimpleRanker
@pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']])
@pytest.mark.parametrize('ranking', ['min', 'max'])
def test_ranking(
documents_chunk, docu... |
"""**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import Lite... | """**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import Lite... |
import logging
from typing import Any
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json
logger = lo... | import logging
from typing import Any
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json
logger = lo... |