| input | output |
|---|---|
_base_ = './queryinst_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
... | _base_ = './queryinst_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, ... |
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.base_document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class AbstractType(BaseNode):... | from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class AbstractType(BaseNode):
... |
import traceback
from typing import Optional
from jina.proto import jina_pb2
from jina.serve.executors import BaseExecutor
from jina.types.mixin import ProtoTypeMixin
class Request(ProtoTypeMixin):
"""
:class:`Request` is one of the primitive data types in Jina, and serves as a base for
:class:`~data.Dat... | import traceback
from typing import Optional
from jina.proto import jina_pb2
from jina.serve.executors import BaseExecutor
from jina.types.mixin import ProtoTypeMixin
class Request(ProtoTypeMixin):
"""
:class:`Request` is one of the primitive data types in Jina, and serves as a base for
:class:`~data.Dat... |
_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model setting
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
init_cfg=dict(
type='Pretrained',
... | _base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_head=dict(
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=False,
... |
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransfor... | from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransfor... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from... |
# coding: utf-8
"""Tests for dual GPU+CPU support."""
import os
import platform
import pytest
from sklearn.metrics import log_loss
import lightgbm as lgb
from .utils import load_breast_cancer
@pytest.mark.skipif(
os.environ.get("LIGHTGBM_TEST_DUAL_CPU_GPU", None) is None,
reason="Only run if appropriate e... | # coding: utf-8
"""Tests for dual GPU+CPU support."""
import os
import pytest
from sklearn.metrics import log_loss
import lightgbm as lgb
from .utils import load_breast_cancer
@pytest.mark.skipif(
os.environ.get("LIGHTGBM_TEST_DUAL_CPU_GPU", None) is None,
reason="Only run if appropriate env variable is s... |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... | """
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... |
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"])
class AveragePooling2D(BasePooling):
"""Average pooling operation for 2D spatial data.
Downsamples the input along its spatial... | from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"])
class AveragePooling2D(BasePooling):
"""Average pooling operation for 2D spatial data.
Downsamples the input along its spatial... |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... |
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Video
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_avai... | import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Video
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from tests import TOYDATA_DIR
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = '... |
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._datapoint import Datapoint
class Mask(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for segmentation and detection masks.
Args:
data (tensor-like, PIL.Image.Image): Any data that ... | from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._datapoint import Datapoint
class Mask(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for segmentation and detection masks.
Args:
data (tensor-like, PIL.Image.Image): Any data that ... |
# This is different from the TTA of official CenterNet.
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
tta_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args=dict(backend='disk')),
dict(
... | # This is different from the TTA of official CenterNet.
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
tta_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args=dict(backend='disk')),
dict(
... |
"""Argparser module for container runtimes"""
import argparse
from jina.enums import DockerNetworkMode
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_container_runtime_parser(parser, pod_type: str = 'executor'):
"""Mixing in arguments required by :class:`ContainerRuntime... | """Argparser module for container runtimes"""
import argparse
from jina.enums import DockerNetworkMode
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_container_runtime_parser(parser, pod_type: str = 'executor'):
"""Mixing in arguments required by :class:`ContainerRuntime`... |
from typing import TYPE_CHECKING, Any
from langchain._api.module_import import create_importer
if TYPE_CHECKING:
from langchain_community.chat_loaders.facebook_messenger import (
FolderFacebookMessengerChatLoader,
SingleFileFacebookMessengerChatLoader,
)
module_lookup = {
"SingleFileFaceb... | from typing import TYPE_CHECKING, Any
from langchain._api.module_import import create_importer
if TYPE_CHECKING:
from langchain_community.chat_loaders.facebook_messenger import (
FolderFacebookMessengerChatLoader,
SingleFileFacebookMessengerChatLoader,
)
module_lookup = {
"SingleFileFaceb... |
_base_ = [
'../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dic... | _base_ = [
'../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
... |
from __future__ import annotations
from typing import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
... | from __future__ import annotations
from typing import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
... |
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Wrapper")
class Wrapper(Layer):
"""Abstract wrapper base class.
Wrappers take another layer and augment it in various ways.
Do not use this cla... | from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Wrapper")
class Wrapper(Layer):
"""Abstract wrapper base class.
Wrappers take another layer and augment it in various ways.
Do not use this cla... |
"""Defines utilities for switching audio backends"""
import os
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
... | """Defines utilities for switching audio backends"""
import os
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
... |
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init_... | from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init_... |
"""Module for argparse for Client"""
def mixin_client_protocol_parser(parser):
"""Add the arguments for the protocol to the client parser
    :param parser: the parser to configure
"""
from jina.enums import GatewayProtocolType
parser.add_argument(
'--protocol',
type=GatewayProtocolTyp... | """Module for argparse for Client"""
def mixin_client_protocol_parser(parser):
"""Add the arguments for the protocol to the client parser
    :param parser: the parser to configure
"""
from jina.enums import GatewayProtocolType
parser.add_argument(
'--protocol',
type=GatewayProtocolTyp... |
import pytest
from jina.enums import GatewayProtocolType
from jina.helper import ArgNamespace
from jina.parsers import set_gateway_parser, set_pod_parser
@pytest.mark.parametrize(
'port,expected_port',
[
('12345', [12345]),
([12345], [12345]),
([12345, 12344], [12345, 12344]),
],
... | import pytest
from jina.enums import GatewayProtocolType
from jina.helper import ArgNamespace
from jina.parsers import set_gateway_parser, set_pod_parser
@pytest.mark.parametrize(
'port,expected_port',
[
('12345', [12345]),
([12345], [12345]),
([12345, 12344], [12345, 12344]),
],
... |
"""
This script downloads the parallel sentences corpus and creates parallel sentences tsv files that can be used to extend
existing sentence embedding models to new languages.
The parallel sentences corpus is a crawl of transcripts from talks, which are translated to 100+ languages.
The parallel sentences corpus cann... | """
This script downloads the parallel sentences corpus and creates parallel sentences tsv files that can be used to extend
existing sentence embedding models to new languages.
The parallel sentences corpus is a crawl of transcripts from talks, which are translated to 100+ languages.
The parallel sentences corpus cann... |
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model impo... | __copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model impo... |
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.config import ConfigDict
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mm... | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils.typing import ConfigDict, MultiConfig, OptConfigType
from ... |
from typing import Iterable, Dict
from docarray.array.storage.annlite.helper import OffsetMapping
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document, Document... | from typing import Iterable, Dict
from docarray.array.storage.annlite.helper import OffsetMapping
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document
class G... |
import numpy as np
from keras.src.api_export import keras_export
@keras_export(
[
"keras.utils.pad_sequences",
"keras.preprocessing.sequence.pad_sequences",
]
)
def pad_sequences(
sequences,
maxlen=None,
dtype="int32",
padding="pre",
truncating="pre",
value=0.0,
):
... | import numpy as np
from keras.src.api_export import keras_export
@keras_export(
[
"keras.utils.pad_sequences",
"keras.preprocessing.sequence.pad_sequences",
]
)
def pad_sequences(
sequences,
maxlen=None,
dtype="int32",
padding="pre",
truncating="pre",
value=0.0,
):
... |
import pytest
from jina.importer import ImportExtensions
from jina.logging.predefined import default_logger
def test_bad_import():
from jina.logging.predefined import default_logger
with pytest.raises(ModuleNotFoundError):
with ImportExtensions(required=True, logger=default_logger):
impo... | import pytest
from jina.importer import ImportExtensions
from jina.logging.predefined import default_logger
def test_bad_import():
from jina.logging.predefined import default_logger
with pytest.raises(ModuleNotFoundError):
with ImportExtensions(required=True, logger=default_logger):
impo... |
from typing import TYPE_CHECKING, Any, Callable, List, Optional, Type
from llama_index.core.bridge.pydantic import BaseModel, ConfigDict
from .errors import WorkflowValidationError
from .utils import (
ServiceDefinition,
inspect_signature,
is_free_function,
validate_step_signature,
)
from .resource im... | from typing import TYPE_CHECKING, Any, Callable, List, Optional, Type
from llama_index.core.bridge.pydantic import BaseModel, ConfigDict
from .errors import WorkflowValidationError
from .utils import (
is_free_function,
validate_step_signature,
inspect_signature,
ServiceDefinition,
)
if TYPE_CHECKING... |
from typing import Any, List, Optional
from mcp.client.session import ClientSession
from mcp.server.fastmcp import FastMCP, Context
from pydantic import BaseModel
from llama_index.core.tools import FunctionTool
from llama_index.core.workflow import Event, StartEvent, StopEvent, Workflow
from llama_index.tools.mcp.bas... | from typing import Any, List, Optional
from mcp.client.session import ClientSession
from mcp.server.fastmcp import FastMCP, Context
from pydantic import BaseModel
from llama_index.core.tools import FunctionTool
from llama_index.core.workflow import Event, StartEvent, StopEvent, Workflow
from llama_index.tools.mcp.bas... |
"""Test Petals API wrapper."""
from pydantic import SecretStr
from pytest import CaptureFixture
from langchain_community.llms.petals import Petals
def test_api_key_is_string() -> None:
llm = Petals(huggingface_api_key="secret-api-key") # type: ignore[arg-type]
assert isinstance(llm.huggingface_api_key, Sec... | """Test Petals API wrapper."""
from pydantic import SecretStr
from pytest import CaptureFixture
from langchain_community.llms.petals import Petals
def test_api_key_is_string() -> None:
llm = Petals(huggingface_api_key="secret-api-key") # type: ignore[arg-type, call-arg]
assert isinstance(llm.huggingface_ap... |
from langchain_core.caches import RETURN_VAL_TYPE, BaseCache
__all__ = ["RETURN_VAL_TYPE", "BaseCache"]
| from langchain_core.caches import RETURN_VAL_TYPE, BaseCache
__all__ = ["BaseCache", "RETURN_VAL_TYPE"]
|
"""Simple Web scraper."""
from typing import List, Optional, Dict, Callable
import requests
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class SimpleWebPageReader(BasePydanticReader):
"""
S... | """Simple Web scraper."""
from typing import List, Optional, Dict, Callable
import requests
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class SimpleWebPageReader(BasePydanticReader):
"""
Si... |
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color impor... | from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color impor... |
from pathlib import Path
import pytest
from jina import Document, DocumentArray, Executor
from sentencizer import Sentencizer
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.min_sent_len == 1
@pytest.mark.parametrize('traversal_paths', ['@r', '@c'])
def... | from pathlib import Path
import pytest
from jina import Document, DocumentArray, Executor
from sentencizer import Sentencizer
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.min_sent_len == 1
@pytest.mark.parametrize('traversal_paths', [('r',), ('c',)])... |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.device import is_npu_available
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_... | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.device import is_npu_available
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_... |
import logging
import os
import zlib
from contextlib import asynccontextmanager
from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse
from uuid import uuid4
from dotenv import load_dotenv
from prisma import Prisma
from pydantic import BaseModel, Field, field_validator
from backend.util.retry import conn... | import logging
import os
import zlib
from contextlib import asynccontextmanager
from uuid import uuid4
from dotenv import load_dotenv
from prisma import Prisma
from pydantic import BaseModel, Field, field_validator
from backend.util.retry import conn_retry
load_dotenv()
PRISMA_SCHEMA = os.getenv("PRISMA_SCHEMA", "s... |
import shutil
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidat... | import shutil
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidat... |
import pytest
import pytest_socket
import requests
def test_socket_disabled() -> None:
"""This test should fail."""
with pytest.raises(pytest_socket.SocketBlockedError):
# noqa since we don't need a timeout here as the request should fail immediately
requests.get("https://www.example.com") # ... | import pytest
import pytest_socket
import requests
def test_socket_disabled() -> None:
"""This test should fail."""
with pytest.raises(pytest_socket.SocketBlockedError):
requests.get("https://www.example.com")
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_d... | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_d... |
import os
import time
import pytest
from prometheus_api_client import PrometheusConnect
from jina.helper import random_port
@pytest.fixture()
def jaeger_port():
port = random_port()
os.environ['JAEGER_PORT'] = str(port)
yield port
del os.environ['JAEGER_PORT']
@pytest.fixture()
def prometheus_back... | import os
import time
import pytest
from prometheus_api_client import PrometheusConnect
from jina.helper import random_port
@pytest.fixture()
def jaeger_port():
port = random_port()
os.environ['JAEGER_PORT'] = str(port)
yield port
del os.environ['JAEGER_PORT']
@pytest.fixture()
def prometheus_back... |
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
tra... | from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
tra... |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.structures import InstanceData, PixelData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
cl... | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import InstanceData, PixelData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
class Te... |
"""Chat generation output classes."""
from __future__ import annotations
from typing import TYPE_CHECKING, Literal, Union
from pydantic import model_validator
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
from langchain_core.utils._merge i... | from __future__ import annotations
from typing import TYPE_CHECKING, Literal, Union
from pydantic import model_validator
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
from langchain_core.utils._merge import merge_dicts
if TYPE_CHECKING:
... |
"""
This example uses average word embeddings (for example from GloVe). It adds two fully-connected feed-forward layers (dense layers) to create a Deep Averaging Network (DAN).
If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server.
See https://public.ukp.informatik.tu-darmstadt.de/reimers/... | """
This example uses average word embeddings (for example from GloVe). It adds two fully-connected feed-forward layers (dense layers) to create a Deep Averaging Network (DAN).
If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server.
See https://public.ukp.informatik.tu-darmstadt.de/reimers/... |
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__: List[str] = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List ... | # coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__: List[str] = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List ... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_boxes import BaseBoxes
from .bbox_overlaps import bbox_overlaps
from .box_type import (autocast_box_type, convert_box_type, get_box_type,
register_box, register_box_converter)
from .horizontal_boxes import HorizontalBoxes
from .transforms... | # Copyright (c) OpenMMLab. All rights reserved.
from .base_boxes import BaseBoxes
from .bbox_overlaps import bbox_overlaps
from .box_type import (autocast_box_type, convert_box_type, get_box_type,
register_box, register_box_converter)
from .horizontal_boxes import HorizontalBoxes
from .transforms... |
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
... | from __future__ import annotations
import torch
from torch import nn
# TODO: SAVING LOADING with config.json
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
and a... |
_base_ = './ms-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
... | _base_ = './ms_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
... |
"""
Demo for using and defining callback functions
==============================================
.. versionadded:: 1.3.0
"""
import argparse
import os
import tempfile
from typing import Dict
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.mode... | """
Demo for using and defining callback functions
==============================================
.. versionadded:: 1.3.0
"""
import argparse
import os
import tempfile
from typing import Dict
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.constraints import deserialize as deserialize
from keras.src.constraints import get as get
from keras.src.constraints import serialize as serialize
from keras.src.constraints.constrai... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.constraints import deserialize
from keras.src.constraints import get
from keras.src.constraints import serialize
from keras.src.constraints.constraints import Constraint
from keras.sr... |
"""DeepLake multimodal Retrieval Pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.indices.multi_modal import MultiModalVectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import SimpleMultiModalQueryEngine
from llama_index.core.sche... | """DeepLake multimodal Retrieval Pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.indices.multi_modal import MultiModalVectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import SimpleMultiModalQueryEngine
from llama_index.core.sch... |
from jina_cli.export import api_to_dict
def _build_lookup_table():
all_keywords = {}
import copy
def build_invert_index(d, usage='jina'):
for k in d['methods']:
usg = f'{usage} {k["name"]}'
if 'methods' in k:
build_invert_index(k, usage=usg)
if ... | from jina_cli.export import api_to_dict
def _build_lookup_table():
all_keywords = {}
import copy
def build_invert_index(d, usage='jina'):
for k in d['methods']:
usg = f'{usage} {k["name"]}'
if 'methods' in k:
build_invert_index(k, usage=usg)
if ... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.volcengine_maas import (
VolcEngineMaasChat,
convert_dict_to_message,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.volcengine_maas import (
VolcEngineMaasChat,
convert_dict_to_message,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic... |
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
albu_train_transforms = [
dict(
type='ShiftScaleRotate',
shift_limit=0.0625,
scale_limit=0.0,
rotate_limit=0,
interpolation=1,
p=0.5),
dict(
type='RandomBrightnessContrast',
brightness_limit=[0.... | _base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
albu_train_transforms = [
dict(
type='ShiftScaleRotate',
shift_limit=0.0625,
scale_limit=0.0,
rotate_limit=0,
interpolation=1,
p=0.5),
dict(
type='RandomBrightnessContrast',
brightness_limit=[0.... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | import tracemalloc
from functools import wraps
from docarray import DocList
from docarray.documents import TextDoc
def get_test_da(n: int):
return DocList[TextDoc](gen_text_docs(n))
def gen_text_docs(n: int):
for i in range(n):
yield TextDoc(text=f'text {i}')
def profile_memory(func):
"""Deco... |
ac_file = '../jina_cli/autocomplete.py'
def _update_autocomplete():
from jina.parsers import get_main_parser
def _gaa(key, parser):
_result = {}
_compl = []
for v in parser._actions:
if v.option_strings:
_compl.extend(v.option_strings)
elif v.ch... | ac_file = '../jina_cli/autocomplete.py'
def _update_autocomplete():
from jina.parsers import get_main_parser
def _gaa(key, parser):
_result = {}
_compl = []
for v in parser._actions:
if v.option_strings:
_compl.extend(v.option_strings)
elif v.ch... |
import types
from typing import TYPE_CHECKING
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docar... | from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
__all__ = ['AudioNdArray']
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # n... |
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(... | from sentence_transformers import SentenceTransformer, LoggingHandler, InputExample
from sentence_transformers import models, util, evaluation, losses
import logging
import os
import gzip
from datetime import datetime
from torch.utils.data import DataLoader
#### Just some code to print debug information to stdout
logg... |
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | # flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import AINAppOps
from langchain_community.tools.ainetwork.app import AppOperationType, AppSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate lo... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import AINAppOps
from langchain_community.tools.ainetwork.app import AppOperationType, AppSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate lo... |
from typing import Any, Optional
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type.""... | from typing import Any, Optional
from typing_inspect import get_args, is_union_type
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(t... |
"""Argparser module for hub push"""
import argparse
import os
from jina.parsers.helper import add_arg_group
def mixin_hub_push_parser(parser):
"""Add the arguments for hub push to the parser
    :param parser: the parser to configure
"""
def dir_path(string):
if os.path.isdir(string):
... | """Argparser module for hub push"""
import argparse
import os
from jina.parsers.helper import add_arg_group
def mixin_hub_push_parser(parser):
"""Add the arguments for hub push to the parser
    :param parser: the parser to configure
"""
def dir_path(string):
if os.path.isdir(string):
... |
import logging
import os
from functools import cache
from autogpt_libs.utils.cache import thread_cached
from dotenv import load_dotenv
from redis import Redis
from redis.asyncio import Redis as AsyncRedis
from backend.util.retry import conn_retry
load_dotenv()
HOST = os.getenv("REDIS_HOST", "localhost")
PORT = int(... | import logging
import os
from dotenv import load_dotenv
from redis import Redis
from redis.asyncio import Redis as AsyncRedis
from backend.util.retry import conn_retry
load_dotenv()
HOST = os.getenv("REDIS_HOST", "localhost")
PORT = int(os.getenv("REDIS_PORT", "6379"))
PASSWORD = os.getenv("REDIS_PASSWORD", "passwo... |
from pathlib import Path
from typing import Tuple, Union
import torch
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform
from torchaudio._internal.module_utils import dropping_support, dropping_class_support
_SUBSETS = ["music", "noise", "speech"]
_SAMPLE_RATE = 16_000
@droppi... | from pathlib import Path
from typing import Tuple, Union
import torch
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform
from torchaudio._internal.module_utils import dropping_support
_SUBSETS = ["music", "noise", "speech"]
_SAMPLE_RATE = 16_000
class Musan(Dataset):
r"""... |
__version__ = '0.33.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()... | __version__ = '0.32.2'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()... |
# Copyright (c) OpenMMLab. All rights reserved.
# config now can have imported modules and defined functions for convenience
import os.path as osp
def func():
return 'string with \tescape\\ characters\n'
test_item1 = [1, 2]
bool_item2 = True
str_item3 = 'test'
dict_item4 = dict(
a={
'c/d': 'path/d',... | # Copyright (c) OpenMMLab. All rights reserved.
test_item1 = [1, 2]
bool_item2 = True
str_item3 = 'test'
dict_item4 = dict(
a={
'c/d': 'path/d',
'f': 's3//f',
6: '2333',
'2333': 'number'
},
b={'8': 543},
c={9: 678},
d={'a': 0},
f=dict(a='69'))
dict_item5 = {'x/x':... |
__version__ = '0.14.11'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
| __version__ = '0.14.10'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
"""Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from ... | """Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from ... |
"""Output parsers using Pydantic."""
import json
from typing import Annotated, Generic, Optional
import pydantic
from pydantic import SkipValidation
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import JsonOutputParser
from langc... | """Output parsers using Pydantic."""
import json
from typing import Annotated, Generic, Optional
import pydantic
from pydantic import SkipValidation
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import JsonOutputParser
from langc... |
from langchain_anthropic.chat_models import (
ChatAnthropic,
ChatAnthropicMessages,
convert_to_anthropic_tool,
)
from langchain_anthropic.llms import Anthropic, AnthropicLLM
__all__ = [
"Anthropic",
"AnthropicLLM",
"ChatAnthropic",
"ChatAnthropicMessages",
"convert_to_anthropic_tool",
]... | from langchain_anthropic.chat_models import (
ChatAnthropic,
ChatAnthropicMessages,
convert_to_anthropic_tool,
)
from langchain_anthropic.llms import Anthropic, AnthropicLLM
__all__ = [
"ChatAnthropicMessages",
"ChatAnthropic",
"convert_to_anthropic_tool",
"Anthropic",
"AnthropicLLM",
]... |
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from ..base.backend import BaseBackendMixin, TypeMap
from ....helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHEC... | from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from ..base.backend import BaseBackendMixin
from ....helper import dataclass_from_dict, filter_dict
if TYPE_CHECKING:
from ....typing... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
"""`Focal Loss <https://arxiv.org/abs/1708.0... | import mmcv
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
distribution... |
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class Sparse... | from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class Sparse... |
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Us... | # dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
tra... |
from langchain_community.document_loaders import BiliBiliLoader
def test_bilibili_loader() -> None:
"""Test Bilibili Loader."""
loader = BiliBiliLoader(
[
"https://www.bilibili.com/video/BV1xt411o7Xu/",
"https://www.bilibili.com/video/av330407025/",
"https://www.bil... | from langchain_community.document_loaders import BiliBiliLoader
def test_bilibili_loader() -> None:
"""Test Bilibili Loader."""
loader = BiliBiliLoader(
[
"https://www.bilibili.com/video/BV1xt411o7Xu/",
"https://www.bilibili.com/video/av330407025/",
]
)
docs = l... |
"""
=====================================================
MNIST classification using multinomial logistic + L1
=====================================================
Here we fit a multinomial logistic regression with L1 penalty on a subset of
the MNIST digits classification task. We use the SAGA algorithm for this
purp... | """
=====================================================
MNIST classification using multinomial logistic + L1
=====================================================
Here we fit a multinomial logistic regression with L1 penalty on a subset of
the MNIST digits classification task. We use the SAGA algorithm for this
purp... |
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Dropout")
class Dropout(Layer):
"""Applies dropout to the input.
The `Dropout` layer randomly sets input units to 0 with a frequency of
`rate` at each step duri... | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Dropout")
class Dropout(Layer):
"""Applies dropout to the input.
The `Dropout` layer randomly sets input units to 0 with a frequency of
`rate` at each step duri... |
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from mmdet.registry import TASK_UTILS
IOU_CALCULATORS = TASK_UTILS
def build_iou_calculator(cfg, default_args=None):
"""Builder of IoU calculator."""
warnings.warn(
'``build_iou_calculator`` would be deprecated soon, please use '
... | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry, build_from_cfg
IOU_CALCULATORS = Registry('IoU calculator')
def build_iou_calculator(cfg, default_args=None):
"""Builder of IoU calculator."""
return build_from_cfg(cfg, IOU_CALCULATORS, default_args)
|
from typing import List, Optional
import numpy as np
import pytest
from docarray import DocList
from docarray.base_doc.doc import BaseDoc
from docarray.typing import NdArray
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
... | from typing import List, Optional
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optio... |
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
wi... | _base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='Loa... |
from collections.abc import Mapping
from operator import itemgetter
from typing import Any, Callable, Optional, Union
from langchain_core.messages import BaseMessage
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain_core.runnables import RouterRunnable, Runnable
from l... | from collections.abc import Mapping
from operator import itemgetter
from typing import Any, Callable, Optional, Union
from langchain_core.messages import BaseMessage
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain_core.runnables import RouterRunnable, Runnable
from l... |
import os
from typing import BinaryIO, Optional, Tuple, Union
import torch
import torchaudio
from .backend import Backend
from .common import AudioMetaData
sox_ext = torchaudio._extension.lazy_import_sox_ext()
class SoXBackend(Backend):
@staticmethod
def info(uri: Union[BinaryIO, str, os.PathLike], format:... | import os
from typing import BinaryIO, Optional, Tuple, Union
import torch
import torchaudio
from .backend import Backend
from .common import AudioMetaData
sox_ext = torchaudio._extension.lazy_import_sox_ext()
class SoXBackend(Backend):
@staticmethod
def info(uri: Union[BinaryIO, str, os.PathLike], format:... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class EmptyCacheHoo... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the proc... |
"""
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
from __future__ import annotations
import csv
import gzip
import os
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from torch.utils.data import DataLoader
from sentence_transformers import (
In... | """
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
import csv
import gzip
import os
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestCornerNet(Tes... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestCornerNet(Tes... |
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
def is_torch_available():
return torch_imported
... | try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
def is_torch_available():
return torch_imported
|
import numpy as np
import pytest
from hnswlib_searcher import HnswlibSearcher
from jina import Document, DocumentArray, Flow
_DIM = 10
@pytest.mark.parametrize('uses', ['HnswlibSearcher', 'docker://hnswlibsearcher'])
def test_index_search_flow(uses: str, build_docker_image: str):
f = Flow().add(uses=uses, uses_w... | import numpy as np
import pytest
from hnswlib_searcher import HnswlibSearcher
from jina import Document, DocumentArray, Flow
_DIM = 10
@pytest.mark.parametrize('uses', ['HnswlibSearcher', 'docker://hnswlibsearcher'])
def test_index_search_flow(uses: str, build_docker_image: str):
f = Flow().add(uses=uses, uses_w... |
from __future__ import annotations
from .InputExample import InputExample
from .LabelSentenceReader import LabelSentenceReader
from .NLIDataReader import NLIDataReader
from .STSDataReader import STSBenchmarkDataReader, STSDataReader
from .TripletReader import TripletReader
__all__ = [
"InputExample",
"LabelSe... | from .InputExample import InputExample
from .LabelSentenceReader import LabelSentenceReader
from .NLIDataReader import NLIDataReader
from .STSDataReader import STSBenchmarkDataReader, STSDataReader
from .TripletReader import TripletReader
__all__ = [
"InputExample",
"LabelSentenceReader",
"NLIDataReader",
... |
import os
import platform
import tempfile
import pytest
from sentence_transformers import CrossEncoder, SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import DatasetDict, load... | import os
import platform
import tempfile
import pytest
from sentence_transformers import SentenceTransformer, CrossEncoder
from sentence_transformers.models import Transformer, Pooling
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/... |
from jina.clients.helper import callback_exec
from jina.proto import jina_pb2_grpc
class StreamRpc:
"""Class that encapsulated the methods required to run a stream rpc call from the client. Instantiate a single class
for each client request.
"""
def __init__(
self,
channel,
co... | from jina.clients.helper import callback_exec
from jina.proto import jina_pb2_grpc
class StreamRpc:
"""Class that encapsulated the methods required to run a stream rpc call from the client. Instantiate a single class
for each client request.
"""
def __init__(
self,
channel,
co... |
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... | import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... |
from __future__ import annotations
import difflib
from pathlib import Path
import pytest
from typer.testing import CliRunner
from langchain_cli.cli import app
from tests.unit_tests.migrate.cli_runner.cases import before, expected
from tests.unit_tests.migrate.cli_runner.folder import Folder
pytest.importorskip("gri... | from __future__ import annotations
import difflib
from pathlib import Path
import pytest
from typer.testing import CliRunner
from langchain_cli.cli import app
from tests.unit_tests.migrate.cli_runner.cases import before, expected
from tests.unit_tests.migrate.cli_runner.folder import Folder
pytest.importorskip("gri... |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... | """
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.openapi.response_chain import (
RESPONSE_TEMPLATE,
APIResponderChain,
APIResponderOutputParser,
)
# Create a way to dynamically look up deprecated imports... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.openapi.response_chain import (
RESPONSE_TEMPLATE,
APIResponderChain,
APIResponderOutputParser,
)
# Create a way to dynamically look up deprecated imports... |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If ex... | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If ex... |
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.openvino import core
from keras.src.backend.openvino import image
from keras.src.backend.openvino import linalg
from keras.src.backend.openvino import math
from keras.src.backend.openvino import nn
from keras.src.backend.openvino import n... | from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.openvino import core
from keras.src.backend.openvino import image
from keras.src.backend.openvino import linalg
from keras.src.backend.openvino import math
from keras.src.backend.openvino import nn
from keras.src.backend.openvino import n... |
from torchaudio_unittest.common_utils import PytorchTestCase
from .autograd_test_impl import AutogradTestFloat32, AutogradTestMixin
class AutogradCPUTest(AutogradTestMixin, PytorchTestCase):
device = "cpu"
class AutogradRNNTCPUTest(AutogradTestFloat32, PytorchTestCase):
device = "cpu"
| from torchaudio_unittest.common_utils import PytorchTestCase
from .autograd_test_impl import AutogradTestMixin, AutogradTestFloat32
class AutogradCPUTest(AutogradTestMixin, PytorchTestCase):
device = "cpu"
class AutogradRNNTCPUTest(AutogradTestFloat32, PytorchTestCase):
device = "cpu"
|
import asyncio
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowRuntimeError, WorkflowTimeoutError
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.cor... | import asyncio
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowRuntimeError, WorkflowTimeoutError
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.cor... |
from typing import List, Optional
from docarray.base_doc.doc import BaseDoc
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
... | from typing import Optional, List
from docarray.base_document.document import BaseDocument
def test_base_document_init():
doc = BaseDocument()
assert doc.id is not None
def test_update():
class MyDocument(BaseDocument):
content: str
title: Optional[str] = None
tags_: List
d... |