| input | output |
|---|---|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i i... | import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i i... |
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMom... | # Copyright (c) OpenMMLab. All rights reserved.
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residu... |
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray import Document
def image_setter(value) -> 'Document':
from docarray import Document
doc = Document(modality='image')
if isinstance(value, str):
doc.uri = value
doc._metadata['im... | from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray import Document
def image_setter(value) -> 'Document':
from docarray import Document
doc = Document(modality='image')
if isinstance(value, str):
doc.uri = value
doc._metadata['image_type'] = 'uri'
... |
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.8"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from... | """FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.7"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from... |
import os
from typing import BinaryIO, Optional, Tuple, Union
import torch
import torchaudio
from .backend import Backend
from .common import AudioMetaData
sox_ext = torchaudio._extension.lazy_import_sox_ext()
class SoXBackend(Backend):
@staticmethod
def info(uri: Union[BinaryIO, str, os.PathLike], format:... | import os
from typing import BinaryIO, Optional, Tuple, Union
import torch
from .backend import Backend
from .common import AudioMetaData
class SoXBackend(Backend):
@staticmethod
def info(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str], buffer_size: int = 4096) -> AudioMetaData:
if has... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Flow
from ...minranker import MinRanker
def test_integration(documents_chunk):
with Flow().add(uses=MinRanker, uses_with={'metric': 'cosine'}) as flow:
resp = flow.post(on='/search', in... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Flow
from ...minranker import MinRanker
def test_integration(documents_chunk):
with Flow().add(uses=MinRanker, override_with={'metric': 'cosine'}) as flow:
resp = flow.post(on='/search'... |
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='FreeAnchorRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
... | _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='FreeAnchorRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
... |
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFuncti... | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFuncti... |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
import pytest
from mmdet.core.mask import BitmapMasks
from mmdet.datasets.pipelines import (FilterAnnotations, LoadImageFromFile,
LoadImageFromWebcam,
... | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
from mmdet.datasets.pipelines import (LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_clas... |
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Client, Document, Flow
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'http'])
def test_gateway_concurrency(protocol, reraise):
port = 12345
CONCURRENCY = 2
def _validate(re... | import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Client, Document, Flow
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'http'])
def test_gateway_concurrency(protocol, reraise):
port = 12345
CONCURRENCY = 2
def _validate(re... |
import prisma.enums
import prisma.types
from backend.blocks.io import IO_BLOCK_IDs
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NO... | import prisma
from backend.blocks.io import IO_BLOCK_IDs
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignor... |
import types
from abc import ABC
from typing import Any, Callable, List, Optional, Tuple, TypeVar, Union
import numpy as np
from docarray.computation import AbstractComputationalBackend
T = TypeVar('T')
class AbstractNumpyBasedBackend(AbstractComputationalBackend[T], ABC):
"""
Abstract base class for compu... | import types
from abc import ABC
from typing import Any, Callable, List, Optional, Tuple, TypeVar, Union
import numpy as np
from docarray.computation import AbstractComputationalBackend
T = TypeVar('T')
class AbstractNumpyBasedBackend(AbstractComputationalBackend[T], ABC):
"""
Abstract base class for compu... |
from workflows.handler import WorkflowHandler # noqa
| import asyncio
from typing import Any, AsyncGenerator, List, Optional
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.errors import WorkflowDone
from llama_index.core.workflow.events import Event, StopEvent
from .types import RunResultT
from .utils import BUSY_WAIT_DELAY
class W... |
import os # type: ignore[import-not-found]
from exa_py import Exa # type: ignore
from langchain_core.utils import convert_to_secret_str
def initialize_client(values: dict) -> dict:
"""Initialize the client."""
exa_api_key = values.get("exa_api_key") or os.environ.get("EXA_API_KEY") or ""
values["exa_ap... | import os # type: ignore[import-not-found]
from typing import Dict
from exa_py import Exa # type: ignore
from langchain_core.utils import convert_to_secret_str
def initialize_client(values: Dict) -> Dict:
"""Initialize the client."""
exa_api_key = values.get("exa_api_key") or os.environ.get("EXA_API_KEY") ... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters i... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters i... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters i... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters i... |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
"""BuilderCo... | import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
"""BuilderCo... |
from langchain_core.prompts.prompt import PromptTemplate
_CREATE_DRAFT_ANSWER_TEMPLATE = """{question}\n\n"""
CREATE_DRAFT_ANSWER_PROMPT = PromptTemplate(
input_variables=["question"], template=_CREATE_DRAFT_ANSWER_TEMPLATE
)
_LIST_ASSERTIONS_TEMPLATE = """Here is a statement:
{statement}
Make a bullet point list... | # flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
_CREATE_DRAFT_ANSWER_TEMPLATE = """{question}\n\n"""
CREATE_DRAFT_ANSWER_PROMPT = PromptTemplate(
input_variables=["question"], template=_CREATE_DRAFT_ANSWER_TEMPLATE
)
_LIST_ASSERTIONS_TEMPLATE = """Here is a statement:
{statement}
Make a bu... |
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str... | from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str... |
import numpy as np
import orjson
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import NdArray
from docarray.typing.tensor import NdArrayEmbedding
def test_proto_tensor():
tensor = parse_obj_as(NdArray, np.zeros(... | import numpy as np
import orjson
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import NdArray
from docarray.typing.tensor import NdArrayEmbedding
def test_proto_tensor():
tensor = parse_obj_as(NdArray, np.zeros(... |
"""Defines utilities for switching audio backends"""
import os
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
... | """Defines utilities for switching audio backends"""
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
"set_aud... |
_base_ = './detr_r50_8xb2-500e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| _base_ = './detr_r50_8x2_500e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
import base64
import json
import pickle
from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel
from llama_index.core.schema import BaseComponent
from .utils import import_module_from_qualified_name, get_qualified_name
class BaseSerializer(ABC):
@abstractmethod
def serialize... | import base64
import json
import pickle
from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel
from llama_index.core.schema import BaseComponent
from .utils import import_module_from_qualified_name, get_qualified_name
class BaseSerializer(ABC):
@abstractmethod
def serialize... |
from langchain_core.prompts.prompt import PromptTemplate
_DEFAULT_ENTITY_EXTRACTION_TEMPLATE = """You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You s... | # flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
_DEFAULT_ENTITY_EXTRACTION_TEMPLATE = """You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally cap... |
import itertools
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ArrowConfig(datasets.BuilderConfig):
"""BuilderConfig for Arrow."""
features: Opt... | import itertools
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ArrowConfig(datasets.BuilderConfig):
"""BuilderConfig for Arrow."""
features: Opt... |
"""
Hub is a central trustworthy that is aware of the existence of isolated apps, and that can reliably receive user queries and route them to the appropriate apps.
"""
from typing import Optional, Sequence, Callable
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.callbac... | """
Hub is a central trustworthy that is aware of the existence of isolated apps, and that can reliably receive user queries and route them to the appropriate apps.
"""
from typing import Optional, Sequence, Callable
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.callbac... |
from __future__ import annotations
__version__ = "4.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
import warnings
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_mode... | from __future__ import annotations
__version__ = "4.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
import warnings
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_mode... |
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is than pooled,
for example with mean-pooling.
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses, models
from ... | """
This example runs a CNN after the word embedding lookup. The output of the CNN is than pooled,
for example with mean-pooling.
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from s... |
import numpy as np
import torch
from docarray import Document, Image, Text
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
NdArray,
Tensor,
TextUrl,
TorchTensor,
)
def test_multi_modal_doc_proto():
class MyMultiModalDoc(Document):
image: Image
text: Text
... | import numpy as np
from docarray import DocumentArray, Document, Image, Text
def test_multi_modal_doc_proto():
class MyMultiModalDoc(Document):
image: Image
text: Text
class MySUperDoc(Document):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=... |
import logging
import warnings
from typing import Any, Optional, Dict, Type
from llama_index.core.bridge.pydantic import (
Field,
model_serializer,
ValidationError,
BaseModel,
)
from llama_index.core.tools import ToolSelection, ToolOutput
from llama_index.core.llms import ChatMessage
from llama_index.c... | import logging
from typing import Any, Optional
from llama_index.core.bridge.pydantic import Field, model_serializer, ValidationError
from llama_index.core.tools import ToolSelection, ToolOutput
from llama_index.core.llms import ChatMessage
from llama_index.core.workflow import Event, StartEvent
logger = logging.get... |
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
fil... | _base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
fil... |
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='MobileNetV2',
out_indices=(2, 4, 6),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://mmdet/mobilen... | _base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='MobileNetV2',
out_indices=(2, 4, 6),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://mmdet/mobilen... |
"""Airtable reader."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from pyairtable import Table
class AirtableReader(BaseReader):
"""
Airtable reader. Reads data from a table in a base.
Args:
api_key (str): Airtable AP... | """Airtable reader."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from pyairtable import Table
class AirtableReader(BaseReader):
"""Airtable reader. Reads data from a table in a base.
Args:
api_key (str): Airtable API key... |
# Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .dyhead import DyHead
from .fpg import FPG
from .fpn import FPN
from .fpn_carafe import FPN_CARAFE
from .hrfpn import H... | # Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .fpg import FPG
from .fpn import FPN
from .fpn_carafe import FPN_CARAFE
from .hrfpn import HRFPN
from .nas_fpn import N... |
import inspect
from keras.src.api_export import keras_export
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Zeros
f... | import inspect
from keras.src.api_export import keras_export
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Zeros
f... |
"""
Slides parser.
Contains parsers for .pptx files.
"""
import io
import os
import tempfile
from pathlib import Path
from typing import Dict, List, Optional
from fsspec import AbstractFileSystem
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.core.... | """
Slides parser.
Contains parsers for .pptx files.
"""
import os
import tempfile
from pathlib import Path
from typing import Dict, List, Optional
from fsspec import AbstractFileSystem
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.core.utils impo... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | from docarray.documents.audio import AudioDoc
from docarray.documents.image import ImageDoc
from docarray.documents.mesh import Mesh3D, VerticesAndFaces
from docarray.documents.point_cloud import PointCloud3D, PointsAndColors
from docarray.documents.text import TextDoc
from docarray.documents.video import VideoDoc
__a... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class AutoAssign(SingleStageDetector):
"""Implementation of `AutoAssign: Differentiable Label A... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class AutoAssign(SingleStageDetector):
"""Implementation of `AutoAssign: Differentiable Label A... |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... |
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class Sparse... | from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: Spars... |
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.pubmed import PubMedAPIWrapper
class PubmedQueryRun(BaseTool):
"""Tool that searches the PubMed API."""
name: st... | from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.pubmed import PubMedAPIWrapper
class PubmedQueryRun(BaseTool): # type: ignore[override]
"""Tool that searches the Pu... |
_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True,
rpn=None,
rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5)))
# augmentation strategy originates from DETR.
train... | _base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True,
rpn=None,
rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.... |
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
... | import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
... |
"""
=============================
Recursive feature elimination
=============================
This example demonstrates how Recursive Feature Elimination
(:class:`~sklearn.feature_selection.RFE`) can be used to determine the
importance of individual pixels for classifying handwritten digits.
:class:`~sklearn.feature_s... | """
=============================
Recursive feature elimination
=============================
This example demonstrates how Recursive Feature Elimination
(:class:`~sklearn.feature_selection.RFE`) can be used to determine the
importance of individual pixels for classifying handwritten digits.
:class:`~sklearn.feature_s... |
"""
Remote file reader.
A loader that fetches an arbitrary remote page or file by URL and parses its contents.
"""
import re
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from llama_index.core import SimpleDirectoryReader
from llama_index.core.readers.base import BaseReader
from llama... | """Remote file reader.
A loader that fetches an arbitrary remote page or file by URL and parses its contents.
"""
import re
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from llama_index.core import SimpleDirectoryReader
from llama_index.core.readers.base import BaseReader
from llama_... |
import pytest
from docarray import DocumentArray, Document
@pytest.mark.parametrize(
'columns',
[
[
('is_true', 'bool'),
('test_long', 'long'),
('test_double', 'double'),
],
{'is_true': 'bool', 'test_long': 'long', 'test_double': 'double'},
],
)
... | from docarray import DocumentArray, Document
def test_data_type(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [
('is_true', 'bool'),
('test_long', 'long'),
('test_double'... |
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | # flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... |
from typing import Any, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from pytest_mock import Mo... | from typing import Any, List, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from pytest_mock imp... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
f... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
f... |
"""Configuration for unit tests."""
from collections.abc import Iterator, Sequence
from importlib import util
from uuid import UUID
import pytest
from blockbuster import BlockBuster, blockbuster_ctx
from pytest_mock import MockerFixture
@pytest.fixture(autouse=True)
def blockbuster() -> Iterator[BlockBuster]:
w... | """Configuration for unit tests."""
from collections.abc import Iterator, Sequence
from importlib import util
from uuid import UUID
import pytest
from blockbuster import BlockBuster, blockbuster_ctx
from pytest import Config, Function, Parser
from pytest_mock import MockerFixture
@pytest.fixture(autouse=True)
def b... |
import os
import pytest
from typing import List
from unittest.mock import MagicMock, patch, AsyncMock
import uuid
from llama_index.core.base.base_selector import (
SelectorResult,
SingleSelection,
)
from llama_index.core.schema import QueryBundle
from llama_index.core.tools import ToolMetadata
from llama_index... | import os
import pytest
from typing import List
from unittest.mock import MagicMock, patch, AsyncMock
import uuid
from llama_index.core.base.base_selector import (
SelectorResult,
SingleSelection,
)
from llama_index.core.schema import QueryBundle
from llama_index.core.tools import ToolMetadata
from llama_index... |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... |
import os
from typing import Optional
import fsspec
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.core.storage.docstore.types import (
DEFAULT_BATCH_SIZE,
DEFAULT_PERSIST_DIR,
DEFAULT_PERSIST_FNAME,
DEFAULT_PERSIST_PATH,
)
from llama_index.core.storage.k... | import os
from typing import Optional
import fsspec
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.core.storage.docstore.types import (
DEFAULT_BATCH_SIZE,
DEFAULT_PERSIST_DIR,
DEFAULT_PERSIST_FNAME,
DEFAULT_PERSIST_PATH,
)
from llama_index.core.storage.k... |
# Copyright (c) OpenMMLab. All rights reserved.
import json
import os
import tempfile
from typing import List, Optional
from mmengine.evaluator import BaseMetric
from mmengine.utils import track_iter_progress
from pycocotools.coco import COCO
from mmdet.registry import METRICS
try:
from pycocoevalcap.eval import... | # Copyright (c) OpenMMLab. All rights reserved.
import json
import os
import tempfile
from typing import List, Optional
from mmengine.evaluator import BaseMetric
from mmengine.utils import track_iter_progress
from pycocotools.coco import COCO
from mmdet.registry import METRICS
try:
from pycocoevalcap.eval import... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class CascadeRCNN(TwoStageDetector):
r"""Implementation of `Cascade R-CNN: Delving into High Qu... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class CascadeRCNN(TwoStageDetector):
r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
Detection <https://arxiv.org/abs/1906.09756>`_"""
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint, update_data_root
from .setup_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest... | # Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint
from .setup_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
pa... |
from keras.src.api_export import keras_export
@keras_export(["keras.Initializer", "keras.initializers.Initializer"])
class Initializer:
"""Initializer base class: all Keras initializers inherit from this class.
Initializers should implement a `__call__()` method with the following
signature:
```pyth... | from keras.src.api_export import keras_export
@keras_export(["keras.Initializer", "keras.initializers.Initializer"])
class Initializer:
"""Initializer base class: all Keras initializers inherit from this class.
Initializers should implement a `__call__()` method with the following
signature:
```pyth... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import shutil
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope="session", autouse=True)
def download_cache():
subprocess.run(
'scripts/download_full.sh',
cwd=Path(_... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import shutil
import subprocess
from pathlib import Path
import pytest
from jina import Document, DocumentArray
@pytest.fixture(scope="session", autouse=True)
def download_cache():
subprocess.run(
'scri... |
from backend.app import run_processes
from backend.executor import ExecutionManager
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(ExecutionManager())
if __name__ == "__main__":
main()
| from backend.app import run_processes
from backend.executor import DatabaseManager, ExecutionManager
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(ExecutionManager())
if __name__ == "__main__":
main()
|
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest... | import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytes... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api._tf_keras.keras.preprocessing import image
from keras.api._tf_keras.keras.preprocessing import sequence
from keras.api._tf_keras.keras.preprocessing import text
from keras.src.utils.i... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras._tf_keras.keras.preprocessing import image
from keras._tf_keras.keras.preprocessing import sequence
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras... |
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomResize', scale=[(2048, 800), (2048, 1024)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetIn... | # dataset settings
dataset_type = 'CityscapesDataset'
# TODO remove it after cityscape metric
# data_root = '/mnt/lustre/luochunhua.vendor/openmmlab2.0/data/cityscapes/'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type... |
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.2.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versio... | # Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.1.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versio... |
from typing_extensions import TYPE_CHECKING
from docarray.typing.bytes import AudioBytes, ImageBytes, VideoBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray, AudioTensor
from docarray.typing.tensor.embedding.embedd... | from typing import (
Union,
TYPE_CHECKING,
TypeVar,
Sequence,
Optional,
List,
Dict,
Generator,
Iterable,
Tuple,
ForwardRef,
)
if TYPE_CHECKING: # pragma: no cover
import scipy.sparse
import tensorflow
import torch
import numpy as np
from PIL.Image import... |
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py']
# Use ClassAwareSampler
train_dataloader = dict(
sampler=dict(_delete_=True, type='ClassAwareSampler', num_sample_class=1))
| _base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py']
# Use ClassAwareSampler
data = dict(
train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1)))
|
#!/usr/bin/env python3
# Tool quickly rebuild one or two files with debug info
# Mimics following behavior:
# - touch file
# - ninja -j1 -v -n torch_python | sed -e 's/-O[23]/-g/g' -e 's#\[[0-9]\+\/[0-9]\+\] \+##' |sh
# - Copy libs from build/lib to torch/lib folder
from __future__ import annotations
import subproces... | #!/usr/bin/env python3
# Tool quickly rebuild one or two files with debug info
# Mimics following behavior:
# - touch file
# - ninja -j1 -v -n torch_python | sed -e 's/-O[23]/-g/g' -e 's#\[[0-9]\+\/[0-9]\+\] \+##' |sh
# - Copy libs from build/lib to torch/lib folder
from __future__ import annotations
import subproces... |
from .BinaryClassificationEvaluator import BinaryClassificationEvaluator
from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator
from .InformationRetrievalEvaluator import InformationRetrievalEvaluator
from .LabelAccuracyEvaluator import LabelAccuracyEvaluator
from .MSEEvaluator import MSEEvaluator
from ... | from .SentenceEvaluator import SentenceEvaluator
from .SimilarityFunction import SimilarityFunction
from .BinaryClassificationEvaluator import BinaryClassificationEvaluator
from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator
from .InformationRetrievalEvaluator import InformationRetrievalEvaluator
fro... |
"""Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from ... | """Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from ... |
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Client, Document, Flow
from jina.helper import random_port
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'http'])
def test_gateway_concurrency(protocol, reraise):
port = random_port... | import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Client, Document, Flow
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'http'])
def test_gateway_concurrency(protocol, reraise):
port = 12345
CONCURRENCY = 2
def _validate(re... |
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import bbox_overlaps
from .cityscapes_utils import evaluateImgLists
from .class_names import (cityscapes_classes, coco_classes,
coco_panoptic_classes, dataset_aliases, get_classes,
imagenet_det_classe... | # Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import bbox_overlaps
from .cityscapes_utils import evaluateImgLists
from .class_names import (cityscapes_classes, coco_classes,
coco_panoptic_classes, dataset_aliases, get_classes,
imagenet_det_classe... |
import csv
import pathlib
from typing import Any, Callable, Optional, Tuple
import torch
from PIL import Image
from .utils import check_integrity, verify_str_arg
from .vision import VisionDataset
class FER2013(VisionDataset):
"""`FER2013
<https://www.kaggle.com/c/challenges-in-representation-learning-facial... | import csv
import pathlib
from typing import Any, Callable, Optional, Tuple
import torch
from PIL import Image
from .utils import check_integrity, verify_str_arg
from .vision import VisionDataset
class FER2013(VisionDataset):
"""`FER2013
<https://www.kaggle.com/c/challenges-in-representation-learning-facial... |
import numpy as np
from docarray import BaseDoc
from docarray.array.doc_vec.doc_vec import DocVec
from docarray.typing import AnyTensor, NdArray
def test_da_init():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros(10), name='hello') for _ in range(4)]
da =... | import numpy as np
from docarray import BaseDoc
from docarray.array.stacked.array_stacked import DocArrayStacked
from docarray.typing import AnyTensor, NdArray
def test_da_init():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros(10), name='hello') for _ in rang... |
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
@keras_export("keras.StatelessScope")
class StatelessScope:
"""Scope to prevent any update to Keras Variables.
The values of variables to be used inside the scope
should be passed via the `state_mapping` argum... | from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
@keras_export("keras.StatelessScope")
class StatelessScope:
"""Scope to prevent any update to Keras Variables.
The values of variables to be used inside the scope
should be passed via the `state_mapping` argum... |
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import logging
import os.path as osp
from typing import Optional
from mmengine.fileio import dump
from mmengine.logging import print_log
from . import root
from .default_scope import DefaultScope
from .registry import Registry
def traverse_registry_tree... | # Copyright (c) OpenMMLab. All rights reserved.
import datetime
import logging
import os.path as osp
from typing import Optional
from mmengine.fileio import dump
from mmengine.logging import print_log
from . import root
from .default_scope import DefaultScope
from .registry import Registry
def traverse_registry_tree... |
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOO... | # Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOO... |
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
... | _base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
... |
import json
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
try:
import matplotlib
matplotlib.use('Agg')
from graphviz import Source
from matplotlib.axes import Axes
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotli... | import json
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
try:
import matplotlib
matplotlib.use('Agg')
from graphviz import Source
from matplotlib.axes import Axes
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotli... |
from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
class InnerDoc(BaseDoc):
integer: int
inner_list: List
class MMDoc(BaseDoc):
text: str = ''
price: int = 0
categories: Optional[List[str]] = None
image:... | from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
class InnerDoc(BaseDocument):
integer: int
inner_list: List
class MMDoc(BaseDocument):
text: str = ''
price: int = 0
categories: Optional[List[str... |
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar... | from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTorchTensor')
if TYPE_CHECKING:
from... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import BlockchainDocumentLoader
from langchain_community.document_loaders.blockchain import BlockchainType
# Create a way to dynamically look up deprecated imports.
# U... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import BlockchainDocumentLoader
from langchain_community.document_loaders.blockchain import BlockchainType
# Create a way to dynamically look up deprecated imports.
# U... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def bui... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def bui... |
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.retrievers import BaseRetriever, RetrieverLike
from pydantic import ConfigDict
class C... | from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.retrievers import BaseRetriever, RetrieverLike
from pydantic import ConfigDict
class C... |
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float | None = None) -> None:
""... | from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .custom import CustomDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfa... | # Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .custom import CustomDataset
from .dataset_wrappers import (ClassBalancedDataset, ConcatData... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.losses import Reduction as Reduction
from keras.src.losses import deserialize as deserialize
from keras.src.losses import get as get
from keras.src.losses import serialize as s... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.losses import Reduction
from keras.src.losses import deserialize
from keras.src.losses import get
from keras.src.losses import serialize
from keras.src.losses.loss import Loss
... |
from typing import Any
from unittest.mock import Mock, patch
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain.runnables.hub import HubRunnable
@patch("langchain.hub.pull")
def test_hub_runnable(mock_pull: Mock) -> None:
mock_pull.return_... | from typing import Any
from unittest.mock import Mock, patch
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain.runnables.hub import HubRunnable
@patch("langchain.hub.pull")
def test_hub_runnable(mock_pull: Mock) -> None:
mock_pull.return_... |
from __future__ import annotations
from dataclasses import field
from typing import Any, Callable
import torch
from sentence_transformers.data_collator import SentenceTransformerDataCollator
class CrossEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a CrossEncoder model.
This encodes ... | from __future__ import annotations
from dataclasses import field
from typing import Any, Callable
import torch
from sentence_transformers.data_collator import SentenceTransformerDataCollator
class CrossEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a CrossEncoder model.
This encodes ... |
_base_ = [
'../common/ms-poly_3x_coco-instance.py',
'../_base_/models/mask-rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_1.6gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=Tr... | _base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_1.6gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_gr... |
from typing import TYPE_CHECKING, Type
if TYPE_CHECKING:
from pandas import DataFrame
from docarray.typing import T
class DataframeIOMixin:
"""Save/load from :class:`pandas.dataframe`
.. note::
These functions require you to install `pandas`
"""
def to_dataframe(self, **kwargs) -> ... | from typing import TYPE_CHECKING, Type
if TYPE_CHECKING:
from pandas import DataFrame
from ....typing import T
class DataframeIOMixin:
"""Save/load from :class:`pandas.dataframe`
.. note::
These functions require you to install `pandas`
"""
def to_dataframe(self, **kwargs) -> 'Data... |
__version__ = "3.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import Lo... | __version__ = "2.2.2"
__MODEL_HUB_ORGANIZATION__ = 'sentence-transformers'
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncod... |
import re
from typing import TYPE_CHECKING, Any, Dict, Union
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __init_... | class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
"""
This is called during training to evaluate the model.
... |
"""Test in memory docstore."""
from langchain.output_parsers.regex_dict import RegexDictParser
DEF_EXPECTED_RESULT = {"action": "Search", "action_input": "How to use this class?"}
DEF_OUTPUT_KEY_TO_FORMAT = {"action": "Action", "action_input": "Action Input"}
DEF_README = """We have just received a new result from ... | """Test in memory docstore."""
from typing import Dict
from langchain.output_parsers.regex_dict import RegexDictParser
DEF_EXPECTED_RESULT = {"action": "Search", "action_input": "How to use this class?"}
DEF_OUTPUT_KEY_TO_FORMAT = {"action": "Action", "action_input": "Action Input"}
DEF_README = """We have just re... |
import numpy as np
def approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
It is the mostly likely... | import numpy as np
def approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
It is the mostly likely... |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless r... | # coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless r... |
_base_ = './dino-4scale_r50_8xb2-12e_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
num_levels = 5
model = dict(
num_feature_levels=num_levels,
backbone=dict(
_delete_=True,
type='SwinTransformer',
... | _base_ = './dino-4scale_r50_8xb2-12e_coco.py'
fp16 = dict(loss_scale=512.)
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
num_levels = 5
model = dict(
num_feature_levels=num_levels,
backbone=dict(
_delete_=True,
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import MongoDBChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import MongoDBChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... |
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()
de... | import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()
de... |
from .checkpointer import Checkpoint, WorkflowCheckpointer
from .context import Context
from .context_serializers import JsonPickleSerializer, JsonSerializer
from .decorators import step
from .errors import WorkflowRuntimeError, WorkflowTimeoutError, WorkflowValidationError
from .events import Event, HumanResponseEvent... | from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.drawing import (
draw_all_possible_flows,
draw_most_recent_execution,
)
from llama_index.core.workflow.errors import (
WorkflowRuntimeError,
WorkflowTimeoutError,
... |