| input (string, 33–5k chars) | output (string, 32–5k chars) |
|---|---|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, Mesh3DUrl
T = TypeVar('T', bound='Mesh3D')
class Mesh3D(BaseDocument):
"""
Document for handling meshes for 3D data representation.
A mesh is a rep... | from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, Mesh3DUrl
T = TypeVar('T', bound='Mesh3D')
class Mesh3D(BaseDocument):
"""
Document for handling meshes for 3D data representation.
A mesh is a rep... |
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... | import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... |
import os
from typing import Dict
DEPLOYMENT_FILES = [
'statefulset-executor',
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir,... | import os
from typing import Dict
DEPLOYMENT_FILES = [
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir, '..', '..', '..', '..', 're... |
import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
... | import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
... |
"""Patentsview reader that reads patent abstract."""
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
BASE_URL = "https://api.patentsview.org/patents/query"
class PatentsviewReader(BaseReader):
"""
Patentsview reader.
... | """Patentsview reader that reads patent abstract."""
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
BASE_URL = "https://api.patentsview.org/patents/query"
class PatentsviewReader(BaseReader):
"""Patentsview reader.
R... |
# Copyright (c) OpenMMLab. All rights reserved.
from .dist import (all_gather_object, all_reduce, all_gather, all_reduce_dict,
collect_results, gather, broadcast, gather_object,
sync_random_seed, broadcast_object_list,
collect_results_cpu, collect_results_gpu, al... | # Copyright (c) OpenMMLab. All rights reserved.
from .dist import (all_gather_object, all_reduce, all_gather, all_reduce_dict,
collect_results, gather, broadcast, gather_object,
sync_random_seed, broadcast_object_list,
collect_results_cpu, collect_results_gpu, al... |
import logging
import sys
import traceback
from datasets import Dataset, load_dataset
from peft import LoraConfig, TaskType
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerModelCardData,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
)
from sentence_trans... | import logging
import sys
import traceback
from datasets import Dataset, load_dataset
from peft import LoraConfig, TaskType
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerModelCardData,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
)
from sentence_trans... |
from typing import TYPE_CHECKING
import pytest
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from pytest_mock import MockerFixture
from langchain_community.chat_message_histories import ZepChatMessageHistory
if TYPE_CHECKING:
from zep_python import ZepClient
@pytest.fixture
@pytest... | from typing import TYPE_CHECKING
import pytest
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from pytest_mock import MockerFixture
from langchain_community.chat_message_histories import ZepChatMessageHistory
if TYPE_CHECKING:
from zep_python import ZepClient
@pytest.fixture
@pytest... |
import io
import warnings
from abc import ABC
from typing import TYPE_CHECKING
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> bytes:
... | import io
import warnings
from abc import ABC
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import is_notebook
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> bytes:
"""
Convert image tensor to bytes... |
# coding: utf-8
"""LightGBM, Light Gradient Boosting Machine.
Contributors: https://github.com/microsoft/LightGBM/graphs/contributors.
"""
from pathlib import Path
from .basic import Booster, Dataset, Sequence, register_logger
from .callback import early_stopping, log_evaluation, record_evaluation, reset_parameter
fr... | # coding: utf-8
"""LightGBM, Light Gradient Boosting Machine.
Contributors: https://github.com/microsoft/LightGBM/graphs/contributors.
"""
from pathlib import Path
from .basic import Booster, Dataset, Sequence, register_logger
from .callback import early_stopping, log_evaluation, print_evaluation, record_evaluation, ... |
from typing import Callable, Optional
from .. import Features, NamedSplit, Split
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
... | from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
features: Optional... |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
def _check_no_duplicates_on_constructed_node(self, node):
keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
keys = [tuple(... | from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
def _check_no_duplicates_on_constructed_node(self, node):
keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
keys = [tuple(... |
import os
import yaml
from jina.serve.runtimes.gateway.gateway import BaseGateway, Gateway
from jina.jaml import JAML
class MyDummyGateway(Gateway):
async def setup_server(self):
self.server = 'dummy server'
async def run_server(self):
self.logger.info(self.server)
async def shutdown(s... | import os
import pytest
import yaml
from jina import Gateway
from jina.jaml import JAML
from jina.serve.executors import BaseExecutor
class MyDummyGateway(Gateway):
async def setup_server(self):
self.server = 'dummy server'
async def run_server(self):
self.logger.info(self.server)
asyn... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import os
from PIL import Image
from jina import Executor
from jina.executors import BaseExecutor
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'confi... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from PIL import Image
from jina.executors import BaseExecutor
def test_io_images_and_text(test_dir, doc_generator_img_text, expected_text):
crafter = BaseExecutor.load_config('config.yml')
doc... |
import copy
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, fil... | from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _s... |
import itertools
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class GeneratorDataAdapter(DataAdapter):
"""Adapter for Python generators."""
def __init__(self, generator):
first_batches... | import itertools
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class GeneratorDataAdapter(DataAdapter):
"""Adapter for Python generators."""
def __init__(self, generator):
first_batches... |
"""langchain-core version information and utilities."""
VERSION = "0.3.56rc1"
| """langchain-core version information and utilities."""
VERSION = "0.3.55"
|
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
class MSELoss(nn.Module):
def __init__(self, model):
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new l... | import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
class MSELoss(nn.Module):
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in... |
from typing import Any, Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.couchbase import CouchbaseKVStore
class CouchbaseIndexStore(KVIndexStore):
"""Couchbase Index store."""
def __init__(
self,
couchbase_kvstore: Co... | from typing import Any, Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.couchbase import CouchbaseKVStore
class CouchbaseIndexStore(KVIndexStore):
"""Couchbase Index store."""
def __init__(
self,
couchbase_kvstore: Co... |
from docarray.base_document.any_document import AnyDocument
from docarray.base_document.base_node import BaseNode
from docarray.base_document.document import BaseDocument
from docarray.base_document.document_response import DocumentResponse
__all__ = ['AnyDocument', 'BaseDocument', 'BaseNode', 'DocumentResponse']
| from docarray.base_document.any_document import AnyDocument
from docarray.base_document.base_node import BaseNode
from docarray.base_document.document import BaseDocument
__all__ = ['AnyDocument', 'BaseDocument', 'BaseNode']
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | # flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.graph_qa.prompts import (
AQL_FIX_TEMPLATE,
AQL_GENERATION_TEMPLATE,
AQL_QA_TEMPLATE,
CYPHER_GENERATION_PROMPT,
CYPHER_GENERATION_TEMPLATE,
... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.graph_qa.prompts import (
AQL_FIX_TEMPLATE,
AQL_GENERATION_TEMPLATE,
AQL_QA_TEMPLATE,
CYPHER_GENERATION_PROMPT,
CYPHER_GENERATION_TEMPLATE,
... |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from data... | import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from data... |
import csv
import os
from pathlib import Path
from typing import Union
import torchaudio
from torch.utils.data import Dataset
class FluentSpeechCommands(Dataset):
"""Create *Fluent Speech Commands* [:footcite:`fluent`] Dataset
Args:
root (str or Path): Path to the directory where the dataset is foun... | import csv
import os
from pathlib import Path
from typing import Union
import torchaudio
from torch.utils.data import Dataset
class FluentSpeechCommands(Dataset):
"""Create *Fluent Speech Commands* [:footcite:`fluent`] Dataset
Args:
root (str or Path): Path to the directory where the dataset is foun... |
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
... | from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
... |
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
tra... | _base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640)
train_pipeline = [
... |
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
from .utils import ... | from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import TorchaudioTestCase, skipIfNoModule
from .utils import ... |
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_new_project_parser(parser=None):
"""Set the parser for `new`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument... | """Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_new_project_parser(parser=None):
"""Set the parser for `new`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument... |
from torchaudio.datasets import librispeech
from torchaudio_unittest.common_utils import TorchaudioTestCase
from torchaudio_unittest.datasets.librispeech_test_impl import LibriSpeechTestMixin
class TestLibriSpeech(LibriSpeechTestMixin, TorchaudioTestCase):
librispeech_cls = librispeech.LIBRISPEECH
| import os
from pathlib import Path
from torchaudio.datasets import librispeech
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
# Used to generate a unique transcript for each dummy audio file
_NUMBERS = ["ZERO", "ONE", "TW... |
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmcv.utils import ConfigDict
from mmdet.models.utils.transformer import (DetrTransformerDecoder,
DetrTransformerEncoder,
Transformer)
def test_detr_transformer_de... | import pytest
from mmcv.utils import ConfigDict
from mmdet.models.utils.transformer import (DetrTransformerDecoder,
DetrTransformerEncoder,
Transformer)
def test_detr_transformer_dencoder_encoder_layer():
config = ConfigDict(... |
"""Tool for the OpenWeatherMap API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
class OpenWeatherMapQueryRun(BaseT... | """Tool for the OpenWeatherMap API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
class OpenWeatherMapQueryRun(BaseT... |
"""AgentQL Web Reader."""
import httpx
from typing import Optional, List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
import logging
logging.getLogger("root").setLevel(logging.INFO)
QUERY_DATA_ENDPOINT = "https://api.agentql.com/v1/query-data"
API_TIMEOUT... | """AgentQL Web Reader."""
import httpx
from typing import Optional, List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
import logging
logging.getLogger("root").setLevel(logging.INFO)
QUERY_DATA_ENDPOINT = "https://api.agentql.com/v1/query-data"
API_TIMEOUT... |
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
SparseEncoderTrainingArguments extends :class:`~SentenceTransf... | from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
SparseEncoderTrainingArguments extends :class:`~SentenceTransfo... |
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICE... | # ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICE... |
import pytest as pytest
from langchain_core.documents import Document
from langchain.retrievers.multi_query import LineListOutputParser, _unique_documents
@pytest.mark.parametrize(
"documents,expected",
[
([], []),
([Document(page_content="foo")], [Document(page_content="foo")]),
([Do... | from typing import List
import pytest as pytest
from langchain_core.documents import Document
from langchain.retrievers.multi_query import LineListOutputParser, _unique_documents
@pytest.mark.parametrize(
"documents,expected",
[
([], []),
([Document(page_content="foo")], [Document(page_conte... |
# coding: utf-8
"""Tests for dual GPU+CPU support."""
import os
import platform
import pytest
from sklearn.metrics import log_loss
import lightgbm as lgb
from .utils import load_breast_cancer
@pytest.mark.skipif(
os.environ.get("LIGHTGBM_TEST_DUAL_CPU_GPU", None) is None,
reason="Only run if appropriate e... | # coding: utf-8
"""Tests for dual GPU+CPU support."""
import os
import platform
import pytest
from sklearn.metrics import log_loss
import lightgbm as lgb
from .utils import load_breast_cancer
@pytest.mark.skipif(
os.environ.get("LIGHTGBM_TEST_DUAL_CPU_GPU", None) is None,
reason="Only run if appropriate e... |
"""A tracer that runs evaluators over completed runs."""
from langchain_core.tracers.evaluation import (
EvaluatorCallbackHandler,
wait_for_all_evaluators,
)
__all__ = ["EvaluatorCallbackHandler", "wait_for_all_evaluators"]
| """A tracer that runs evaluators over completed runs."""
from langchain_core.tracers.evaluation import (
EvaluatorCallbackHandler,
wait_for_all_evaluators,
)
__all__ = ["wait_for_all_evaluators", "EvaluatorCallbackHandler"]
|
# CoSENTLoss must be imported before AnglELoss
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchH... | from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .CosineSimilarityLoss import CosineSimilarityLoss
from .SoftmaxLoss import SoftmaxLoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .TripletLoss i... |
from __future__ import annotations
import torch
from sentence_transformers.models.Module import Module
class SpladePooling(Module):
"""
SPLADE Pooling module for creating the sparse embeddings.
This module implements the SPLADE pooling mechanism that:
1. Takes token logits from a masked language m... | from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""
SPLADE Pooling module for creating the sparse embeddings.
This module implements the SPLADE pooling mechanism that:
1. Takes token logits from a mas... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but we hope to see some on the hub soon
m... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but we hope to see some on the hub soon
m... |
from ...utils import is_flax_available, is_torch_available
if is_torch_available():
from .controlnet import ControlNetModel, ControlNetOutput
from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
from .controlnet_hunyuan import (
HunyuanControlNetOutput,
... | from ...utils import is_flax_available, is_torch_available
if is_torch_available():
from .controlnet import ControlNetModel, ControlNetOutput
from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
from .controlnet_hunyuan import (
HunyuanControlNetOutput,
... |
from typing import Any, Literal
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
SchemaField,
)
from backend.util.request import requests
TEST_CREDEN... | from typing import Any, Literal
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField
from b... |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
"""Sample a fraction of the Spider dataset."""
import argparse
import json
import os
import random
import shutil
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Create a sampled version of the Spider dataset."
)
parser.add_argument(
"--input",
type=str,
... | """Sample a fraction of the Spider dataset."""
import argparse
import json
import os
import random
import shutil
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Create a sampled version of the Spider dataset."
)
parser.add_argument(
"--input",
type=str,
... |
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction
from sentence_transformers.models import Pooli... | from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transform... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.anthropic import (
ChatAnthropic,
convert_messages_to_prompt_anthropic,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate log... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.anthropic import (
ChatAnthropic,
convert_messages_to_prompt_anthropic,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate log... |
from typing import Iterable, Type
from docarray.array.abstract_array import AbstractDocumentArray
from docarray.array.mixins import GetAttributeArrayMixin, ProtoArrayMixin
from docarray.document import AnyDocument, BaseDocument, BaseNode
class DocumentArray(
list,
ProtoArrayMixin,
GetAttributeArrayMixin,... | from typing import Iterable, Type
from docarray.array.abstract_array import AbstractDocumentArray
from docarray.array.mixins import GetAttributeArrayMixin, ProtoArrayMixin
from docarray.document import AnyDocument, BaseDocument, BaseNode
class DocumentArray(
list,
ProtoArrayMixin,
GetAttributeArrayMixin,... |
import pytest
from langchain_core.memory import BaseMemory
from langchain.chains.conversation.memory import (
ConversationBufferMemory,
ConversationBufferWindowMemory,
ConversationSummaryMemory,
)
from langchain.memory import ReadOnlySharedMemory, SimpleMemory
from tests.unit_tests.llms.fake_llm import Fak... | import pytest
from langchain_core.memory import BaseMemory
from langchain.chains.conversation.memory import (
ConversationBufferMemory,
ConversationBufferWindowMemory,
ConversationSummaryMemory,
)
from langchain.memory import ReadOnlySharedMemory, SimpleMemory
from tests.unit_tests.llms.fake_llm import Fak... |
from gravitasml.parser import Parser
from gravitasml.token import tokenize
from backend.data.block import Block, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class XMLParserBlock(Block):
class Input(BlockSchema):
input_xml: str = SchemaField(description="input xml to be parsed")
... | from gravitasml.parser import Parser
from gravitasml.token import tokenize
from backend.data.block import Block, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class XMLParserBlock(Block):
class Input(BlockSchema):
input_xml: str = SchemaField(description="input xml to be parsed")
... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
data_preprocess... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
data_preprocess... |
import os
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Dict
import orjson
from pydantic import BaseModel, Field
from rich.console import Console
from docarray.base_doc.base_node import BaseNode
from docarray.base_doc.io.json import orjson_dumps_and_decode
from docarray.base_doc.mixins import IOMixi... | import os
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar
import orjson
from pydantic import BaseModel, Field
from rich.console import Console
from docarray.base_doc.base_node import BaseNode
from docarray.base_doc.io.json import orjson_dumps, orjson_dumps_and_decode
from docarray.base_doc.mixins impor... |
import pytest
from sklearn.base import (
BaseEstimator,
RegressorMixin,
TransformerMixin,
)
from sklearn.utils._tags import get_tags
class NoTagsEstimator:
pass
class ClassifierEstimator:
# This is to test whether not inheriting from mixins works.
_estimator_type = "classifier"
class Empt... | import pytest
from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin
from sklearn.utils._tags import get_tags
class NoTagsEstimator:
pass
class ClassifierEstimator:
# This is to test whether not inheriting from mixins works.
_estimator_type = "classifier"
@pytest.mark.parametrize(
... |
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
... | from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
... |
from typing import Literal
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from backend.data.model import CredentialsField, CredentialsMetaInput
JinaCredentials = APIKeyCredentials
JinaCredentialsInput = CredentialsMetaInput[
Literal["jina"],... | from typing import Literal
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from backend.data.model import CredentialsField, CredentialsMetaInput
JinaCredentials = APIKeyCredentials
JinaCredentialsInput = CredentialsMetaInput[
Literal["jina"],... |
"""Test base tool child implementations."""
import inspect
import re
from typing import List, Type
import pytest
from langchain_core.tools import BaseTool
from langchain_community.tools.amadeus.base import AmadeusBaseTool
from langchain_community.tools.gmail.base import GmailBaseTool
from langchain_community.tools.o... | """Test base tool child implementations."""
import inspect
import re
from typing import List, Type
import pytest
from langchain_core.tools import BaseTool
from langchain_community.tools.amadeus.base import AmadeusBaseTool
from langchain_community.tools.gmail.base import GmailBaseTool
from langchain_community.tools.o... |
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeXt
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block
def test_renext_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pyt... | import pytest
import torch
from mmdet.models.backbones import ResNeXt
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block
def test_renext_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckX(64, 64, grou... |
"""Test HyDE."""
from typing import Any, Optional
import numpy as np
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.embeddings import Embeddings
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs im... | """Test HyDE."""
from typing import Any, List, Optional
import numpy as np
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.embeddings import Embeddings
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outp... |
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl... | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl... |
from .database import DatabaseManager, DatabaseManagerClient
from .manager import ExecutionManager
from .scheduler import Scheduler
__all__ = [
"DatabaseManager",
"DatabaseManagerClient",
"ExecutionManager",
"Scheduler",
]
| from .database import DatabaseManager
from .manager import ExecutionManager
from .scheduler import Scheduler
__all__ = [
"DatabaseManager",
"ExecutionManager",
"Scheduler",
]
|
from typing import Dict, List, Optional, Set, Tuple
import numpy as np
import pytest
import torch
from docarray import DocumentArray
from docarray.base_document import BaseDocument
from docarray.typing import NdArray, TorchTensor
@pytest.mark.proto
def test_proto_simple():
class CustomDoc(BaseDocument):
... | from typing import Dict, List, Optional, Set, Tuple
import numpy as np
import pytest
import torch
from docarray import DocumentArray
from docarray.base_document import BaseDocument
from docarray.typing import NdArray, TorchTensor
@pytest.mark.proto
def test_proto_simple():
class CustomDoc(BaseDocument):
... |
# flake8: noqa
JIRA_ISSUE_CREATE_PROMPT = """
This tool is a wrapper around atlassian-python-api's Jira issue_create API, useful when you need to create a Jira issue.
The input to this tool is a dictionary specifying the fields of the Jira issue, and will be passed into atlassian-python-api's Jira `issue_creat... | # flake8: noqa
JIRA_ISSUE_CREATE_PROMPT = """
This tool is a wrapper around atlassian-python-api's Jira issue_create API, useful when you need to create a Jira issue.
The input to this tool is a dictionary specifying the fields of the Jira issue, and will be passed into atlassian-python-api's Jira `issue_creat... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn.bricks import build_plugin_layer
from mmcv.runner import force_fp32
from mmdet.registry import MODELS
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class GenericRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from ... | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn.bricks import build_plugin_layer
from mmcv.runner import force_fp32
from mmdet.models.builder import ROI_EXTRACTORS
from .base_roi_extractor import BaseRoIExtractor
@ROI_EXTRACTORS.register_module()
class GenericRoIExtractor(BaseRoIExtractor):
"""Extr... |
import numpy as np
import torch
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from ... | import numpy as np
import torch
from docarray import BaseDocument, DocumentArray, Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import ... |
from __future__ import annotations
import logging
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.utils.json import parse_json_markdown
from langchain.agents.agent import AgentOutputParser
logger = lo... | from __future__ import annotations
import logging
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.json import parse_json_markdown
from langchain.agents.agent import AgentOutputParser
lo... |
from typing import Any
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
def is_to... | try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
def is_torch_available():
return torch_imported
... |
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SSDHead
class TestSSDHead(TestCase):
def test_ssd_head_loss(self):... | # Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SSDHead
class TestSSDHead(TestCase):
def test_ssd_head_loss(self):... |
"""
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level... | """
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
f... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
f... |
from .torch_object_detection_segmenter import TorchObjectDetectionSegmenter
| from .torch_object_detection_segmenter import TorchObjectDetectionSegmenter |
from __future__ import annotations
from typing import Any, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: Senten... | from __future__ import annotations
from typing import Any, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: Senten... |
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='SOLO',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
... | _base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='SOLO',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
... |
import pytest
from llama_index.readers.github import GithubRepositoryReader
class MockGithubClient:
pass
@pytest.fixture()
def github_reader():
return GithubRepositoryReader(
github_client=MockGithubClient(), owner="owner", repo="repo"
)
@pytest.mark.parametrize(
("blob_url", "expected_bas... | import pytest
from llama_index.readers.github import GithubRepositoryReader
class MockGithubClient:
pass
@pytest.fixture()
def github_reader():
return GithubRepositoryReader(
github_client=MockGithubClient(), owner="owner", repo="repo"
)
@pytest.mark.parametrize(
("blob_url", "expected_bas... |
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
... | _base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
... |
from __future__ import annotations
import math
from pathlib import Path
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
sk... | from __future__ import annotations
import math
from pathlib import Path
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec... |
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmengine.testing import assert_allclose
from mmdet.core.mask import BitmapMasks, PolygonMasks
def create_random_bboxes(num_bboxes, img_w, img_h):
bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
bboxes_right_bottom = np.... | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmdet.core.mask import BitmapMasks
def create_random_bboxes(num_bboxes, img_w, img_h):
bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
bboxes = n... |
import os
import time
import pytest
from docarray import Document
from jina import Flow
from jina.constants import __cache_path__
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='module')
def filewriter_exec_docker_image_built():
import docker
client = docker.from_env()
clie... | import os
import time
import pytest
from docarray import Document
from jina import Flow, __cache_path__
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='module')
def filewriter_exec_docker_image_built():
import docker
client = docker.from_env()
client.images.build(
p... |
# Copyright (c) OpenMMLab. All rights reserved.
from .det_data_sample import DetDataSample
__all__ = ['DetDataSample']
| # Copyright (c) OpenMMLab. All rights reserved.
from .general_data import GeneralData
from .instance_data import InstanceData
__all__ = ['GeneralData', 'InstanceData']
|
"""Test in memory indexer."""
from collections.abc import AsyncGenerator, Generator
import pytest
from langchain_tests.integration_tests.indexer import (
AsyncDocumentIndexTestSuite,
DocumentIndexerTestSuite,
)
from langchain_core.documents import Document
from langchain_core.indexing.base import DocumentInd... | """Test in memory indexer."""
from collections.abc import AsyncGenerator, Generator
import pytest
from langchain_tests.integration_tests.indexer import (
AsyncDocumentIndexTestSuite,
DocumentIndexerTestSuite,
)
from langchain_core.documents import Document
from langchain_core.indexing.base import DocumentInd... |
"""Llava Completion Pack."""
from typing import Any, Dict
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.llms.replicate import Replicate
class LlavaCompletionPack(BaseLlamaPack):
"""Llava Completion pack."""
def __init__(
self,
image_url: str,
**kwargs: ... | """Llava Completion Pack."""
from typing import Any, Dict
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.llms.replicate import Replicate
class LlavaCompletionPack(BaseLlamaPack):
"""Llava Completion pack."""
def __init__(
self,
image_url: str,
**kwargs:... |
"""
==============================
Ordinary Least Squares Example
==============================
This example shows how to use the ordinary least squares (OLS) model
called :class:`~sklearn.linear_model.LinearRegression` in scikit-learn.
For this purpose, we use a single feature from the diabetes dataset and try to
p... | """
=========================================================
Linear Regression Example
=========================================================
The example below uses only the first feature of the `diabetes` dataset,
in order to illustrate the data points within the two-dimensional plot.
The straight line can be seen... |
from typing import Optional
import numpy as np
from docarray import DocumentArray
from docarray.document import BaseDocument
from docarray.typing import Tensor
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobu... | from typing import Optional
import numpy as np
from docarray import DocumentArray
from docarray.document import BaseDocument
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_ndarray():
... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | import numpy as np
import pytest
from typing import Dict, List
from docarray import BaseDoc, DocList
from docarray.base_doc import AnyDoc
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDoc):
text: str
... |
import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from backend.util.process import AppProcess
logger = logging.getLogger(__name__)
def run_processes(*processes: "AppProcess", **kwargs):
"""
Execute all processes in the app. The last process is run in the foreground.
Includes enhanced... | import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from backend.util.process import AppProcess
logger = logging.getLogger(__name__)
def run_processes(*processes: "AppProcess", **kwargs):
"""
Execute all processes in the app. The last process is run in the foreground.
Includes enhanced... |
import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_singl... | import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_b... |
from pathlib import Path
from typing import Any, Iterator, List
import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders import DirectoryLoader
from langchain_community.document_loaders.text import TextLoader
def test_raise_error_if_path_not_exist() -> None:
loader =... | from pathlib import Path
from typing import Any, Iterator, List
import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders import DirectoryLoader
from langchain_community.document_loaders.text import TextLoader
def test_raise_error_if_path_not_exist() -> None:
loader =... |
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.backbones import ResNet
from mmdet.models.builder import SHARED_HEADS
from mmdet.models.utils import ResLayer as _ResLayer
@SHARED_HEADS.register_module()
class ResLa... | import warnings
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.backbones import ResNet
from mmdet.models.builder import SHARED_HEADS
from mmdet.models.utils import ResLayer as _ResLayer
@SHARED_HEADS.register_module()
class ResLayer(BaseModule):
def __init__(self,
... |
import copy
import os.path as osp
import unittest
from mmcv.transforms import Compose
from mmdet.datasets.transforms import MultiBranch
from mmdet.utils import register_all_modules
register_all_modules()
class TestMultiBranch(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which ... | import copy
import os.path as osp
import unittest
from mmcv.transforms import Compose
from mmdet.datasets.transforms import MultiBranch
from mmdet.utils import register_all_modules
register_all_modules()
class TestMultiBranch(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which ... |
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTen... | from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarr... |
# TODO: deprecate
agent_instructions = """You are a helpful assistant. Help the user answer any questions.
You have access to the following tools:
{tools}
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. \
You will then get back a response in the form <observation></observation>
... | # flake8: noqa
# TODO: deprecate
agent_instructions = """You are a helpful assistant. Help the user answer any questions.
You have access to the following tools:
{tools}
In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. \
You will then get back a response in the form <observation>... |
_base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py']
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa
model = dict(
teacher_config='configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_co... | _base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py']
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa
model = dict(
teacher_config='configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_co... |
from typing import TYPE_CHECKING, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
if TYPE_CHECKING:
# This is for linting and IDE typehints
import multion
else:
try:
# We do this ... | from typing import TYPE_CHECKING, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
if TYPE_CHECKING:
# This is for linting and IDE typehints
import multion
else:
try:
# We do this ... |
_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py']
model = dict(
backbone=dict(
type='ResNet',
depth=34,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_c... | _base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py']
model = dict(
backbone=dict(
type='ResNet',
depth=34,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_c... |
import os
import socket
from typing import TYPE_CHECKING, Optional
def get_docker_network(client) -> Optional[str]:
"""Do a best-effort guess if the caller is already in a docker network
Check if `hostname` exists in list of docker containers.
If a container is found, check its network id
:param cli... | import os
import socket
from typing import Optional, TYPE_CHECKING
def get_docker_network(client) -> Optional[str]:
"""Do a best-effort guess if the caller is already in a docker network
Check if `hostname` exists in list of docker containers.
If a container is found, check its network id
:param cl... |
"""Module contains a few fake embedding models for testing purposes."""
# Please do not add additional fake embedding model implementations here.
import hashlib
from pydantic import BaseModel
from typing_extensions import override
from langchain_core.embeddings import Embeddings
class FakeEmbeddings(Embeddings, Ba... | """Module contains a few fake embedding models for testing purposes."""
# Please do not add additional fake embedding model implementations here.
import hashlib
from pydantic import BaseModel
from typing_extensions import override
from langchain_core.embeddings import Embeddings
class FakeEmbeddings(Embeddings, Ba... |
import os
import pytest
from google.ai.generativelanguage_v1beta.types import (
FunctionCallingConfig,
ToolConfig,
)
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole
from llama_index.core.prompts.base import ChatPromptTemplate... | import os
import pytest
from google.ai.generativelanguage_v1beta.types import (
FunctionCallingConfig,
ToolConfig,
)
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole
from llama_index.core.prompts.base import ChatPromptTemplate... |
import torch
from docarray import Document
from docarray.typing import TorchEmbedding, TorchTensor
def test_set_torch_tensor():
class MyDocument(Document):
tensor: TorchTensor
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tens... | import torch
from docarray import Document
from docarray.typing import TorchTensor
def test_set_torch_tensor():
class MyDocument(Document):
tensor: TorchTensor
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor... |
from typing import Any, Optional, Type, TypeVar, Union
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
from docarray.typing.tensor.embedding import AnyEmbedding
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
from docar... | from typing import Any, Optional, Type, TypeVar, Union
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
from docarray.typing.tensor.embedding import AnyEmbedding
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
T = Type... |
from pathlib import Path
from typing import Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document, ImageDocument
from llama_index.core.utils import infer_torch_device
class ImageVisionLLMReader(BaseReader):
"""
Image parser.
Caption image ... | from pathlib import Path
from typing import Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document, ImageDocument
from llama_index.core.utils import infer_torch_device
class ImageVisionLLMReader(BaseReader):
"""Image parser.
Caption image using... |
import json
import logging
from enum import Enum
from typing import Any
from requests.exceptions import HTTPError, RequestException
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
logger = logging.getLo... | import json
from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HttpMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
... |
"""
Example showing how to use the SpladeLambdaSchedulerCallback to gradually
increase the lambda parameters during training of a SPLADE model.
"""
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SchedulerType,
SparseEncoder,
SparseEncoderTrainer,
Sp... | from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseEncoderTrainer,
SparseMarginMSELoss,
SpladeLoss,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
... |
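For reference, a minimal sketch of iterating over input/output pairs like the ones above with the Hugging Face `datasets` library. The dataset id `org/code-pairs` is a placeholder, not the real repository name; only the `input`/`output` string columns are taken from the table header above.

```python
from datasets import load_dataset

# Placeholder repository id -- substitute the actual dataset name.
pairs = load_dataset("org/code-pairs", split="train")

# Each row holds two strings, matching the columns shown above.
for row in pairs.select(range(3)):
    print("input :", row["input"][:80])
    print("output:", row["output"][:80])
```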