| input | output |
|---|---|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from sentence_transformers import SentenceTr... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from sentence_transformers import SentenceTr... |
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransfor... | from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransfor... |
"""Standard LangChain interface tests"""
import base64
from pathlib import Path
from typing import Literal, cast
import httpx
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage
from langchain_tests.integration_tests import ChatModelIntegr... | """Standard LangChain interface tests"""
import base64
from pathlib import Path
from typing import Literal, cast
import httpx
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage
from langchain_tests.integration_tests import ChatModelIntegr... |
"""Test Azure OpenAI Chat API wrapper."""
import os
from unittest import mock
import pytest
from langchain_core.messages import HumanMessage
from typing_extensions import TypedDict
from langchain_openai import AzureChatOpenAI
def test_initialize_azure_openai() -> None:
llm = AzureChatOpenAI( # type: ignore[ca... | """Test Azure OpenAI Chat API wrapper."""
import os
from unittest import mock
import pytest
from typing_extensions import TypedDict
from langchain_openai import AzureChatOpenAI
def test_initialize_azure_openai() -> None:
llm = AzureChatOpenAI( # type: ignore[call-arg]
azure_deployment="35-turbo-dev",
... |
"""
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what is considered similar. A high threshold will
only find extremely simila... | """
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what is considered similar. A high threshold will
only find extremely simila... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Iterable, Optional
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
fr... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Iterable, Optional
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
fr... |
import copy
from typing import Any, Dict, List, Tuple
_SPECIFIC_EXECUTOR_SEPARATOR = '__'
def _spit_key_and_executor_name(key_name: str) -> Tuple[str]:
"""Split a specific key into a key, name pair
ex: 'key__my_executor' will be split into 'key', 'my_executor'
:param key_name: key name of the param
... | import copy
from typing import Dict, Tuple
from jina.serve.runtimes.request_handlers.data_request_handler import DataRequestHandler
_SPECIFIC_EXECUTOR_SEPARATOR = '__'
def _spit_key_and_executor_name(key_name: str) -> Tuple[str]:
"""Split a specific key into a key, name pair
ex: 'key__my_executor' will be ... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import i... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import import_vectors
from ..hnswlib_searc... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestGLIP(TestCas... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestGLIP(TestCas... |
import gc
import unittest
import torch
from diffusers import (
StableDiffusionImg2ImgPipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
slow,
torch_device,
)
from .single_file_... | import gc
import unittest
import torch
from diffusers import (
StableDiffusionImg2ImgPipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
enable_full_determinism,
require_torch_gpu,
slow,
)
from .single_file_testing_utils import SDSingleFileTesterMixin
enab... |
_base_ = 'tridentnet_r50-caffe_ms-1x_coco.py'
# learning rate
max_epochs = 36
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
... | _base_ = 'tridentnet_r50_caffe_mstrain_1x_coco.py'
# learning rate
max_epochs = 36
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR'... |
import asyncio
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class MergerRetriever(BaseRetriever):
"""Retriever that merges the results of mult... | import asyncio
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class MergerRetriever(BaseRetriever):
"""Retriever that merges the results of mult... |
_base_ = [
'../_base_/models/rpn_r50_fpn.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
val_evaluator = dict(metric='proposal_fast')
test_evaluator = val_evaluator
# inference on the val dataset and dump the proposals with the evaluation metric
# data... | _base_ = [
'../_base_/models/rpn_r50_fpn.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
val_evaluator = dict(metric='proposal_fast')
test_evaluator = val_evaluator
# inference on the val dataset and dump the proposals with the evaluation metric
# data... |
from typing import TYPE_CHECKING
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from google.protobuf import __version__ as __pb__version__
else:
protobuf = import_library('google.protobuf', raise_error=True)
__pb__version__ = protobuf.__version__
if __pb__version__.startswith... | from typing import TYPE_CHECKING
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from google.protobuf import __version__ as __pb__version__
else:
protobuf = import_library('google.protobuf', raise_error=True)
__pb__version__ = protobuf.__version__
if __pb__version__.startswith... |
"""
This script downloads the parallel sentences corpus and creates parallel sentences tsv files that can be used to extend
existing sentence embedding models to new languages.
The parallel sentences corpus is a crawl of transcripts from talks, which are translated to 100+ languages.
The parallel sentences corpus cann... | """
This script downloads the parallel sentences corpus and creates parallel sentences tsv files that can be used to extend
existing sentence embedding models to new languages.
The parallel sentences corpus is a crawl of transcripts from talks, which are translated to 100+ languages.
The parallel sentences corpus cann... |
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | # flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... |
"""**Tracers** are classes for tracing runs.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> BaseTracer --> <name>Tracer # Examples: LangChainTracer, RootListenersTracer
--> <name> # Examples: LogStreamCallbackHandler
""" # noqa: E501
from importlib import ... | """**Tracers** are classes for tracing runs.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> BaseTracer --> <name>Tracer # Examples: LangChainTracer, RootListenersTracer
--> <name> # Examples: LogStreamCallbackHandler
""" # noqa: E501
__all__ = [
"BaseT... |
from typing import TYPE_CHECKING
import torch
if TYPE_CHECKING: # pragma: no cover
from torch import tensor
import numpy
def cosine(
x_mat: 'tensor', y_mat: 'tensor', eps: float = 1e-7, device: str = 'cpu'
) -> 'numpy.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
... | from typing import TYPE_CHECKING
import torch
if TYPE_CHECKING:
from torch import tensor
import numpy
def cosine(
x_mat: 'tensor', y_mat: 'tensor', eps: float = 1e-7, device: str = 'cpu'
) -> 'numpy.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: torc... |
from uuid import UUID
import pytest
from pydantic import schema_json_of
from pydantic.tools import parse_obj_as
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import ID
@pytest.mark.parametrize(
'id', ['1234', 1234, UUID('cf57432e-809e-4353-adbd-9d5c0d733868')]
)
def test_id_valida... | from uuid import UUID
import pytest
from pydantic import schema_json_of
from pydantic.tools import parse_obj_as
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import ID
@pytest.mark.parametrize(
'id', ['1234', 1234, UUID('cf57432e-809e-4353-adbd-9d5c0d733868')]
)
def test_id_valida... |
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | # flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... |
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) for the STSbenchmark from scratch. It generates sentence embeddings
that can be compared using cosine-similarity to measure the similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_... | """
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) for the STSbenchmark from scratch. It generates sentence embeddings
that can be compared using cosine-similarity to measure the similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_... |
import unittest
import torch
from mmengine.config import Config
from mmengine.data import InstanceData
from mmengine.testing import assert_allclose
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.models.seg_heads.panoptic_fusion_heads import HeuristicFusionHead
class TestHeuristicFusionHead(unittest.TestCas... | import unittest
import torch
from mmengine.config import Config
from mmengine.data import InstanceData
from mmengine.testing import assert_allclose
from mmdet.core.evaluation import INSTANCE_OFFSET
from mmdet.models.seg_heads.panoptic_fusion_heads import HeuristicFusionHead
class TestHeuristicFusionHead(unittest.Te... |
"""Markdown node parser."""
import re
from typing import Any, List, Optional, Sequence
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.schema impor... | """Markdown node parser."""
import re
from typing import Any, List, Optional, Sequence
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.schema impor... |
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Callable
import torch
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
... | from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List
import torch
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
This works with the t... |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
# TODO: Due to in... | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
# TODO: Due to in... |
import pytest
from llama_index.embeddings.openai.utils import (
DEFAULT_OPENAI_API_BASE,
DEFAULT_OPENAI_API_VERSION,
MISSING_API_KEY_ERROR_MESSAGE,
resolve_openai_credentials,
validate_openai_api_key,
)
def test_validate_openai_api_key_with_valid_key() -> None:
validate_openai_api_key("valid_a... | import pytest
from llama_index.embeddings.openai.utils import (
resolve_openai_credentials,
validate_openai_api_key,
MISSING_API_KEY_ERROR_MESSAGE,
DEFAULT_OPENAI_API_BASE,
DEFAULT_OPENAI_API_VERSION,
)
def test_validate_openai_api_key_with_valid_key() -> None:
validate_openai_api_key("valid_a... |
from typing import Literal
from pydantic import SecretStr
from backend.data.model import CredentialsField, CredentialsMetaInput, OAuth2Credentials
from backend.integrations.providers import ProviderName
from backend.util.settings import Secrets
# --8<-- [start:GoogleOAuthIsConfigured]
secrets = Secrets()
GOOGLE_OAUT... | from typing import Literal
from pydantic import SecretStr
from backend.data.model import CredentialsField, CredentialsMetaInput, OAuth2Credentials
from backend.util.settings import Secrets
# --8<-- [start:GoogleOAuthIsConfigured]
secrets = Secrets()
GOOGLE_OAUTH_IS_CONFIGURED = bool(
secrets.google_client_id and... |
from typing import List, Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.documents import Document
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.you impor... | from typing import List, Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.documents import Document
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.you impor... |
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
... | from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
s... |
import os
import sys
from pathlib import Path
import pytest
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
from .utils import execute_subprocess_async, get_torch_dist_unique_port, require_torch
def test_split_dataset_by_node_map_style():
full_ds = Dataset.f... | import os
import sys
from pathlib import Path
import pytest
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
from .utils import execute_subprocess_async, get_torch_dist_unique_port, require_torch
def test_split_dataset_by_node_map_style():
full_ds = Dataset.f... |
STRUCTURED_FORMAT_INSTRUCTIONS = """The output should be a markdown code snippet formatted in the following schema, including the leading and trailing "```json" and "```":
```json
{{
{format}
}}
```""" # noqa: E501
STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS = """
```json
{{
{format}
}}
```"""
PYDANTIC_FORMAT_INSTRUCTIO... | # flake8: noqa
STRUCTURED_FORMAT_INSTRUCTIONS = """The output should be a markdown code snippet formatted in the following schema, including the leading and trailing "```json" and "```":
```json
{{
{format}
}}
```"""
STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS = """
```json
{{
{format}
}}
```"""
PYDANTIC_FORMAT_INSTRUCT... |
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
"""Test Baichuan Text Embedding."""
from langchain_community.embeddings.baichuan import BaichuanTextEmbeddings
def test_baichuan_embedding_documents() -> None:
"""Test Baichuan Text Embedding for documents."""
documents = ["今天天气不错", "今天阳光灿烂"]
embedding = BaichuanTextEmbeddings()
output = embedding.em... | """Test Baichuan Text Embedding."""
from langchain_community.embeddings.baichuan import BaichuanTextEmbeddings
def test_baichuan_embedding_documents() -> None:
"""Test Baichuan Text Embedding for documents."""
documents = ["今天天气不错", "今天阳光灿烂"]
embedding = BaichuanTextEmbeddings() # type: ignore[call-arg]... |
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_fireworks import ChatFireworks
class TestFireworksStandard(ChatModel... | """Standard LangChain interface tests"""
from typing import Tuple, Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_fireworks import ChatFireworks
class... |
"""
==================================
Getting started with transforms v2
==================================
Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports
images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding b... | """
==================================
Getting started with transforms v2
==================================
Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports
images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding b... |
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import BinaryClassificationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseE... | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import BinaryClassificationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
... |
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class MaskFormer(SingleStageDetector):
r"""Implementation of `Per-Pixel Classification is
NOT All You Need for Sema... | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class MaskFormer(SingleStageDetector):
r"""Implementation of `Per-Pixel Classification is
NOT All You Need for Sema... |
import random
import time
import pytest
from jina import Client, Document, DocumentArray, Executor, Flow, requests
@pytest.mark.parametrize('stream', [True, False])
@pytest.mark.parametrize('protocol', ['grpc'])
def test_return_order_in_client(protocol, stream):
class ExecutorRandomSleepExecutor(Executor):
... | import random
import time
import pytest
from jina import Client, Document, DocumentArray, Executor, Flow, requests
@pytest.mark.parametrize('stream', [True, False])
@pytest.mark.parametrize('protocol', ['grpc'])
def test_return_order_in_client(protocol, stream):
class ExecutorRandomSleepExecutor(Executor):
... |
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by... | """
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by... |
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydan... | import io
import wave
from typing import TYPE_CHECKING, Any, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.... |
"""Callback Handler streams to stdout on new llm token."""
import sys
from typing import Any, Optional
from langchain_core.callbacks import StreamingStdOutCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class FinalStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
"""Callba... | """Callback Handler streams to stdout on new llm token."""
import sys
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import StreamingStdOutCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class FinalStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
... |
from jina.clients.base.websocket import WebSocketBaseClient
from jina.clients.mixin import (
AsyncHealthCheckMixin,
AsyncPostMixin,
HealthCheckMixin,
PostMixin,
)
class WebSocketClient(WebSocketBaseClient, PostMixin, HealthCheckMixin):
"""A client connecting to a Gateway using WebSocket protocol.
... | from jina.clients.base.websocket import WebSocketBaseClient
from jina.clients.mixin import AsyncPostMixin, HealthCheckMixin, PostMixin
class WebSocketClient(WebSocketBaseClient, PostMixin, HealthCheckMixin):
"""A client connecting to a Gateway using WebSocket protocol.
Instantiate this class through the :met... |
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='MaskRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_mask=True,
pad_size_divisor=32),
ba... | # model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='MaskRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
ty... |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Con... | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Con... |
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | # flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... |
from unittest import mock
import pytest
from llama_index.core.workflow import Context
from llama_index.core.workflow.handler import WorkflowHandler
def test_str():
h = WorkflowHandler()
h.set_result([])
assert str(h) == "[]"
@pytest.mark.asyncio()
async def test_stream_no_context():
h = WorkflowHan... | from unittest import mock
import pytest
from llama_index.core.workflow import Context
from llama_index.core.workflow.handler import WorkflowHandler
def test_str():
h = WorkflowHandler()
h.set_result([])
assert str(h) == "[]"
@pytest.mark.asyncio()
async def test_stream_no_context():
h = WorkflowHan... |
from typing import Dict, Optional, Union
import pytest
from docarray.typing import NdArray, TorchTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal._typing import is_tensor_union, is_type_tensor
from docarray.utils._internal.misc import is_tf_available
tf_available... | from typing import Dict, Optional, Union
import pytest
from docarray.typing import NdArray, TorchTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._typing import is_tensor_union, is_type_tensor
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()... |
from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING: # pragma: no cover
from docarray.document.pydantic_model import PydanticDocumentArray
from docarray.typing import T
from pydantic import BaseModel
class PydanticMixin:
@classmethod
def get_json_schema(cls, indent: int = 2) -> str:
... | from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING:
from docarray.document.pydantic_model import PydanticDocumentArray
from docarray.typing import T
from pydantic import BaseModel
class PydanticMixin:
@classmethod
def get_json_schema(cls, indent: int = 2) -> str:
"""Return a J... |
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is then pooled,
for example with mean-pooling.
"""
import sys
import traceback
from datasets import load_dataset
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer
from sentence_tran... | """
This example runs a CNN after the word embedding lookup. The output of the CNN is then pooled,
for example with mean-pooling.
"""
import sys
import traceback
from datasets import load_dataset
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer
from sentence_tran... |
"""JSON node parser."""
import json
from typing import Any, Dict, Generator, List, Optional, Sequence
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.c... | """JSON node parser."""
import json
from typing import Any, Dict, Generator, List, Optional, Sequence
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.co... |
import random
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.utils.data import Dataset
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.testing_utils import (
TestCasePlus,
backend_device_c... | import random
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.utils.data import Dataset
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.testing_utils import (
TestCasePlus,
execute_subproce... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
# student
ba... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
# student
ba... |
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform, extract_archive
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
SAMP... | import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
import torchaudio
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
_C... |
from __future__ import annotations
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
import torch
from torchvision._utils import sequence_to_str
from torchvision.prototype import datapoints
from torchvision.prototype.datapoints._datapoint import Datapoint
from torchvision.prototype.transfor... | from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
from torchvision._utils import sequence_to_str
from torchvision.prototype import features
from torchvision.prototype.transforms.functional import get_dimensions, get_spatial_size
def query_bounding_box(flat_inputs: List[Any]) -> features.Bo... |
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
... | from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
... |
PREFIX = """You are an agent designed to answer questions about sets of documents.
You have access to tools for interacting with the documents, and the inputs to the tools are questions.
Sometimes, you will be asked to provide sources for your questions, in which case you should use the appropriate tool to do so.
If th... | # flake8: noqa
PREFIX = """You are an agent designed to answer questions about sets of documents.
You have access to tools for interacting with the documents, and the inputs to the tools are questions.
Sometimes, you will be asked to provide sources for your questions, in which case you should use the appropriate tool... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.acti... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.acti... |
_base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
ga... | _base_ = './paa_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
... |
"""Script to generate migrations for the migration script."""
import json
import os
import pkgutil
from typing import Optional
import click
from langchain_cli.namespaces.migrate.generate.generic import (
generate_simplified_migrations,
)
from langchain_cli.namespaces.migrate.generate.grit import (
dump_migra... | # type: ignore
"""Script to generate migrations for the migration script."""
import json
import os
import pkgutil
from typing import Optional
import click
from langchain_cli.namespaces.migrate.generate.generic import (
generate_simplified_migrations,
)
from langchain_cli.namespaces.migrate.generate.grit import (... |
from abc import ABC, abstractmethod
from typing import Callable, List, Sequence, Optional, Union, Any
from llama_index.core.agent.workflow.workflow_events import (
AgentOutput,
ToolCallResult,
)
from llama_index.core.bridge.pydantic import (
BaseModel,
Field,
ConfigDict,
field_validator,
)
from... | from abc import ABC, abstractmethod
from typing import Callable, List, Sequence, Optional, Union, Any
from llama_index.core.agent.workflow.workflow_events import (
AgentOutput,
ToolCallResult,
)
from llama_index.core.bridge.pydantic import (
BaseModel,
Field,
ConfigDict,
field_validator,
)
from... |
# Copyright (c) OpenMMLab. All rights reserved.
from .manager import ManagerMeta, ManagerMixin
from .misc import (apply_to, check_prerequisites, concat_list,
deprecated_api_warning, deprecated_function,
get_object_from_string, has_method,
import_modules_from_stri... | # Copyright (c) OpenMMLab. All rights reserved.
from .manager import ManagerMeta, ManagerMixin
from .misc import (apply_to, check_prerequisites, concat_list,
deprecated_api_warning, deprecated_function,
get_object_from_string, has_method,
import_modules_from_stri... |
from typing import overload, Dict, Optional, List, TYPE_CHECKING, Sequence, Any
from docarray.document.data import DocumentData
from docarray.document.mixins import AllMixins
from docarray.base import BaseDCType
from docarray.math.ndarray import detach_tensor_if_present
if TYPE_CHECKING: # pragma: no cover
from ... | from typing import overload, Dict, Optional, List, TYPE_CHECKING, Sequence, Any
from docarray.document.data import DocumentData
from docarray.document.mixins import AllMixins
from docarray.base import BaseDCType
from docarray.math.ndarray import detach_tensor_if_present
if TYPE_CHECKING:
from docarray.typing impo... |
import math
import os
import pytest
import torch
from torchvision.io import _HAS_GPU_VIDEO_DECODER, VideoReader
try:
import av
except ImportError:
av = None
VIDEO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "videos")
@pytest.mark.skipif(_HAS_GPU_VIDEO_DECODER is False, reason="... | import math
import os
import pytest
import torch
import torchvision
from torchvision import _HAS_GPU_VIDEO_DECODER
from torchvision.io import VideoReader
try:
import av
except ImportError:
av = None
VIDEO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "videos")
@pytest.mark.skipif... |
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from terminaltables import AsciiTable
from mmdet.models import dense_heads
from mmdet.models.dense_heads import * # noqa: F401,F403
def test_dense_heads_test_attr():
"""Tests inference methods such as simple_test and aug_test."""
# make list o... | import warnings
from terminaltables import AsciiTable
from mmdet.models import dense_heads
from mmdet.models.dense_heads import * # noqa: F401,F403
def test_dense_heads_test_attr():
"""Tests inference methods such as simple_test and aug_test."""
# make list of dense heads
exceptions = ['FeatureAdaption... |
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
albu_train_transforms = [
dict(
type='ShiftScaleRotate',
shift_limit=0.0625,
scale_limit=0.0,
rotate_limit=0,
interpolation=1,
p=0.5),
dict(
type='RandomBrightnessContrast',
brightness_limit=[0.... | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
albu_train_transforms = [
dict(
type='ShiftScaleRotate',
shift_limit=0.0625,
scale_limit=0.0,
rotate_limit=0,
interpolation=... |
from __future__ import annotations
from enum import Enum
from typing import Any, Optional, Tuple, Union
import torch
from ._datapoint import Datapoint
class BoundingBoxFormat(Enum):
"""[BETA] Coordinate format of a bounding box.
Available formats are
* ``XYXY``
* ``XYWH``
* ``CXCYWH``
"""... | from __future__ import annotations
from enum import Enum
from typing import Any, Optional, Tuple, Union
import torch
from ._datapoint import Datapoint
class BoundingBoxFormat(Enum):
"""[BETA] Coordinate format of a bounding box.
Available formats are
* ``XYXY``
* ``XYWH``
* ``CXCYWH``
"""... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
data_preprocesso... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
data_preprocesso... |
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, Union
from torch.utils.data import DataLoader
class BaseLoop(metaclass=ABCMeta):
"""Base loop class.
All subclasses inheriting from ``BaseLoop`` should overwrite the
:meth:`run` method.
A... | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, Union
from torch.utils.data import DataLoader
class BaseLoop(metaclass=ABCMeta):
"""Base loop class.
All subclasses inheriting from ``BaseLoop`` should overwrite the
:meth:`run` method.
A... |
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Im... | from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Im... |
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from langchain_core.callbacks import Callbacks
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallb... | from __future__ import annotations
from typing import TYPE_CHECKING, Any
from langchain_core.callbacks import Callbacks
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallb... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
from collections import OrderedDict
import torch
from mmengine.runner import CheckpointLoader
convert_dict_fpn = {
'module.backbone.fpn.fpn_inner2': 'neck.lateral_convs.0.conv',
'module.backbone.fpn.fpn_inner3': 'neck.lateral_co... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
from collections import OrderedDict
import torch
from mmengine.runner import CheckpointLoader
convert_dict_fpn = {
'module.backbone.fpn.fpn_inner2': 'neck.lateral_convs.0.conv',
'module.backbone.fpn.fpn_inner3': 'neck.lateral_co... |
"""Extra array functions built on top of the array API standard."""
from ._delegation import isclose, pad
from ._lib._at import at
from ._lib._funcs import (
apply_where,
atleast_nd,
broadcast_shapes,
cov,
create_diagonal,
expand_dims,
kron,
nunique,
setdiff1d,
sinc,
)
from ._li... | """Extra array functions built on top of the array API standard."""
from ._delegation import isclose, pad
from ._lib._at import at
from ._lib._funcs import (
apply_where,
atleast_nd,
broadcast_shapes,
cov,
create_diagonal,
expand_dims,
kron,
nunique,
setdiff1d,
sinc,
)
from ._li... |
import types
from typing import TYPE_CHECKING
from docarray.store.file import FileDocStore
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.store.s3 import S3DocStore # noqa: F401
__all__ = ['FileDocStore']
def __getattr_... | import types
from typing import TYPE_CHECKING
from docarray.store.file import FileDocStore
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.store.jac import JACDocStore # noqa: F401
from docarray.store.s3 import S3DocSto... |
_base_ = './queryinst_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
... | _base_ = './queryinst_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, ... |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
n... | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
n... |
"""
In SecGPT, if the hub planner determines that a user query can be addressed solely by an LLM, it utilizes a non-collaborative vanilla spoke, which operates without awareness of other system functionalities.
"""
from llama_index.core.llms.llm import LLM
from llama_index.core.settings import Settings
from llama_ind... | """
In SecGPT, if the hub planner determines that a user query can be addressed solely by an LLM, it utilizes a non-collaborative vanilla spoke, which operates without awareness of other system functionalities.
"""
from llama_index.core.llms.llm import LLM
from llama_index.core.settings import Settings
from llama_ind... |
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Rescaling")
class Rescaling(TFDataLayer):
"""A preprocessing layer which rescales input values... | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
@keras_export("keras.layers.Rescaling")
class Rescaling(TFDataLayer):
"""A preprocessing layer which rescales input values to a new range.
This layer rescales every... |
from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING:
from docarray.typing import T
from docarray.document.strawberry_type import StrawberryDocument
class StrawberryMixin:
def to_strawberry_type(self) -> List['StrawberryDocument']:
"""Convert a DocumentArray object into a Pydantic model.... | from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING:
from ...typing import T
from ...document.strawberry_type import StrawberryDocument
class StrawberryMixin:
def to_strawberry_type(self) -> List['StrawberryDocument']:
"""Convert a DocumentArray object into a Pydantic model."""
... |
import subprocess
import sys
import time
def wait_for_postgres(max_retries=5, delay=5):
for _ in range(max_retries):
try:
result = subprocess.run(
[
"docker",
"compose",
"-f",
"docker-compose.test.y... | import subprocess
import sys
import time
def wait_for_postgres(max_retries=5, delay=5):
for _ in range(max_retries):
try:
result = subprocess.run(
[
"docker",
"compose",
"-f",
"docker-compose.test.y... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.parallel import is_module_wrapper
from mmcv.runner import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class SetEpochInfoHook(Hook):
"""Set runner's epoch information to the model."""
def before_train_epoch(self, runner):
ep... | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.parallel import is_module_wrapper
from mmcv.runner import HOOKS, Hook
@HOOKS.register_module()
class SetEpochInfoHook(Hook):
"""Set runner's epoch information to the model."""
def before_train_epoch(self, runner):
epoch = runner.epoch
... |
import os
import time
import uuid
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml_v... | import os
import time
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml_v7 = os.path.... |
import json
import os
from typing import List
import torch
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: List[int] = [... | import torch
from torch import nn
from typing import List
import os
import json
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: List[int] = [1... |
from typing import Union, Iterable, Dict, List
import warnings
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, ... | from typing import Union, Iterable, Dict, List
import warnings
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, ... |
from typing import Optional
import numpy as np
from docarray import BaseDoc, DocVec
from docarray.typing import ImageUrl, NdArray
def test_optional():
class Features(BaseDoc):
tensor: NdArray[100]
class Image(BaseDoc):
url: ImageUrl
features: Optional[Features] = None
docs = Do... | from typing import Optional
import numpy as np
from docarray import BaseDoc, DocVec
from docarray.typing import ImageUrl, NdArray
def test_optional():
class Features(BaseDoc):
tensor: NdArray[100]
class Image(BaseDoc):
url: ImageUrl
features: Optional[Features] = None
docs = Do... |
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_ping_parser(parser=None):
"""Set the parser for `ping`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
... | """Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_ping_parser(parser=None):
"""Set the parser for `ping`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
... |
"""Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import platform
import warnings
from collections.abc import Sequence
import numpy as np
from ..exceptions import DataConversionWarning
from . import _joblib, metadata_routing
from ._bunch... | """Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import platform
import warnings
from collections.abc import Sequence
import numpy as np
from ..exceptions import DataConversionWarning
from . import _joblib, metadata_routing
from ._bunch... |
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
... | _base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[... |
import os
import time
from jina import Executor, requests, DocumentArray
class SlowProcessExecutor(Executor):
def __init__(self, time_sleep=1.0, *args, **kwargs):
super().__init__(*args, **kwargs)
self.time_sleep = time_sleep
@requests
def process(self, docs: DocumentArray, *args, **kwar... | import os
import time
from jina import Executor, requests, DocumentArray
class SlowProcessExecutor(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from jina.logging.logger import JinaLogger
self.logger = JinaLogger(self.__class__.__name__)
@requests... |
import os
import grpc
import pytest
from jina import Flow, __default_host__
from jina.clients import Client
from jina.excepts import PortAlreadyUsed
from jina.helper import is_port_free
from jina.serve.runtimes.gateway.grpc import GRPCGateway
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime as _GRPCGat... | import os
import grpc
import pytest
from jina import Flow, __default_host__
from jina.clients import Client
from jina.excepts import PortAlreadyUsed
from jina.helper import is_port_free
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime as _GRPCGatewayRuntime
from jina.serve.runtimes.helper import _get_g... |
_base_ = '../mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
roi_head=dict(
type='PISARoIHead',
bbox_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
train_cfg=dict(
rpn_proposal=dict(
nms_pre=2000,
max_per_img... | _base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
roi_head=dict(
type='PISARoIHead',
bbox_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
train_cfg=dict(
rpn_proposal=dict(
nms_pre=2000,
max_per_img... |
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to... | # Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to... |
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HubSpotCompanyBlock(Bl... | from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HubSpotCompanyBlock(Bl... |
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils... | from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils... |
"""Run smoke tests"""
import os
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_image, decode_jpeg, decode_webp, read_file
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
pr... | """Run smoke tests"""
import os
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_image, decode_jpeg, decode_webp, read_file
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
pr... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import ExtractHyperlinksTool
from langchain_community.tools.playwright.extract_hyperlinks import (
ExtractHyperlinksToolInput,
)
# Create a way to dynamically look up ... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import ExtractHyperlinksTool
from langchain_community.tools.playwright.extract_hyperlinks import (
ExtractHyperlinksToolInput,
)
# Create a way to dynamically look up ... |
# coding=utf-8
# Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
... | # coding=utf-8
# Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
... |
import asyncio
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_reque... | import asyncio
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_reque... |
import re
import torch
from torch.utils.hipify.hipify_python import PYTORCH_MAP, PYTORCH_TRIE
# It is not a good idea to directly apply hipify_torch to codegen, which will be vulnerable to cases like:
# "...
# from ..codecache import CudaKernelParamCache
# ..."
# In such cases, we do not need to hipify_torch ... | import re
import torch
from torch.utils.hipify.hipify_python import PYTORCH_MAP, PYTORCH_TRIE
# It is not a good idea to directly apply hipify_torch to codegen, which will be vulnerable to cases like:
# "...
# from ..codecache import CudaKernelParamCache
# ..."
# In such cases, we do not need to hipify_torch ... |
"""Graph Database Cypher Reader."""
from typing import Dict, List, Optional
import yaml
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class GraphDBCypherReader(BaseReader):
"""
Graph database Cypher reader.
Combines all Cypher query results into the D... | """Graph Database Cypher Reader."""
from typing import Dict, List, Optional
import yaml
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class GraphDBCypherReader(BaseReader):
"""Graph database Cypher reader.
Combines all Cypher query results into the Docume... |
from typing import Any, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from pytest_mock import Mo... | from typing import Any, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from pytest_mock import Mo... |