| input | output |
|---|---|
#!/usr/bin/env python3
"""
This script should use a very simple, functional programming style.
Avoid Jinja macros in favor of native Python functions.
Don't go overboard on code generation; use Python only to generate
content that can't be easily declared statically using CircleCI's YAML API.
Data declarations (e.g.... | #!/usr/bin/env python3
"""
This script should use a very simple, functional programming style.
Avoid Jinja macros in favor of native Python functions.
Don't go overboard on code generation; use Python only to generate
content that can't be easily declared statically using CircleCI's YAML API.
Data declarations (e.g.... |
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Optional
from mmengine.structures import InstanceData
class BaseAssigner(metaclass=ABCMeta):
"""Base assigner that assigns boxes to ground truth boxes."""
@abstractmethod
def assign(self,
... | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Optional
from mmengine.data import InstanceData
class BaseAssigner(metaclass=ABCMeta):
"""Base assigner that assigns boxes to ground truth boxes."""
@abstractmethod
def assign(self,
... |
_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py']
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa
model = dict(
teacher_config='configs/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py... | _base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py']
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa
model = dict(
teacher_config='configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_co... |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import Decompressor, IterDataPipe, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint... | import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import Decompressor, IterDataPipe, LineReader, Mapper
from torchvision.datapoints import Image
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, HttpResource, O... |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions... | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions... |
import os
import numpy as np
import keras
from keras.src import testing
from keras.src.saving.file_editor import KerasFileEditor
def get_source_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.... | import os
import numpy as np
import keras
from keras.src import testing
from keras.src.saving.file_editor import KerasFileEditor
def get_source_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.... |
"""Query Rewriting Retriever Pack."""
from typing import Any, Dict, List
from llama_index.core import Settings
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.c... | """Query Rewriting Retriever Pack."""
from typing import Any, Dict, List
from llama_index.core import Settings
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.c... |
import io
import warnings
from abc import ABC
import numpy as np
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> bytes:
"""
... | import io
import warnings
from abc import ABC
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> bytes:
"""
Convert image... |
from torchaudio._internal import module_utils as _mod_utils
from . import sox_utils
from .download import download_asset
if _mod_utils.is_sox_available():
sox_utils.set_verbosity(1)
__all__ = [
"download_asset",
"sox_utils",
]
| from torchaudio._internal import module_utils as _mod_utils
from . import (
sox_utils,
)
from .download import download_asset
if _mod_utils.is_sox_available():
sox_utils.set_verbosity(1)
__all__ = [
"download_asset",
"sox_utils",
]
|
from .transform_encoder import TransformerTorchEncoder
| from .transform_encoder import TransformerTorchEncoder |
from typing import Optional, TYPE_CHECKING
import numpy as np
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri, _is_datauri
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class ConvertMixin:
"""Provide helper functions for :class:`Document` to support conversion be... | from typing import Optional, TYPE_CHECKING
import numpy as np
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri, _is_datauri
if TYPE_CHECKING:
from docarray.typing import T
class ConvertMixin:
"""Provide helper functions for :class:`Document` to support conversion between :attr:`.tensor... |
# Copyright (c) OpenMMLab. All rights reserved.
from enum import Enum
from typing import Union
class Priority(Enum):
"""Hook priority levels.
+--------------+------------+
| Level | Value |
+==============+============+
| HIGHEST | 0 |
+--------------+------------+
... | # Copyright (c) OpenMMLab. All rights reserved.
from enum import Enum
from typing import Union
class Priority(Enum):
"""Hook priority levels.
+--------------+------------+
| Level | Value |
+==============+============+
| HIGHEST | 0 |
+--------------+------------+
... |
"""Evaluator."""
from abc import abstractmethod
from typing import Any, Optional, Sequence
from llama_index.core.async_utils import asyncio_run
from llama_index.core.base.response.schema import Response
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.prompts.mixin import PromptMixin... | """Evaluator."""
from abc import abstractmethod
from typing import Any, Optional, Sequence
from llama_index.core.async_utils import asyncio_run
from llama_index.core.base.response.schema import Response
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.prompts.mixin import PromptMixin... |
from __future__ import annotations
from .model_card import SparseEncoderModelCardData
from .SparseEncoder import SparseEncoder
from .trainer import SparseEncoderTrainer
from .training_args import SparseEncoderTrainingArguments
__all__ = [
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArgu... | from __future__ import annotations
from .model_card import SparseEncoderModelCardData
from .SparseEncoder import SparseEncoder
from .trainer import SparseEncoderTrainer
from .training_args import SparseEncoderTrainingArguments
__all__ = [
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArgu... |
"""Provides the PanelChatPack."""
import os
from typing import Any, Dict
from llama_index.core.llama_pack.base import BaseLlamaPack
ENVIRONMENT_VARIABLES = [
"GITHUB_TOKEN",
"OPENAI_API_KEY",
]
class PanelChatPack(BaseLlamaPack):
"""Panel chatbot pack."""
def get_modules(self) -> Dict[str, Any]:
... | """Provides the PanelChatPack."""
import os
from typing import Any, Dict
from llama_index.core.llama_pack.base import BaseLlamaPack
ENVIRONMENT_VARIABLES = [
"GITHUB_TOKEN",
"OPENAI_API_KEY",
]
class PanelChatPack(BaseLlamaPack):
"""Panel chatbot pack."""
def get_modules(self) -> Dict[str, Any]:
... |
from typing import Any
import torch
import enum
from torch._C import _to_dlpack as to_dlpack
__all__ = [
"DLDeviceType",
"from_dlpack",
]
class DLDeviceType(enum.IntEnum):
# Enums as in DLPack specification (aten/src/ATen/dlpack.h)
kDLCPU = 1,
kDLCUDA = 2,
kDLCUDAHost = 3,
kDLOpenCL = 4,... | from typing import Any
import torch
import enum
from torch._C import _from_dlpack
from torch._C import _to_dlpack as to_dlpack
__all__ = [
"DLDeviceType",
"from_dlpack",
"to_dlpack",
]
class DLDeviceType(enum.IntEnum):
# Enums as in DLPack specification (aten/src/ATen/dlpack.h)
kDLCPU = 1,
... |
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | # flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable... | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable... |
"""
NumPy Array API compatibility library
This is a small wrapper around NumPy, CuPy, JAX, sparse and others that are
compatible with the Array API standard https://data-apis.org/array-api/latest/.
See also NEP 47 https://numpy.org/neps/nep-0047-array-api-standard.html.
Unlike array_api_strict, this is not a strict m... | """
NumPy Array API compatibility library
This is a small wrapper around NumPy, CuPy, JAX, sparse and others that are
compatible with the Array API standard https://data-apis.org/array-api/latest/.
See also NEP 47 https://numpy.org/neps/nep-0047-array-api-standard.html.
Unlike array_api_strict, this is not a strict m... |
import json
from typing import List
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
class XataChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in a Xata database."""
... | import json
from typing import List
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
class XataChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in a Xata database."""
... |
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredODTLoader(UnstructuredFileLoader):
"""Load `OpenOffice ODT` files using `Unstructured`.
You can run ... | from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredODTLoader(UnstructuredFileLoader):
"""Load `OpenOffice ODT` files using `Unstructured`.
You can run ... |
# mypy: allow-untyped-defs
import functools
from collections.abc import Hashable
from dataclasses import dataclass, fields
from typing import TypeVar
from typing_extensions import dataclass_transform
T = TypeVar("T", bound="_Union")
class _UnionTag(str):
__slots__ = ("_cls",)
_cls: Hashable
@staticmeth... | # mypy: allow-untyped-defs
import functools
from collections.abc import Hashable
from dataclasses import fields
class _UnionTag(str):
__slots__ = ("_cls",)
_cls: Hashable
@staticmethod
def create(t, cls):
tag = _UnionTag(t)
assert not hasattr(tag, "_cls")
tag._cls = cls
... |
"""**Document Transformers** are classes to transform Documents.
**Document Transformers** are usually used to transform many Documents in a single run.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> <name> # Examples: DoctranQATransformer, DoctranTextTranslator
**Main helpers:**
.. code-bl... | """**Document Transformers** are classes to transform Documents.
**Document Transformers** are usually used to transform many Documents in a single run.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> <name> # Examples: DoctranQATransformer, DoctranTextTranslator
**Main helpers:**
.. code-bl... |
import os
from pathlib import Path
import pytest
from pytest_kind import KindCluster, cluster
from jina.logging.logger import JinaLogger
from tests.k8s_otel.kind_wrapper import KindClusterWrapperV2
# The default version broke cni at some point. That's why we need to specify the version here.
# This can and probably ... | import os
from pathlib import Path
import pytest
from pytest_kind import KindCluster, cluster
from jina.logging.logger import JinaLogger
from tests.k8s_otel.kind_wrapper import KindClusterWrapperV2
# The default version broke cni at some point. That's why we need to specify the version here.
# This can and probably ... |
import os
import numpy as np
from PIL import Image
from docarray import Document
from docarray.dataclasses.getter import (
audio_getter,
image_getter,
json_getter,
text_getter,
uri_getter,
)
from docarray.dataclasses.enums import DocumentMetadata, ImageType
cur_dir = os.path.dirname(os.path.abspa... | import os
import numpy as np
from PIL import Image
from docarray import Document
from docarray.dataclasses.getter import (
audio_getter,
image_getter,
json_getter,
text_getter,
uri_getter,
)
cur_dir = os.path.dirname(os.path.abspath(__file__))
IMAGE_URI = os.path.join(cur_dir, 'toydata/test.png'... |
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache # noqa F401
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pand... | import inspect
import re
from typing import Dict, List
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache # noqa F401
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from... |
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
fro... | from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
fro... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from .utils import demo_mm_inputs, get_detector_cfg
class TestSingleStageDetector(TestCase):
@param... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from .utils import demo_mm_inputs, get_detector_cfg
class TestSingleStageDetector(TestCase):
@param... |
"""
This example computes the score between a query and all possible
sentences in a corpus using a Cross-Encoder for semantic textual similarity (STS).
It then outputs the most similar sentences for the given query.
"""
from sentence_transformers.cross_encoder import CrossEncoder
import numpy as np
# Pre-trained cross... | """
This example computes the score between a query and all possible
sentences in a corpus using a Cross-Encoder for semantic textual similarity (STS).
It then outputs the most similar sentences for the given query.
"""
from sentence_transformers.cross_encoder import CrossEncoder
import numpy as np
# Pre-trained cross ... |
from typing import Any
from typing_extensions import Self
import torch
import torch.ao.nn.intrinsic as nni
import torch.ao.nn.quantized.dynamic as nnqd
__all__ = ["LinearReLU"]
class LinearReLU(nnqd.Linear):
r"""
A LinearReLU module fused from Linear and ReLU modules that can be used
for dynamic quanti... | # mypy: allow-untyped-defs
import torch
import torch.ao.nn.intrinsic as nni
import torch.ao.nn.quantized.dynamic as nnqd
__all__ = ["LinearReLU"]
class LinearReLU(nnqd.Linear):
r"""
A LinearReLU module fused from Linear and ReLU modules that can be used
for dynamic quantization.
Supports both, FP16 ... |
from typing import Callable, Dict, Generic, List, Optional, Type, TypeVar
from torch.utils.data import Dataset
from docarray import BaseDocument, DocumentArray, DocumentArrayStacked
from docarray.typing import TorchTensor
from docarray.utils._typing import change_cls_name
T_doc = TypeVar('T_doc', bound=BaseDocument)... | from typing import Callable, Dict, Generic, List, Optional, Type, TypeVar
from torch.utils.data import Dataset
from docarray import BaseDocument, DocumentArray
from docarray.typing import TorchTensor
from docarray.utils._typing import change_cls_name
T_doc = TypeVar('T_doc', bound=BaseDocument)
class MultiModalDat... |
import json
import os
import pickle
import tempfile
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
kRows = 100
kCols = 10
def generate_data():
X = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
return X, y
class TestPickling:
def run_model_pickl... | import json
import os
import pickle
import numpy as np
import xgboost as xgb
kRows = 100
kCols = 10
def generate_data():
X = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
return X, y
class TestPickling:
def run_model_pickling(self, xgb_params) -> str:
X, y = generate_data()
... |
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
def test_from_to_json():
da = DocList[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], t... | from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
def test_from_to_json():
da = DocArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5],... |
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_CPU_VIDEO_DECODER,
_HAS_VIDEO_OPT,
_probe_video_from_fi... | from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_me... |
__version__ = '0.14.12'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
| __version__ = '0.14.11'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from torch import nn
from sentence_transformers.models.Module import Module
class LSTM(Module):
"""Bidirectional LSTM running over word embeddings."""
config_keys: li... | from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class LSTM(nn.Module):
"""Bidirectional LSTM running over word embeddings."""
def ... |
import json
import logging
import os
from collections import defaultdict
from pathlib import Path
from huggingface_hub import HfApi
import diffusers
PATH_TO_REPO = Path(__file__).parent.parent.resolve()
ALWAYS_TEST_PIPELINE_MODULES = [
"controlnet",
"controlnet_flux",
"controlnet_sd3",
"stable_diffu... | import json
import logging
import os
from collections import defaultdict
from pathlib import Path
from huggingface_hub import HfApi
import diffusers
PATH_TO_REPO = Path(__file__).parent.parent.resolve()
ALWAYS_TEST_PIPELINE_MODULES = [
"controlnet",
"stable_diffusion",
"stable_diffusion_2",
"stable_... |
from typing import Optional
from llama_index.core.base.llms.types import ChatMessage
from typing_extensions import NotRequired, TypedDict
XINFERENCE_MODEL_SIZES = {
"baichuan": 2048,
"baichuan-chat": 2048,
"wizardlm-v1.0": 2048,
"vicuna-v1.3": 2048,
"orca": 2048,
"chatglm": 2048,
"chatglm2... | from typing import Optional
from llama_index.core.base.llms.types import ChatMessage
from typing_extensions import NotRequired, TypedDict
XINFERENCE_MODEL_SIZES = {
"baichuan": 2048,
"baichuan-chat": 2048,
"wizardlm-v1.0": 2048,
"vicuna-v1.3": 2048,
"orca": 2048,
"chatglm": 2048,
"chatglm2... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.vgg16 import VGG16 as VGG16
from keras.src.applications.vgg16 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.vgg16 import preprocess... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.vgg16 import VGG16
from keras.src.applications.vgg16 import decode_predictions
from keras.src.applications.vgg16 import preprocess_input
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import BaseModule
from mmdet.models.builder import HEADS
from ...core import bbox_cxcywh_to_xyxy
@HEADS.register_module()
class EmbeddingRPNHead(BaseModule):
"""RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/... | import torch
import torch.nn as nn
from mmcv.runner import BaseModule
from mmdet.models.builder import HEADS
from ...core import bbox_cxcywh_to_xyxy
@HEADS.register_module()
class EmbeddingRPNHead(BaseModule):
"""RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .
Unlike traditional RPNHead,... |
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.18.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version... | # Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.17.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Sequence
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class RuntimeInfoHook(Hook):
"""A hook that updates runtime information into message hub.
E.g... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Sequence
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class RuntimeInfoHook(Hook):
"""A hook that updates runtime information into message hub.
E.g... |
"""Tests for the Google Cloud DocAI parser."""
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.document_loaders.parsers import (
AzureAIDocumentIntelligenceParser,
)
@pytest.mark.requires("azure", "azure.ai", "azure.ai.documentintelligence")
@patch("azure.ai.documentintelligen... | """Tests for the Google Cloud DocAI parser."""
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.document_loaders.parsers import (
AzureAIDocumentIntelligenceParser,
)
@pytest.mark.requires("azure", "azure.ai", "azure.ai.documentintelligence")
@patch("azure.ai.documentintelligen... |
"""
Tests for sklearn.cluster._feature_agglomeration
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.cluster import FeatureAgglomeration
from sklearn.datasets import make_blobs
from sklearn.utils._testing import assert_array_almost_equal
def test_feature_agglomeration():
n_clust... | """
Tests for sklearn.cluster._feature_agglomeration
"""
import warnings
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn.cluster import FeatureAgglomeration
from sklearn.datasets import make_blobs
from sklearn.utils._testing import assert_array_almost_equal
def test_featu... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | """
this test check the docstring of all of our public API. It does it
by checking the `__all__` of each of our namespace.
to add a new namespace you need to
* import it
* add it to the `SUB_MODULE_TO_CHECK` list
"""
import pytest
from mktestdocs import check_docstring, get_codeblock_members
import docarray.data
imp... |
try:
from docarray import BaseDoc as Document
from docarray import DocList as DocumentArray
docarray_v2 = True
from jina._docarray_legacy import LegacyDocumentJina
except ImportError:
from docarray import Document, DocumentArray
docarray_v2 = False
import pydantic
is_pydantic_v2 = pydanti... | try:
from docarray import BaseDoc as Document
from docarray import DocList as DocumentArray
docarray_v2 = True
except ImportError:
from docarray import Document, DocumentArray
docarray_v2 = False
|
from llama_index.core import Document, MockEmbedding
from llama_index.core.llms import MockLLM
from llama_index.packs.raptor.base import RaptorRetriever
def test_raptor() -> None:
retriever = RaptorRetriever(
[
Document(text="one"),
Document(text="two"),
Document(text="... | from llama_index.core import Document, MockEmbedding, global_tokenizer
from llama_index.core.llms import MockLLM
from llama_index.packs.raptor.base import RaptorRetriever
import pytest
@pytest.mark.skipif(
condition=(global_tokenizer is None), reason="No global tokenizer set"
)
def test_raptor() -> None:
retr... |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Any, Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataSample
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
@HOOKS.regist... | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Any, Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataSample
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
@HOOKS.regist... |
import requests as req
from docarray import DocumentArray
from prometheus_client import Summary
from jina import Executor, Flow, monitor, requests
def test_prometheus_interface(port_generator):
class DummyExecutor(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwarg... | import requests as req
from docarray import DocumentArray
from prometheus_client import Summary
from jina import Executor, Flow, monitor, requests
def test_prometheus_interface(port_generator):
class DummyExecutor(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwarg... |
import asyncio
from typing import TYPE_CHECKING, Optional, Tuple
import grpc
from jina.clients.base.retry import wait_or_raise_err
from jina.clients.helper import callback_exec
from jina.excepts import InternalNetworkError
from jina.proto import jina_pb2_grpc
from jina.serve.stream import RequestStreamer
if TYPE_CHE... | import asyncio
from typing import TYPE_CHECKING, Optional, Tuple
import grpc
from jina.clients.base.retry import wait_or_raise_err
from jina.clients.helper import callback_exec
from jina.excepts import InternalNetworkError
from jina.proto import jina_pb2_grpc
from jina.serve.stream import RequestStreamer
if TYPE_CHE... |
"""Toolkit for interacting with a vector store."""
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_core.vectorstores import VectorStore
from pydantic import BaseModel, ConfigDict, Field
class Vecto... | """Toolkit for interacting with a vector store."""
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_core.vectorstores import VectorStore
from pydantic import BaseModel, ConfigDict, Field
class Vecto... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import AzureCosmosDBVectorSearch
from langchain_community.vectorstores.azure_cosmos_db import CosmosDBSimilarityType
# Create a way to dynamically look up deprecated import... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import AzureCosmosDBVectorSearch
from langchain_community.vectorstores.azure_cosmos_db import CosmosDBSimilarityType
# Create a way to dynamically look up deprecated import... |
from typing import Optional, TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
T = TypeVar('T', bound='TextUrl')
@_register_proto(proto_type_name='text_url')
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or a local... | from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import TEXT_FILE_FORMATS
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields imp... |
from torch import nn, Tensor
from typing import Iterable, Dict
import torch.nn.functional as F
from enum import Enum
from ..SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""
The metric for the triplet loss
"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
... | from torch import nn, Tensor
from typing import Iterable, Dict
import torch.nn.functional as F
from enum import Enum
from ..SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""
The metric for the triplet loss
"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .pipeline_switch_hook import PipelineSwitchHook
from .set_epoch_in... | # Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .pipeline_switch_hook import PipelineSwitchHook
from .set_epoch_in... |
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
sty... | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
sty... |
# model settings
model = dict(
type='FastRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_... | # model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FastRCNN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(... |
import torch
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
import v2_extras
return torchvision.transforms.v2, torchvision.datapoints, v2_extras
... | import torch
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
import v2_extras
return torchvision.transforms.v2, torchvision.datapoints, v2_extras
... |
from typing import Dict, TYPE_CHECKING, Optional
if TYPE_CHECKING:
from docarray import Document
from docarray.array.queryset.lookup import Q, LookupNode, LookupLeaf
LOGICAL_OPERATORS = {'$and': 'and', '$or': 'or', '$not': True}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'... | from typing import Dict, TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ... import Document
from .lookup import Q, LookupNode, LookupLeaf
LOGICAL_OPERATORS = {'$and': 'and', '$or': 'or', '$not': True}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'$gte': 'gte',
'$eq': 'ex... |
import os
from pathlib import Path
from torchaudio.datasets import yesno
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
def get_mock_data(root_dir, labels):
"""
root_dir: path
labels: list of labels
"""
mocked_data = []
b... | import os
from pathlib import Path
from torchaudio.datasets import yesno
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
def get_mock_data(root_dir, labels):
"""
root_dir: path
labels: list of labels
"""
... |
import pytest
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
@pytest.mark.parametrize(
'array,result... | import pytest
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
@pytest.mark.parametrize(
'array,result... |
from docarray.documents.text import TextDoc
def test_text_document_operators():
doc = TextDoc(text='text', url='url.com')
assert doc == 'text'
assert doc != 'url.com'
doc2 = TextDoc(id=doc.id, text='text', url='url.com')
assert doc == doc2
doc3 = TextDoc(id='other-id', text='text', url='ur... | from docarray.documents.text import Text
def test_text_document_operators():
doc = Text(text='text', url='url.com')
assert doc == 'text'
assert doc != 'url.com'
doc2 = Text(id=doc.id, text='text', url='url.com')
assert doc == doc2
doc3 = Text(id='other-id', text='text', url='url.com')
... |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from .generation import Llama
from .model import ModelArgs, Transformer
from .tokenizer import Tokenizer
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from .generation import LLaMA
from .model import ModelArgs, Transformer
from .tokenizer import Tokenizer
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_ar... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_ar... |
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
d... | # dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
d... |
import itertools
import numpy as np
from absl.testing import parameterized
from keras.src import ops
from keras.src import testing
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bo... | import itertools
import numpy as np
from absl.testing import parameterized
from keras.src import testing
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters im... |
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from box_sdk_gen import (
BoxClient,
)
from llama_index.readers.box.BoxAPI.box_api import (
box_check_connection,
get_box_files_details,
get_ai_response_from_box_files,
add_extra_header_to_b... | from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from box_sdk_gen import (
BoxClient,
)
from llama_index.readers.box.BoxAPI.box_api import (
box_check_connection,
get_box_files_details,
get_ai_response_from_box_files,
add_extra_header_to_b... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .convfc_bbox_head import ConvFCBBoxHead
@MODELS.register_module()
class SCNetBBoxHead(ConvFCBBoxHead):
"""BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
This inherits ``ConvFCBBoxHead`` with modified forward() ... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from .convfc_bbox_head import ConvFCBBoxHead
@HEADS.register_module()
class SCNetBBoxHead(ConvFCBBoxHead):
"""BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
This inherits ``ConvFCBBoxHead`` with modified forwar... |
import numpy as np
import pytest
from docarray import DocumentArray, Document
from docarray.array.storage.base.helper import Offset2ID
@pytest.fixture(scope='function')
def docs():
d1 = Document(embedding=np.array([10, 0]))
d2 = Document(embedding=np.array([0, 10]))
d3 = Document(embedding=np.array([-10,... | import numpy as np
import pytest
from docarray import DocumentArray, Document
from docarray.array.storage.base.helper import Offset2ID
@pytest.fixture(scope='function')
def docs():
d1 = Document(embedding=np.array([10, 0]))
d2 = Document(embedding=np.array([0, 10]))
d3 = Document(embedding=np.array([-10,... |
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class Sparse... | from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class Sparse... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.human import (
AsyncHumanApprovalCallbackHandler,
HumanApprovalCallbackHandler,
HumanRejectedException,
)
# Create a way to dynamically look up depreca... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.human import (
AsyncHumanApprovalCallbackHandler,
HumanApprovalCallbackHandler,
HumanRejectedException,
)
# Create a way to dynamically look up depreca... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
"""Check invalid loss hook.
This hook will regularly check ... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
"""Ch... |
def __getattr__(name: str):
import warnings
warnings.warn(
"Torchaudio's I/O functions now support per-call backend dispatch. "
"Importing backend implementation directly is no longer guaranteed to work. "
"Please use `backend` keyword with load/save/info function, instead of "
... | def __getattr__(name: str):
import warnings
warnings.warn(
"Torchaudio's I/O functions now support par-call bakcend dispatch. "
"Importing backend implementation directly is no longer guaranteed to work. "
"Please use `backend` keyword with load/save/info function, instead of "
... |
import os
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import (
ReRankEndEvent,
... | import os
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import (
ReRankEndEvent,
... |
import wave
from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class AudioDataMixin:
"""Provide helper functions for :class:`Document` to support audio data."""
def save_audio_tensor_to_file(
self: 'T',
... | import wave
from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray.typing import T
class AudioDataMixin:
"""Provide helper functions for :class:`Document` to support audio data."""
def save_audio_tensor_to_file(
self: 'T',
file: Union[str, B... |
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedI... | from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision... |
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zero... | import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zero... |
# mypy: allow-untyped-defs
r"""Contains definitions of the methods used by the _BaseDataLoaderIter to put fetched tensors into pinned memory.
These **need** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import collections
import copy
import queue
import torch
from torch._utils impo... | # mypy: allow-untyped-defs
r"""Contains definitions of the methods used by the _BaseDataLoaderIter to put fetched tensors into pinned memory.
These **need** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import collections
import copy
import queue
import torch
from torch._utils impo... |
from .basic import BasicTextNormalizer as BasicTextNormalizer
from .english import EnglishTextNormalizer as EnglishTextNormalizer
| from .basic import BasicTextNormalizer
from .english import EnglishTextNormalizer
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_ar... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_ar... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | # Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... |
import pickle
from dataclasses import dataclass
from io import BufferedIOBase
from typing import Any
import torch
import torch._weights_only_unpickler as _weights_only_unpickler
from torch.serialization import _load, _save, DEFAULT_PROTOCOL, MAP_LOCATION
__all__: list[str] = []
@dataclass
class _Entry:
key: st... | import pickle
from dataclasses import dataclass
from io import BufferedIOBase
from typing import Any
import torch
import torch._weights_only_unpickler as _weights_only_unpickler
from torch.serialization import _load, _save, DEFAULT_PROTOCOL, MAP_LOCATION
__all__: list[str] = []
@dataclass
class _Entry:
key: st... |
from fastapi import FastAPI
app = FastAPI(swagger_ui_parameters={"syntaxHighlight": {"theme": "obsidian"}})
@app.get("/users/{username}")
async def read_user(username: str):
return {"message": f"Hello {username}"}
| from fastapi import FastAPI
app = FastAPI(swagger_ui_parameters={"syntaxHighlight.theme": "obsidian"})
@app.get("/users/{username}")
async def read_user(username: str):
return {"message": f"Hello {username}"}
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
SimCSE will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_simcse_from_file.py path/to/sentences.txt
"""
import gzi... | """
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
SimCSE will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_simcse_from_file.py path/to/sentences.txt
"""
from torch... |
"""
Python polyfills for sys
"""
from __future__ import annotations
import sys
from ..decorators import substitute_in_graph
__all__ = [
"intern",
"getrecursionlimit",
]
@substitute_in_graph(sys.intern, can_constant_fold_through=True)
def intern(string: str, /) -> str:
return string
@substitute_in_g... | """
Python polyfills for sys
"""
from __future__ import annotations
import sys
from ..decorators import substitute_in_graph
__all__ = [
"intern",
"getrecursionlimit",
]
@substitute_in_graph(sys.intern, can_constant_fold_through=True)
def intern(string: str, /) -> str:
return string
@substitute_in_g... |
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python trai... | """
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python trai... |
"""
This is a simple application of a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
w... | """
This is a simple application of a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
w... |
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
import pytest
from mmengine.registry import DefaultScope
class TestDefaultScope:
def test_scope(self):
default_scope = DefaultScope.get_instance('name1', scope_name='mmdet')
assert default_scope.scope_name == 'm... | # Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
import pytest
from mmengine.registry import DefaultScope
class TestDefaultScope:
def test_scope(self):
default_scope = DefaultScope.get_instance('name1', scope_name='mmdet')
assert default_scope.scope_name == 'm... |
"""Test PandasDataframeParser"""
from typing import Any
import pandas as pd
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers.pandas_dataframe import PandasDataFrameOutputParser
df = pd.DataFrame(
{
"chicken": [1, 2, 3, 4],
"veggies": [5, 4, 3, 2],
... | """Test PandasDataframeParser"""
from typing import Any
import pandas as pd
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers.pandas_dataframe import PandasDataFrameOutputParser
df = pd.DataFrame(
{
"chicken": [1, 2, 3, 4],
"veggies": [5, 4, 3, 2],
... |
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# ... | # dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend... |
from typing import Dict
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
import torch
from torch.utils.data import DataLoader
import logging
from ..util import batch_to_device
import os
import csv
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluato... | from typing import Dict
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
import torch
from torch.utils.data import DataLoader
import logging
from ..util import batch_to_device
import os
import csv
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluato... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from mmengine.fileio import dump, list_from_file
from mmengine.utils import mkdir_or_exist, scandir, track_iter_progress
from PIL import Image
def parse_args():
parser = argparse.ArgumentParser(
description='Convert images to coco ... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import mmcv
from PIL import Image
def parse_args():
parser = argparse.ArgumentParser(
description='Convert images to coco format without annotations')
parser.add_argument('img_path', help='The root path of images')
parser.a... |
from pydantic import BaseModel
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchema,
BlockWebhookConfig,
)
from backend.data.model import SchemaField
from backend.integrations.providers import ProviderName
from backend.util import settings
from backend.util.settings impor... | from pydantic import BaseModel
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchema,
BlockWebhookConfig,
)
from backend.data.model import SchemaField
from backend.util import settings
from backend.util.settings import AppEnvironment, BehaveAs
from ._api import (
TEST... |
#!/usr/bin/env python3
"""This is the preprocessing script for HuBERT model training.
The script includes:
- File list creation
- MFCC/HuBERT feature extraction
- KMeans clustering model training
- Pseudo-label generation
"""
import logging
from argparse import ArgumentParser, RawTextHelpFormatter
from ... | #!/usr/bin/env python3
"""This is the preprocessing script for HuBERT model training.
The script includes:
- File list creation
- MFCC/HuBERT feature extraction
- KMeans clustering model training
- Pseudo-label generation
"""
import logging
from argparse import ArgumentParser, RawTextHelpFormatter
from ... |
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. The agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
RunnableSerializable --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
... | """**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. The agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
RunnableSerializable --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
... |
import multiprocessing
import pytest
from jina import Client
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime
from jina.serve.runtimes.gateway.http import HTTPGatewayRuntime
from jina... | import asyncio
import json
import multiprocessing
import threading
import time
from collections import defaultdict
import pytest
from jina import Client, Document, Executor, requests
from jina.enums import PollingType
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.asyncio import ... |
# Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import (BatchFixedSizePad, BatchSyncRandomResize,
DetDataPreprocessor)
__all__ = ['DetDataPreprocessor', 'BatchSyncRandomResize', 'BatchFixedSizePad']
| # Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import BatchSyncRandomResize, DetDataPreprocessor
__all__ = ['DetDataPreprocessor', 'BatchSyncRandomResize']
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='PAA',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
preprocess_cfg=prepr... |
"""Init file of LlamaIndex."""
__version__ = "0.12.16"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.... | """Init file of LlamaIndex."""
__version__ = "0.12.15"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.... |