| input | output |
|---|---|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.hooks import Hook
from mmengine.model.wrappers import is_model_wrapper
from mmdet.registry import HOOKS
@HOOKS.register_module()
class SetEpochInfoHook(Hook):
"""Set runner's epoch information to the model."""
def before_train_epoch(self, runner)... | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.parallel import is_module_wrapper
from mmcv.runner import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class SetEpochInfoHook(Hook):
"""Set runner's epoch information to the model."""
def before_train_epoch(self, runner):
ep... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class ChannelMapper(BaseModule):
r"""Channel Mapper to reduce/increase channels of backbone features.
This is u... | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from ..builder import NECKS
@NECKS.register_module()
class ChannelMapper(BaseModule):
r"""Channel Mapper to reduce/increase channels of backbone features.
This is used to ... |
import asyncio
import json
from typing import Optional, Union
from unittest import mock
import pytest
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowRuntimeError
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.w... | import asyncio
from unittest import mock
from typing import Union, Optional
import pytest
from llama_index.core.workflow.workflow import (
Workflow,
Context,
)
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowRuntimeError
from llama_index.core.workflow.... |
import numpy as np
import pytest
import torch
from docarray import BaseDoc
from docarray.typing import AnyTensor, NdArray, TorchTensor
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.nump... | import numpy as np
import pytest
import torch
from docarray import BaseDoc
from docarray.typing import AnyTensor, NdArray, TorchTensor
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp ... |
"""
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level... | """
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize as deserialize
from keras.src.initializers import get as get
from keras.src.initializers import serialize as serialize
from keras.src.initializers.cons... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import STFT
from ... |
"""**Tracers** are classes for tracing runs.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> BaseTracer --> <name>Tracer # Examples: LangChainTracer, RootListenersTracer
--> <name> # Examples: LogStreamCallbackHandler
""" # noqa: E501
from typing import TYP... | """**Tracers** are classes for tracing runs.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> BaseTracer --> <name>Tracer # Examples: LangChainTracer, RootListenersTracer
--> <name> # Examples: LogStreamCallbackHandler
""" # noqa: E501
from importlib import ... |
from autogpt_libs.auth.depends import requires_user
from autogpt_libs.auth.models import User
from fastapi import Depends
from backend.util.settings import Settings
settings = Settings()
def get_user_id(user: User = Depends(requires_user)) -> str:
return user.user_id
| from autogpt_libs.auth.middleware import auth_middleware
from fastapi import Depends, HTTPException
from backend.data.user import DEFAULT_USER_ID
from backend.util.settings import Settings
settings = Settings()
def get_user_id(payload: dict = Depends(auth_middleware)) -> str:
if not payload:
# This hand... |
import time
from functools import partial
from huggingface_hub import HfApi, hf_hub_url
from packaging import version
from requests import ConnectionError, HTTPError
from .. import config
from . import logging
logger = logging.get_logger(__name__)
# Retry `preupload_lfs_files` in `huggingface_hub<0.20.0` on the "5... | import time
from functools import partial
from huggingface_hub import HfApi, hf_hub_url
from packaging import version
from requests import HTTPError
from .. import config
from . import logging
logger = logging.get_logger(__name__)
# Retry `preupload_lfs_files` in `huggingface_hub<0.20.0` on the "500 (Internal Serv... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.sagemaker_callback import (
SageMakerCallbackHandler,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation w... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.sagemaker_callback import (
SageMakerCallbackHandler,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation w... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result
from ..builder import DETECTORS, build_head
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLACT(SingleStageDetector):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>... | import torch
from mmdet.core import bbox2result
from ..builder import DETECTORS, build_head
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLACT(SingleStageDetector):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
def __init__(self,
b... |
#!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_utils import IS_CI, run_tests
from torch.testin... | #!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_utils import IS_CI, run_tests
from torch.testin... |
"""Setup script."""
import os
import pathlib
from setuptools import find_packages
from setuptools import setup
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_p... | """Setup script."""
import os
import pathlib
from setuptools import find_packages
from setuptools import setup
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_p... |
import base64
import hashlib
from datetime import datetime, timedelta, timezone
import os
import os
import jwt
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import (
Encoding,
PublicFormat,
load_pem_private_key,
)
SPCS_TOKEN_PATH = "/snowflake/se... | import base64
import hashlib
from datetime import datetime, timedelta, timezone
import os
import jwt
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import (
Encoding,
PublicFormat,
load_pem_private_key,
)
SPCS_TOKEN_PATH = "/snowflake/session/toke... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
import torch
from mmengine.logging import print_log
from mmengine.utils import digit_version
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('i... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
import torch
from mmengine.logging import print_log
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename'... |
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc... | # THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc... |
from ._transforms import BarkScale, BarkSpectrogram, Convolve, FFTConvolve, InverseBarkScale, Speed, SpeedPerturbation
__all__ = [
"BarkScale",
"BarkSpectrogram",
"Convolve",
"FFTConvolve",
"InverseBarkScale",
"SpeedPerturbation",
"Speed",
]
| from ._transforms import BarkScale, BarkSpectrogram, Convolve, FFTConvolve, InverseBarkScale
__all__ = [
"BarkScale",
"BarkSpectrogram",
"Convolve",
"FFTConvolve",
"InverseBarkScale",
]
|
from __future__ import annotations
import os
import tempfile
def is_ci() -> bool:
"""
Check if the code is running in a Continuous Integration (CI) environment.
This is determined by checking for the presence of certain environment variables.
"""
return "GITHUB_ACTIONS" in os.environ
class Safe... | from __future__ import annotations
import tempfile
class SafeTemporaryDirectory(tempfile.TemporaryDirectory):
"""
The GitHub Actions CI on Windows sometimes raises a NotADirectoryError when cleaning up the temporary directory.
This class is a workaround to avoid the error.
Unlike tempfile.TemporaryD... |
from __future__ import annotations
import argparse
import os
import re
import subprocess
from pathlib import Path
from setuptools import distutils # type: ignore[import,attr-defined]
UNKNOWN = "Unknown"
RELEASE_PATTERN = re.compile(r"/v[0-9]+(\.[0-9]+)*(-rc[0-9]+)?/")
def get_sha(pytorch_root: str | Path) -> str... | from __future__ import annotations
import argparse
import os
import re
import subprocess
from pathlib import Path
from setuptools import distutils # type: ignore[import]
UNKNOWN = "Unknown"
RELEASE_PATTERN = re.compile(r"/v[0-9]+(\.[0-9]+)*(-rc[0-9]+)?/")
def get_sha(pytorch_root: str | Path) -> str:
try:
... |
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=601)))
# Using 32 GPUS while training
optim_wrapper = dict(
type='OptimWrappe... | _base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=601)))
# Using 32 GPUS while training
optim_wrapper = dict(
type='OptimWrappe... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from pytest_mock import MockerFixture
from torch import hub
from ...torch_encoder import ImageTorchEncoder
def test_load_from_url(tmpdir: str, mocker: MockerFixture) -> None:
os.environ['TORCH_HOME']... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from torch import hub
from pytest_mock import MockerFixture
from ...torch_encoder import ImageTorchEncoder
def test_load_from_url(tmpdir: str, mocker: MockerFixture) -> None:
os.environ['TORCH_HOME'... |
from typing import Any, Dict, Union
import torch
from torchvision import datapoints, transforms as _transforms
from torchvision.transforms.v2 import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
"""[BETA] Convert bounding box coordinates to the given ... | from typing import Any, Dict, Union
import torch
from torchvision import datapoints, transforms as _transforms
from torchvision.transforms.v2 import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
"""[BETA] Convert bounding box coordinates to the given ... |
from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class ComparisonOperator(Enum):
EQUAL = "=="
NOT_EQUAL = "!="
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUAL = ">="
L... | from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class ComparisonOperator(Enum):
EQUAL = "=="
NOT_EQUAL = "!="
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUAL = ">="
L... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
model = dict(
type='ATSS',
data_preprocessor=dict(
... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
model = dict(
type='ATSS',
data_preprocessor=dict(
... |
"""**Tracers** are classes for tracing runs.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> BaseTracer --> <name>Tracer # Examples: LangChainTracer, RootListenersTracer
--> <name> # Examples: LogStreamCallbackHandler
""" # noqa: E501
from importlib import ... | """**Tracers** are classes for tracing runs.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> BaseTracer --> <name>Tracer # Examples: LangChainTracer, RootListenersTracer
--> <name> # Examples: LogStreamCallbackHandler
""" # noqa: E501
from importlib import ... |
"""Parser for JSON output."""
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import-untyped]
import pydantic
from pydantic import SkipValidation
from typing_extensions import override
from l... | """Parser for JSON output."""
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import-untyped]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputP... |
"""[DEPRECATED] Pipeline prompt template."""
from typing import Any
from pydantic import model_validator
from langchain_core._api.deprecation import deprecated
from langchain_core.prompt_values import PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import BaseC... | """[DEPRECATED] Pipeline prompt template."""
from typing import Any
from pydantic import model_validator
from langchain_core._api.deprecation import deprecated
from langchain_core.prompt_values import PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import BaseC... |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
# model settings
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
type='RetinaNet',
backbone=dict(
type='ResNet',
depth=50,
... | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
# model settings
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
type='RetinaNet',
backbone=dict(
type='ResNet',
depth=50,
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.baidu_qianfan_endpoint import (
QianfanChatEndpoint,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation ... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.baidu_qianfan_endpoint import (
QianfanChatEndpoint,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation ... |
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyEmbedding
@pytest.mark.proto
def test_proto_embedding():
embedding = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
embedding._to_n... | import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AnyEmbedding
@pytest.mark.proto
def test_proto_embedding():
embedding = parse_obj_as(AnyEmbedding, np.zeros((3, 224, 224)))
embedding.... |
_base_ = './mask-rcnn_x101-32x4d_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pyto... | _base_ = './mask_rcnn_x101_32x4d_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pyto... |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import List, Optional
from mmengine.dataset import BaseDataset
from mmengine.fileio import FileClient, load
from mmengine.utils import is_abs
from ..registry import DATASETS
@DATASETS.register_module()
class BaseDetDataset(BaseDataset... | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import List, Optional
from mmengine.dataset import BaseDataset
from mmengine.fileio import FileClient, load
from mmengine.utils import is_abs
from ..registry import DATASETS
@DATASETS.register_module()
class BaseDetDataset(BaseDataset... |
from docarray import DocumentArray
from jina import Executor, requests
from jina.parsers import set_pod_parser
class ProcessExecutor(Executor):
@requests(on='/')
def process(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = doc.text + 'world'
doc.tags['processe... | from docarray import DocumentArray
from jina import Executor, requests
class ProcessExecutor(Executor):
@requests(on='/')
def process(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = doc.text + 'world'
doc.tags['processed'] = True
def _validate_dummy_custom_... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FCOS',
prepr... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='FCOS',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from pathlib import Path
import pytest
import spacy
from jina import Document, DocumentArray, Executor
from ...spacy_text_encoder import SpacyTextEncoder
def test_config():
ex = Executor.load_c... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
import spacy
from jina import Document, DocumentArray
try:
from spacy_text_encoder import SpacyTextEncoder
except:
from ...spacy_text_encoder import SpacyTextEncoder
cur_dir = ... |
import os
import shutil
import subprocess
import sys
def _get_run_args(print_args: bool = True):
from jina.helper import get_rich_console
from jina.parsers import get_main_parser
console = get_rich_console()
silent_print = {'help', 'hub', 'export', 'auth', 'cloud', 'ping'}
parser = get_main_par... | import os
import shutil
import subprocess
import sys
def _get_run_args(print_args: bool = True):
from jina.helper import get_rich_console
from jina.parsers import get_main_parser
console = get_rich_console()
silent_print = {'help', 'hub', 'export', 'auth', 'cloud', 'ping'}
parser = get_main_par... |
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
image_size = (640, 640)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
mode... | _base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
neck=dict(norm_cfg=norm... |
"""Utilities for loading configurations from langchain_core-hub."""
import warnings
from typing import Any
from langchain_core._api.deprecation import deprecated
@deprecated(
since="0.1.30",
removal="1.0",
message=(
"Using the hwchase17/langchain-hub "
"repo for prompts is deprecated. Pl... | """Utilities for loading configurations from langchain_core-hub."""
import warnings
from typing import Any
from langchain_core._api.deprecation import deprecated
@deprecated(
since="0.1.30",
removal="1.0",
message=(
"Using the hwchase17/langchain-hub "
"repo for prompts is deprecated. Pl... |
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
class IdentityTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
... | import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
class IdentityTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='VFNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='VFNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import import_... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import numpy as np
import pytest
from jina import Document, DocumentArray
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import import_vectors
from ..annoy_searcher impo... |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
DefaultOptimizerConstructor, build_optimizer)
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMoment... | # Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
DefaultOptimizerConstructor, build_optimizer,
build_optimizer_constructor)
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import bbox2distance, distance2bbox
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
This coder e... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.data_elements.bbox import bbox2distance, distance2bbox
from mmdet.registry import TASK_UTILS
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
This code... |
# coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses... | # coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.reuters import get_label_names as get_label_names
from keras.src.datasets.reuters import get_word_index as get_word_index
from keras.src.datasets.reuters import load_data as ... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.reuters import get_label_names
from keras.src.datasets.reuters import get_word_index
from keras.src.datasets.reuters import load_data
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_p... | _base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_p... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | import os
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageNdArray, ImageTensor, ImageTorchTensor
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
im... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from parameterized import parameterized
from mmdet.data_elements import DetDataSample
from mmdet.models import build_detector
from mmdet.testing._utils import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
cl... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from parameterized import parameterized
from mmdet.core import DetDataSample
from mmdet.models import build_detector
from mmdet.testing._utils import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestT... |
"""Strategies for updater tests."""
from typing import cast
import pytest
strategies = pytest.importorskip("hypothesis.strategies")
exact_parameter_strategy = strategies.fixed_dictionaries(
{
"nthread": strategies.integers(1, 4),
"max_depth": strategies.integers(1, 11),
"min_child_weigh... | """Strategies for updater tests."""
from typing import cast
import pytest
strategies = pytest.importorskip("hypothesis.strategies")
exact_parameter_strategy = strategies.fixed_dictionaries(
{
"nthread": strategies.integers(1, 4),
"max_depth": strategies.integers(1, 11),
"min_child_weigh... |
from __future__ import annotations
from collections.abc import Sequence
from copy import deepcopy
from typing import Any, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from lan... | from __future__ import annotations
from collections.abc import Sequence
from copy import deepcopy
from typing import Any, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from lan... |
from backend.data.block import (
Block,
BlockCategory,
BlockManualWebhookConfig,
BlockOutput,
BlockSchema,
)
from backend.data.model import SchemaField
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks.generic import GenericWebhookType
class GenericWebhookT... | from backend.data.block import (
Block,
BlockCategory,
BlockManualWebhookConfig,
BlockOutput,
BlockSchema,
)
from backend.data.model import SchemaField
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks.generic import GenericWebhookType
class GenericWebhookT... |
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr i... | # Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_r... |
import pathlib
from typing import Any, Optional
import pytest
from langchain.callbacks import FileCallbackHandler
from langchain.chains.base import CallbackManagerForChainRun, Chain
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: list[str] = ... | import pathlib
from typing import Any, Dict, List, Optional
import pytest
from langchain.callbacks import FileCallbackHandler
from langchain.chains.base import CallbackManagerForChainRun, Chain
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: ... |
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class MainContentExtractorReader(BaseReader):
"""
MainContentExtractor web page reader.
Reads pages from the web.
Args:
text_format (str, optional): The ... | from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class MainContentExtractorReader(BaseReader):
"""MainContentExtractor web page reader.
Reads pages from the web.
Args:
text_format (str, optional): The forma... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import CenterNetUpdateHead
class TestCenterNetUpdateHead(TestCase):
def test_centernet_update_head_loss(self):
"""Tests fcos head loss wh... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet.models.dense_heads import CenterNetUpdateHead
class TestCenterNetUpdateHead(TestCase):
def test_centernet_update_head_loss(self):
"""Tests fcos head loss when tru... |
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and i... | from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and i... |
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import Spar... | from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import Spar... |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
from llama_index.core.node_parser.file.markdown import MarkdownNodeParser
from llama_index.core.schema import Document
def test_header_splits() -> None:
markdown_parser = MarkdownNodeParser()
splits = markdown_parser.get_nodes_from_documents(
[
Document(
text="""# Main Hea... | from llama_index.core.node_parser.file.markdown import MarkdownNodeParser
from llama_index.core.schema import Document
def test_header_splits() -> None:
markdown_parser = MarkdownNodeParser()
splits = markdown_parser.get_nodes_from_documents(
[
Document(
text="""# Main Hea... |
from datetime import datetime
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.earnings_call_transcript.utils import get_earnings_transcript
class EarningsCallTranscript(BaseReader):
def __init__(self, year: int, ti... | from datetime import datetime
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.earnings_call_transcript.utils import get_earnings_transcript
class EarningsCallTranscript(BaseReader):
def __init__(self, year: int, ti... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel
from torch.nn.parallel.distributed import DistributedDataParallel
from mmengine.model.wrappers import (MM... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from mmengine.model.wrappers import (MMDataParallel, MMDistributedDataParallel,
is_model_wrapper)
from mmengine... |
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import torch
from torchvision.transforms import InterpolationMode
from ._feature import _Feature, FillTypeJIT
class Mask(_Feature):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_sub... | from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import torch
from torchvision.transforms import InterpolationMode
from ._feature import _Feature, FillTypeJIT
class Mask(_Feature):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_sub... |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... |
from __future__ import annotations
from sentence_transformers import SentenceTransformer, losses, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized) loss.
This is a modific... | from sentence_transformers import SentenceTransformer, losses, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, design... |
from typing import List, Optional, Sequence
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.indices.query.query_transform.base import BaseQueryTransform
... | from typing import List, Optional, Sequence
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.indices.query.query_transform.base import BaseQueryTransform
... |
"""Abstract interface for document loader implementations."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional
from langchain_core.runnables import run_in_executor
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterator
from lan... | """Abstract interface for document loader implementations."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional
from langchain_core.runnables import run_in_executor
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterator
from lan... |
from keras.src import backend
from keras.src import ops
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.ops.operation_utils import compute_pooling_output_shape
from keras.src.utils import argument_validation
class BasePooling(Layer):
"""Base pooling layer.... | from keras.src import backend
from keras.src import ops
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.ops.operation_utils import compute_pooling_output_shape
from keras.src.utils import argument_validation
class BasePooling(Layer):
"""Base pooling layer.... |
import pytest
from langchain_core.documents import Document
from langchain.indexes._api import _HashedDocument
def test_hashed_document_hashing() -> None:
hashed_document = _HashedDocument( # type: ignore[call-arg]
uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
... | import pytest
from langchain_core.documents import Document
from langchain.indexes._api import _HashedDocument
def test_hashed_document_hashing() -> None:
hashed_document = _HashedDocument( # type: ignore[call-arg]
uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
... |
import base64
import re
from typing import Union
from langchain_core.messages import (
AIMessage,
BaseMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from pydantic import BaseModel
from .core import Invoker, Prompty, SimpleModel
class RoleMap:
_ROLE_MAP: dict[str, type[BaseMessage]]... | import base64
import re
from typing import Dict, List, Type, Union
from langchain_core.messages import (
AIMessage,
BaseMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from pydantic import BaseModel
from .core import Invoker, Prompty, SimpleModel
class RoleMap:
_ROLE_MAP: Dict[str, ... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
from .anchor_head import AnchorHead
@MODEL... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from torch import Tensor
from mmdet.core.utils import OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .anchor_head import AnchorHead
@... |
# type: ignore
"""Script to generate migrations for the migration script."""
import json
import os
import pkgutil
from typing import Optional
import click
from langchain_cli.namespaces.migrate.generate.generic import (
generate_simplified_migrations,
)
from langchain_cli.namespaces.migrate.generate.grit import (... | # type: ignore
"""Script to generate migrations for the migration script."""
import json
import os
import pkgutil
import click
from langchain_cli.namespaces.migrate.generate.generic import (
generate_simplified_migrations,
)
from langchain_cli.namespaces.migrate.generate.grit import (
dump_migrations_as_grit... |
import os
from typing import BinaryIO, Optional, Union
import pyarrow.parquet as pq
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
fro... | import os
from typing import BinaryIO, Optional, Union
import pyarrow as pa
import pyarrow.parquet as pq
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..uti... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.dense_heads import YOLOXHead
def test_yolox_head_loss():
"""Tests yolox head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'... | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.dense_heads import YOLOXHead
def test_yolox_head_loss():
"""Tests yolox head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'... |
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyUrl
@pytest.mark.proto
def test_proto_any_url():
uri = parse_obj_as(AnyUrl, 'http://jina.ai/img.png')
uri._to_node_protobuf()
def test_json_schema():
... | import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AnyUrl
@pytest.mark.proto
def test_proto_any_url():
uri = parse_obj_as(AnyUrl, 'http://jina.ai/img.png')
uri._to_node_protobuf()
def test_json_schema():... |
import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (... | import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_shar... |
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil" # "naver/efficient-splade-V-large-doc" # "... | import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "naver/effici... |
from typing import Optional
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.helper import _uri_to_blob
@_register_proto(proto_type_name='text_url')
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or... | from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from docarray.proto import NodeProto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.helper import _uri_to_blob
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or a local file path.
"""
... |
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
tra... | _base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
tra... |
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
| import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass
|
from ._dsp import adsr_envelope, extend_pitch, frequency_impulse_response, oscillator_bank, sinc_impulse_response
from .functional import add_noise, barkscale_fbanks, convolve, deemphasis, fftconvolve, preemphasis, speed
__all__ = [
"add_noise",
"adsr_envelope",
"barkscale_fbanks",
"convolve",
"de... | from ._dsp import adsr_envelope, extend_pitch, oscillator_bank, sinc_impulse_response
from .functional import add_noise, barkscale_fbanks, convolve, deemphasis, fftconvolve, preemphasis, speed
__all__ = [
"add_noise",
"adsr_envelope",
"barkscale_fbanks",
"convolve",
"deemphasis",
"extend_pitch"... |
from __future__ import annotations
from collections.abc import Sequence
from typing import Any, TypeVar
import torch
from torch.utils._pytree import tree_map
from torchvision.tv_tensors._tv_tensor import TVTensor
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(TVTensor):
categories: Sequence[str] | No... | from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from torchvision.tv_tensors._tv_tensor import TVTensor
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(TVTensor):
categories: Optional[Sequence[str]]... |
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.7.3'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versio... | # Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.7.2'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versio... |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: add_voter.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_poo... | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: add_voter.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_poo... |
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import ADE20KPanopticDataset
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import COCOCaptionDataset
from .coco_panoptic... | # Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import ADE20KPanopticDataset
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman ... |
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser, set_pod_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import a... | import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser, set_pod_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import a... |
"""
This is a simple application for sparse encoder: Computing embeddings.
we have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
w... | """
This is a simple application for sparse encoder: Computing embeddings.
we have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
w... |
from langchain_core.prompts.prompt import PromptTemplate
_PROMPT_TEMPLATE = """
You are an agent controlling a browser. You are given:
(1) an objective that you are trying to achieve
(2) the URL of your current web page
(3) a simplified text description of what's visible in the browser window (more on that below)... | # flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
_PROMPT_TEMPLATE = """
You are an agent controlling a browser. You are given:
(1) an objective that you are trying to achieve
(2) the URL of your current web page
(3) a simplified text description of what's visible in the browser window (more... |
import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
)
from llama_index.llms.nvidia import NVIDIA
@pytest.mark.integration
def test_chat(chat_model: str, mode: dict) -> None:
message = ChatMessage(content="Hello")
response = NVIDIA(model=chat_... | import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
)
from llama_index.llms.nvidia import NVIDIA
@pytest.mark.integration()
def test_chat(chat_model: str, mode: dict) -> None:
message = ChatMessage(content="Hello")
response = NVIDIA(model=cha... |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl... | # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl... |
# TODO: enable ruff qa on this file when we figure out why it thinks weaviate_client is
# redefined at each test that fixture
# ruff: noqa
import numpy as np
import pytest
import torch
from pydantic import Field
from docarray import BaseDoc
from docarray.index.backends.weaviate import WeaviateDocumentIndex
from ... | # TODO: enable ruff qa on this file when we figure out why it thinks weaviate_client is
# redefined at each test that fixture
# ruff: noqa
import numpy as np
import pytest
import torch
from pydantic import Field
from docarray import BaseDoc
from docarray.index.backends.weaviate import WeaviateDocumentIndex
from ... |
import warnings
from typing import Any, Dict, List, Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import Transform
from torchvision.transforms import functional as _F
from typing_extensions import Literal
from ._transform imp... | import warnings
from typing import Any, Dict, List, Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import Transform
from torchvision.transforms import functional as _F
from typing_extensions import Literal
from ._transform impor... |
"""Test in memory docstore."""
from langchain.output_parsers.regex_dict import RegexDictParser
DEF_EXPECTED_RESULT = {"action": "Search", "action_input": "How to use this class?"}
DEF_OUTPUT_KEY_TO_FORMAT = {"action": "Action", "action_input": "Action Input"}
DEF_README = """We have just received a new result from ... | """Test in memory docstore."""
from langchain.output_parsers.regex_dict import RegexDictParser
DEF_EXPECTED_RESULT = {"action": "Search", "action_input": "How to use this class?"}
DEF_OUTPUT_KEY_TO_FORMAT = {"action": "Action", "action_input": "Action Input"}
DEF_README = """We have just received a new result from ... |
from collections.abc import Awaitable
from typing import TYPE_CHECKING, Callable, Optional, Union
from langchain_core.runnables.config import (
RunnableConfig,
acall_func_with_variable_args,
call_func_with_variable_args,
)
from langchain_core.tracers.base import AsyncBaseTracer, BaseTracer
from langchain_c... | from collections.abc import Awaitable
from typing import Callable, Optional, Union
from uuid import UUID
from langchain_core.runnables.config import (
RunnableConfig,
acall_func_with_variable_args,
call_func_with_variable_args,
)
from langchain_core.tracers.base import AsyncBaseTracer, BaseTracer
from lang... |
import itertools
import torch
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
get_asset_path,
skipIfNoCtcDecoder,
TempDirMixin,
TorchaudioTestCase,
)
NUM_TOKENS = 8
@skipIfNoCtcDecoder
class CTCDecoderTest(TempDirMixin, TorchaudioTestCase):
def _get_decode... | import itertools
import torch
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_asset_path,
skipIfNoCtcDecoder,
)
NUM_TOKENS = 8
@skipIfNoCtcDecoder
class CTCDecoderTest(TempDirMixin, TorchaudioTestCase):
def _get_decode... |
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__: List[str] = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List ... | # coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__ = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all foun... |
"""Configure global settings and get information about the working environment."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Machine learning module for Python
# ==================================
#
# sklearn is a Python module integrating classical machine
# learning algorithms... | """Configure global settings and get information about the working environment."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Machine learning module for Python
# ==================================
#
# sklearn is a Python module integrating classical machine
# learning algorithms... |
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransfor... | from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransfor... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...spacy_text_encoder import SpacyTextEncoder
_EMBEDDING_DIM = 96
@pytest.mark.parametrize('request_size', [1, 10, 50, 100]... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Document, Flow, DocumentArray
try:
from spacy_text_encoder import SpacyTextEncoder
except:
from ...spacy_text_encoder import SpacyTextEncoder
def test_spacy_text_encoder():
docs = ... |
from ...utils import is_flax_available, is_torch_available
if is_torch_available():
from .controlnet import ControlNetModel, ControlNetOutput
from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
from .controlnet_hunyuan import (
HunyuanControlNetOutput,
... | from ...utils import is_flax_available, is_torch_available
if is_torch_available():
from .controlnet import ControlNetModel, ControlNetOutput
from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
from .controlnet_hunyuan import (
HunyuanControlNetOutput,
... |