| input (string, 33–5k chars) | output (string, 32–5k chars) |
|---|---|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable... | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable... |
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensio... | from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions_video,
get_di... |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.load import dataset_module_factory, import_main_class
from data... | import os
from tempfile import TemporaryDirectory
from unittest import TestCase
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.load import dataset_module_factory, import_main_class
from data... |
from __future__ import annotations
import sys
from .classification import CrossEncoderClassificationEvaluator
from .correlation import CrossEncoderCorrelationEvaluator
from .deprecated import (
CEBinaryAccuracyEvaluator,
CEBinaryClassificationEvaluator,
CECorrelationEvaluator,
CEF1Evaluator,
CERer... | from __future__ import annotations
# TODO: Consider renaming all evaluators to CrossEncoder..., e.g. CrossEncoderNanoBEIREvaluator, CrossEncoderClassificationEvaluator, etc.
from .CEBinaryAccuracyEvaluator import CEBinaryAccuracyEvaluator
from .CEBinaryClassificationEvaluator import CEBinaryClassificationEvaluator
fro... |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
from jina import Flow
import os
os.environ['JINA_LOG_LEVEL'] = 'DEBUG'
if __name__ == '__main__':
with Flow.load_config('flow.yml') as f:
f.block()
| from jina import Flow
import os
os.environ['JINA_LOG_LEVEL'] = 'DEBUG'
if __name__ == '__main__':
with Flow.load_config('flow.yml') as f:
f.block()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections import Sequence
from pathlib import Path
import mmcv
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from pathlib import Path
import mmcv
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
def parse_args():
p... |
from __future__ import annotations
__version__ = "4.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
import warnings
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_mode... | from __future__ import annotations
__version__ = "4.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
import warnings
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_mode... |
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePol... | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePol... |
from __future__ import annotations
from .DenoisingAutoEncoderDataset import DenoisingAutoEncoderDataset
from .NoDuplicatesDataLoader import NoDuplicatesDataLoader
from .ParallelSentencesDataset import ParallelSentencesDataset
from .SentenceLabelDataset import SentenceLabelDataset
from .SentencesDataset import Sentence... | from .DenoisingAutoEncoderDataset import DenoisingAutoEncoderDataset
from .NoDuplicatesDataLoader import NoDuplicatesDataLoader
from .ParallelSentencesDataset import ParallelSentencesDataset
from .SentenceLabelDataset import SentenceLabelDataset
from .SentencesDataset import SentencesDataset
__all__ = [
"Denoising... |
"""Firestore Reader."""
from typing import Any, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
DEFAULT_FIRESTORE_DATABASE = "(default)"
USER_AGENT = "LlamaHub"
IMPORT_ERROR_MSG = (
"`firestore` package not found, please run `pip3 install google-cl... | """Firestore Reader."""
from typing import Any, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
DEFAULT_FIRESTORE_DATABASE = "(default)"
USER_AGENT = "LlamaHub"
IMPORT_ERROR_MSG = (
"`firestore` package not found, please run `pip3 install google-cl... |
import csv
import os
import random
import string
from pathlib import Path
from torchaudio.datasets import fluentcommands
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
HEADER = ["", "path", "speakerId", "transcription", "action", "object", "location"]
SLOTS = [... | import csv
import os
import random
import string
from pathlib import Path
from torchaudio.datasets import fluentcommands
from torchaudio_unittest.common_utils import (
get_whitenoise,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
HEADER = ["", "path", "speakerId", "transcription", "action", "object", ... |
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import pickle
import warnings
from collections import OrderedDict
import torch
import torch.distributed as dist
from mmcv.runner import OptimizerHook, get_dist_info
from torch._utils import (_flatten_dense_tensors, _take_tensors,
... | import functools
import pickle
import warnings
from collections import OrderedDict
import torch
import torch.distributed as dist
from mmcv.runner import OptimizerHook, get_dist_info
from torch._utils import (_flatten_dense_tensors, _take_tensors,
_unflatten_dense_tensors)
def _allreduce_coa... |
import concurrent.futures
import importlib
import subprocess
from pathlib import Path
def test_importable_all() -> None:
for path in Path("../core/langchain_core/").glob("*"):
module_name = path.stem
if not module_name.startswith(".") and path.suffix != ".typed":
module = importlib.imp... | import concurrent.futures
import importlib
import subprocess
from pathlib import Path
def test_importable_all() -> None:
for path in Path("../core/langchain_core/").glob("*"):
module_name = path.stem
if not module_name.startswith(".") and path.suffix != ".typed":
module = importlib.imp... |
import os
import numpy as np
import pytest
from docarray import Document
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_single_doc_summary():
# empty doc
Document().summary()
# nested doc
Document(
chunks=[
Document(),
Document(chunks=[Document()]),
... | import os
import numpy as np
from docarray import Document
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_single_doc_summary():
# empty doc
Document().summary()
# nested doc
Document(
chunks=[
Document(),
Document(chunks=[Document()]),
Docu... |
"""Flat reader."""
from fsspec import AbstractFileSystem
from fsspec.implementations.local import LocalFileSystem
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class FlatReader(BaseReader):
"... | """Flat reader."""
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class FlatReader(BaseReader):
"""Flat reader.
Extract raw text from a file and save the file type in the metadata
"""... |
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
from unittest import TestCase
from unittest.mock import Mock
from mmcv.cnn import VGG
from mmengine.dataset import BaseDataset
from torch import nn
from mmdet.engine.hooks import NumClassCheckHook
from mmdet.models.roi_heads.mask_heads import F... | # Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
from unittest import TestCase
from unittest.mock import Mock
from mmcv.cnn import VGG
from mmengine.dataset import BaseDataset
from torch import nn
from mmdet.engine.hooks import NumClassCheckHook
from mmdet.models.roi_heads.mask_heads import F... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
... |
_base_ = './ga-rpn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
... | _base_ = './ga_rpn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
... |
# mypy: allow-untyped-defs
r"""Contains definitions of the methods used by the _BaseDataLoaderIter to put fetched tensors into pinned memory.
These **needs** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import collections
import copy
import queue
import torch
from torch._utils impo... | # mypy: allow-untyped-defs
r"""Contains definitions of the methods used by the _BaseDataLoaderIter to put fetched tensors into pinned memory.
These **needs** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import collections
import copy
import queue
import torch
from torch._utils impo... |
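
The pair above revolves around the data-loader helpers that place fetched tensors into pinned memory. A minimal caller-side sketch of that mechanism (the dataset and batch size are illustrative assumptions, not taken from the row):

```python
# pin_memory=True copies each fetched batch into page-locked host
# memory, which enables asynchronous host-to-GPU transfers.
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(32, 4))  # stand-in data
loader = DataLoader(dataset, batch_size=8, pin_memory=True)
for (batch,) in loader:
    # non_blocking=True only helps because the source tensor is pinned
    if torch.cuda.is_available():
        batch = batch.to("cuda", non_blocking=True)
    break
```
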
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.task_modules.coders import (DeltaXYWHBBoxCoder,
DeltaXYWHBBoxCoderForGLIP)
def test_delta_bbox_coder():
coder = DeltaXYWHBBoxCoder()
rois = torch.Tensor([[0., 0., 1., 1.... | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.task_modules.coders import DeltaXYWHBBoxCoder
def test_delta_bbox_coder():
coder = DeltaXYWHBBoxCoder()
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5.... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
fro... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
fro... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
def preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg, num_things,
num_stuff, img_metas):
"""Preprocess the ground truth for a image.
Args:
gt_labels (Tensor): Ground truth labels of each bbox,
... | # Copyright (c) OpenMMLab. All rights reserved.
import torch
def preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg, num_things,
num_stuff):
"""Preprocess the ground truth for a image.
Args:
gt_labels (Tensor): Ground truth labels of each bbox,
with sha... |
__version__ = '0.13.18'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
| __version__ = '0.13.17'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .pipeline_switch_hook import PipelineSwitchHook
from .set_epoch_in... | # Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .pipeline_switch_hook import PipelineSwitchHook
from .set_epoch_in... |
import tempfile
import unittest
from transformers import LlavaConfig
class LlavaConfigTest(unittest.TestCase):
def test_llava_reload(self):
"""
Simple test for reloading default llava configs
"""
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig()
... | import tempfile
import unittest
from transformers import LlavaConfig
class LlavaConfigTest(unittest.TestCase):
def test_llava_reload(self):
"""
Simple test for reloading default llava configs
"""
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig()
... |
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from typing import Optional, Sequence
from mmengine.dist import is_main_process
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump
from mmengine.logging import MMLogger
from mmengine.structures import InstanceData
... | # Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from typing import Optional, Sequence
from mmengine.dist import is_main_process
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump
from mmengine.logging import MMLogger
from mmengine.structures import InstanceData
... |
from typing import Any, Optional, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_f... | from typing import Any, Optional, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_f... |
import time
import pytest
from jina import Flow
from tests.integration.instrumentation import (
ExecutorFailureWithTracing,
ExecutorTestWithTracing,
get_services,
get_trace_ids,
get_traces,
partition_spans_by_kind,
spans_with_error,
)
@pytest.mark.parametrize(
'protocol, client_type,... | import time
import pytest
from jina import Flow
from tests.integration.instrumentation import (
ExecutorFailureWithTracing,
ExecutorTestWithTracing,
get_services,
get_trace_ids,
get_traces,
partition_spans_by_kind,
spans_with_error,
)
@pytest.mark.parametrize(
'protocol, client_type,... |
import os
import numpy as np
import pytest
from docarray import DocumentArray, Document
from docarray.array.annlite import DocumentArrayAnnlite
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySq... | import os
import numpy as np
import pytest
from docarray import DocumentArray, Document
from docarray.array.annlite import DocumentArrayAnnlite
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySq... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class YOLACT(SingleStageInstanceSegmentor):
"""Implementation of `YOLACT... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils.typing import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class YOLACT(SingleStageInstanceSegmentor):
"""Implementation of... |
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
# MMEngine support the following two ways, users can choose
# according to convenience
# optim_wrapper = dict... | _base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class CenterNet(SingleStageDetector):
"""Implementation of CenterNet(Objects as Points)
<... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class CenterNet(SingleStageDetector):
"""Implementation of CenterNet(Objects as Points)
... |
from tempfile import NamedTemporaryFile
import huggingface_hub
import pytest
import requests
from packaging import version
from datasets.utils.file_utils import fsspec_get, fsspec_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline, require_not_windows
@pytest.mark.integration... | from tempfile import NamedTemporaryFile
import huggingface_hub
import pytest
import requests
from packaging import version
from datasets.utils.file_utils import fsspec_get, fsspec_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline, require_not_windows
@pytest.mark.integration... |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
"""A dataset reader that reads from a Spark DataFrame.
... | from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
"""A dataset reader that reads from a Spark DataFrame.
... |
from typing import Any, ForwardRef, Optional
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
from docarray.typing.id import ID
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type ... | from typing import Any, Optional
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
from docarray.typing.id import ID
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Te... |
_base_ = './gfl_r50_fpn_ms-2x_coco.py'
model = dict(
type='GFL',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
dcn=d... | _base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
type='GFL',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
... |
# dataset settings
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='dis... | # dataset settings
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='dis... |
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
tra... | # dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
tra... |
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file... | _base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file... |
import os
import urllib
import numpy as np
import PIL
import pytest
from pydantic.tools import parse_obj_as
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.pat... | import numpy as np
from pydantic.tools import parse_obj_as
from docarray.typing import ImageUrl
def test_image_url():
uri = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, 'http://... |
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from torch.testing._internal.common_utils import raise_on_run_directly
from torch.testing._internal.jit_utils import JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
s... | # Owner(s): ["oncall: jit"]
import os
import sys
import torch
from torch.testing._internal.common_utils import raise_on_run_directly
from torch.testing._internal.jit_utils import JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
s... |
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
... | _base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
... |
_base_ = './solov2-light_r50_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=18, init_cfg=dict(checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
| _base_ = 'solov2_light_r50_fpn_mstrain_3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=18, init_cfg=dict(checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
"""**Messages** are objects used in prompts and chat conversations.
**Class hierarchy:**
.. code-block::
BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChu... | """**Messages** are objects used in prompts and chat conversations.
**Class hierarchy:**
.. code-block::
BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChu... |
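
The class hierarchy quoted in this pair is used directly when assembling chat prompts. A short sketch with illustrative conversation content:

```python
# Constructing the concrete message types named in the hierarchy above.
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

conversation = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="What does BaseMessage provide?"),
    AIMessage(content="A common base class with a `content` field."),
]
for message in conversation:
    print(type(message).__name__, "->", message.content)
```
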
from jsonschema import Draft7Validator
from jina.schemas import get_full_schema
def test_full_schema():
schema = get_full_schema()
Draft7Validator.check_schema(schema)
# assert jina concepts exist in definitions
for concept in ['gateway', 'flow', 'metas', 'deployment']:
assert f'Jina::{concep... | from jsonschema import Draft7Validator
from jina.schemas import get_full_schema
def test_full_schema():
Draft7Validator.check_schema(get_full_schema())
|
from typing import Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import (
... | from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.... |
from typing import Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import is_simple_tensor
def erase_image_tensor(
image: torch.Tensor, i: int, j: int,... | from typing import Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import is_simple_tensor
def erase_image_tensor(
image: torch.Tensor, i: int, j: int,... |
import numpy as np
from docarray import Document
from docarray.typing import Embedding
def test_set_embedding():
class MyDocument(Document):
embedding: Embedding
d = MyDocument(embedding=np.zeros((3, 224, 224)))
assert isinstance(d.embedding, np.ndarray)
assert (d.embedding == np.zeros((3, ... | import numpy as np
from docarray import Document
from docarray.typing import Embedding
def test_set_embedding():
class MyDocument(Document):
embedding: Embedding
d = MyDocument(embedding=np.zeros((3, 224, 224)))
assert isinstance(d.embedding, Embedding)
assert isinstance(d.embedding, np.nda... |
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from ..base.backend import BaseBackendMixin
from ....helper import dataclass_from_dict, filter_dict
if TYPE_CHECKING:
from ....typing... | from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
)
import numpy as np
from ..base.backend import BaseBackendMixin
from ....helper import dataclass_from_dict, filter_dict
if TYPE_CHECKING:
from ....typing import DocumentArray... |
"""
A quantized model executes some or all of the operations with integers rather than floating point values. This allows for a more compact models and the use of high performance vectorized operations on many hardware platforms.
As a result, you get about 40% smaller and faster models. The speed-up depends on your CP... | """
A quantized model executes some or all of the operations with integers rather than floating point values. This allows for a more compact models and the use of high performance vectorized operations on many hardware platforms.
As a result, you get about 40% smaller and faster models. The speed-up depends on your CP... |
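
The idea described in this pair — running some operations in int8 instead of float — corresponds to what PyTorch calls dynamic quantization. A minimal sketch under that assumption; the toy model is illustrative, not taken from the row:

```python
# Dynamic quantization: weights are stored as int8 and activations are
# quantized on the fly, so no calibration pass is required.
import torch
from torch import nn

model = nn.Sequential(nn.Linear(768, 768), nn.ReLU(), nn.Linear(768, 2))
quantized = torch.quantization.quantize_dynamic(
    model, {nn.Linear}, dtype=torch.qint8
)
print(quantized)  # Linear layers replaced by their quantized counterparts
```
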
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .hooks import *
from .registry import *
from .utils import *
| # Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .registry import *
from .utils import *
|
import pytest
from docarray import DocumentArray
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate... | import pytest
from docarray import DocumentArray
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate... |
"""Fake LLMs for testing purposes."""
import asyncio
import time
from collections.abc import AsyncIterator, Iterator, Mapping
from typing import Any, Optional
from typing_extensions import override
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchai... | import asyncio
import time
from collections.abc import AsyncIterator, Iterator, Mapping
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_m... |
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.l... | from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.l... |
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.models import Pooling, Transformer
from... | from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, ... |
import warnings
from typing import Any
import torch
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
"""[DEPREACTED] Use to_image() and to_dtype() instead."""
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be remove... | import warnings
from typing import Any, List
import torch
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
"""[DEPREACTED] Use to_image() and to_dtype() instead."""
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be ... |
import asyncio
import time
from multiprocessing import Event, Process
import aiohttp
import pytest
from jina import DocumentArray, Executor, Flow, requests
from jina.helper import random_port
from jina.types.request.data import DataRequest
INPUT_DA_LEN = 2
NUM_CLIENTS = 3
@pytest.fixture()
def gateway_port():
... | import asyncio
import time
from multiprocessing import Event, Process
import aiohttp
import pytest
from jina import DocumentArray, Executor, Flow, requests
from jina.types.request.data import DataRequest
from jina.helper import random_port
INPUT_DA_LEN = 2
NUM_CLIENTS = 3
@pytest.fixture()
def gateway_port():
p... |
import os
from time import time
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
pytestmark = [pytest.mark.... | import os
from time import time
import numpy as np
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
pytestmark = [pytest.mark... |
from jina import Document, Flow
from sentencizer import Sentencizer
def test_exec():
f = Flow().add(uses=Sentencizer)
with f:
resp = f.post(
on='/test',
inputs=Document(text='Hello. World! Go? Back'),
)
assert resp[0].chunks[0].text == 'Hello.'
assert re... | from jina import Document, Flow
from sentencizer import Sentencizer
def test_exec():
f = Flow().add(uses=Sentencizer)
with f:
resp = f.post(
on='/test',
inputs=Document(text='Hello. World! Go? Back'),
return_results=True,
)
assert resp[0].docs[0].chu... |
from __future__ import annotations
class InputExample:
"""Structure for one input example with texts, the label and a unique id"""
def __init__(self, guid: str = "", texts: list[str] = None, label: int | float = 0):
"""
Creates one InputExample with the given texts, guid and label
Ar... | from typing import List, Union
class InputExample:
"""Structure for one input example with texts, the label and a unique id"""
def __init__(self, guid: str = "", texts: List[str] = None, label: Union[int, float] = 0):
"""
Creates one InputExample with the given texts, guid and label
... |
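
A small usage sketch for the `InputExample` structure this pair documents; the texts and label below are made up for illustration:

```python
from sentence_transformers import InputExample

# One training pair with a continuous similarity label.
example = InputExample(
    guid="pair-0",
    texts=["A plane is taking off.", "An air plane is taking off."],
    label=1.0,
)
print(example.texts, example.label)
```
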
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway
from jina.serve.runtimes.gateway.streamer import GatewayStreamer
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
... | from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway
from jina.serve.streamer import GatewayStreamer
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[... |
_base_ = './faster-rcnn_r50-caffe-dc5_ms-1x_coco.py'
# MMEngine support the following two ways, users can choose
# according to convenience
# param_scheduler = [
# dict(
# type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), # noqa
# dict(
# type='MultiStepLR',
# begi... | _base_ = './faster-rcnn_r50-caffe-dc5_ms-1x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_shardi... | import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.datapoints import Image
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, HttpRes... |
from typing import Optional
from langchain_core.callbacks.manager import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain.retrievers.ensemble import EnsembleRetriever
class MockRetriever(BaseRetriever):
docs: list[Doc... | from typing import List, Optional
from langchain_core.callbacks.manager import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain.retrievers.ensemble import EnsembleRetriever
class MockRetriever(BaseRetriever):
docs: Li... |
import os
import pytest
from llama_index.multi_modal_llms.nvidia import NVIDIAMultiModal
from typing import Any
from llama_index.core.schema import ImageDocument
def get_api_key(instance: Any) -> str:
return instance.api_key
def test_create_default_url_without_api_key(masked_env_var: str) -> None:
with p... | import os
import pytest
from llama_index.multi_modal_llms.nvidia import NVIDIAMultiModal
from typing import Any
from llama_index.core.schema import ImageDocument
def get_api_key(instance: Any) -> str:
return instance.api_key
def test_create_default_url_without_api_key(masked_env_var: str) -> None:
with p... |
from enum import Enum
from typing import Any, Optional
from pydantic import BaseModel
from backend.data.block import BlockInput
class BlockCostType(str, Enum):
RUN = "run" # cost X credits per run
BYTE = "byte" # cost X credits per byte
SECOND = "second" # cost X credits per second
DOLLAR = "doll... | from enum import Enum
from typing import Any, Optional
from pydantic import BaseModel
from backend.data.block import BlockInput
class BlockCostType(str, Enum):
RUN = "run" # cost X credits per run
BYTE = "byte" # cost X credits per byte
SECOND = "second" # cost X credits per second
class BlockCost(... |
import os
import pytest
import torchaudio
from torchaudio.pipelines import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_B... | import pytest
import torchaudio
from torchaudio.pipelines import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,... |
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_instance.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_chann... |
import numpy as np
import pytest
from docarray import Document, DocumentArray
from docarray.document import BaseDocument
from docarray.typing import NdArray
@pytest.fixture()
def da():
class Text(Document):
text: str
return DocumentArray([Text(text='hello') for _ in range(10)])
def test_iterate(da... | from docarray import DocumentArray, Document
def test_document_array():
class Text(Document):
text: str
da = DocumentArray([Text(text='hello') for _ in range(10)])
def test_document_array_fixed_type():
class Text(Document):
text: str
da = DocumentArray[Text]([Text(text='hello') for... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='VFNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='VFNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import FacebookChatLoader
from langchain_community.document_loaders.facebook_chat import concatenate_rows
# Create a way to dynamically look up deprecated imports.
# Us... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import FacebookChatLoader
from langchain_community.document_loaders.facebook_chat import concatenate_rows
# Create a way to dynamically look up deprecated imports.
# Us... |
from setuptools import find_packages
import setuptools
setuptools.setup(
name="jina_executors",
packages=find_packages(where=".", exclude=('tests',)),
include_package_data=True,
version="0.0.1",
author='Jina Dev Team',
author_email='dev-team@jina.ai',
description="A selection of Executors f... | from setuptools import find_packages
import setuptools
setuptools.setup(
name="jina-executors",
version="0.0.1",
author='Jina Dev Team',
author_email='dev-team@jina.ai',
description="A selection of Executors for Jina",
url="https://github.com/jina-ai/executors",
classifiers=[
"Progr... |
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import PromptTemplate
class FinishedOutputParser(BaseOutputParser[tuple[str, bool]]):
"""Output parser that checks if the output is finished."""
finished_value: str = "FINISHED"
"""Value that indicates the output is fi... | from typing import Tuple
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import PromptTemplate
class FinishedOutputParser(BaseOutputParser[Tuple[str, bool]]):
"""Output parser that checks if the output is finished."""
finished_value: str = "FINISHED"
"""Value that ... |
from typing import Optional
import torch
from docarray import Document, DocumentArray
from docarray.typing import TorchTensor
def test_torch_train():
class Mmdoc(Document):
text: str
tensor: Optional[TorchTensor[3, 224, 224]]
N = 10
batch = DocumentArray[Mmdoc](Mmdoc(text=f'hello{i}') ... | from typing import Optional
import torch
from docarray import Document, DocumentArray
from docarray.typing import TorchTensor
def test_torch_train():
class Mmdoc(Document):
text: str
tensor: Optional[TorchTensor[3, 224, 224]]
N = 10
batch = DocumentArray[Mmdoc](Mmdoc(text=f'hello{i}') ... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | import pytest
from docarray.utils._internal.misc import is_jax_available
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp
from docarray.computation.jax_backend import JaxCompBackend
from docarray.typing import JaxArray
metrics = JaxCompBackend.Metrics
else:
metrics = ... |
from llama_index.core.tools.types import BaseTool, ToolOutput, adapt_to_async_tool
from typing import TYPE_CHECKING, Sequence
from llama_index.core.llms.llm import ToolSelection
import json
if TYPE_CHECKING:
from llama_index.core.tools.types import BaseTool
def call_tool(tool: BaseTool, arguments: dict) -> ToolO... | from llama_index.core.tools.types import BaseTool, ToolOutput, adapt_to_async_tool
from typing import TYPE_CHECKING, Sequence
from llama_index.core.llms.llm import ToolSelection
import json
if TYPE_CHECKING:
from llama_index.core.tools.types import BaseTool
def call_tool(tool: BaseTool, arguments: dict) -> ToolO... |
"""CIFAR100 small images classification dataset."""
import os
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.datasets.cifar import load_batch
from keras.src.utils.file_utils import get_file
@keras_export("keras.datasets.cifar100.load_data")
def load_da... | """CIFAR100 small images classification dataset."""
import os
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.datasets.cifar import load_batch
from keras.src.utils.file_utils import get_file
@keras_export("keras.datasets.cifar100.load_data")
def load_da... |
import numpy as np
import pytest
import torch
from docarray.base_doc import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyUrl, NdArray, TorchTensor
@pytest.fixture()
def doc_and_class():
class Mmdoc(BaseDoc):
img: NdArray
url: AnyUrl
txt: str
... | import numpy as np
import pytest
import torch
from docarray.base_document import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AnyUrl, NdArray, TorchTensor
@pytest.fixture()
def doc_and_class():
class Mmdoc(BaseDocument):
img: NdArray
url: AnyUrl... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS
@HEADS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fuse... | import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS
@HEADS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block... |
"""
This script showcases a recommended approach to perform semantic search using quantized embeddings with FAISS and usearch.
In particular, it uses binary search with int8 rescoring. The binary search is highly efficient, and its index can be kept
in memory even for massive datasets: it takes (num_dimensions * num_do... | """
This script showcases a recommended approach to perform semantic search using quantized embeddings with FAISS and usearch.
In particular, it uses binary search with int8 rescoring. The binary search is highly efficient, and its index can be kept
in memory even for massive datasets: it takes (num_dimensions * num_do... |
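
The memory argument in this pair rests on binary codes using one bit per dimension, with int8 codes kept aside for rescoring. A hedged sketch with the quantization helper from sentence-transformers (>= 2.6 assumed; the model name is illustrative):

```python
# Binary quantization packs one bit per dimension (dim / 8 bytes per
# document); int8 codes are coarser floats used for rescoring.
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings

model = SentenceTransformer("all-MiniLM-L6-v2")  # illustrative model
embeddings = model.encode(["An example query", "An example document"])
binary = quantize_embeddings(embeddings, precision="binary")
int8 = quantize_embeddings(embeddings, precision="int8")
print(embeddings.shape, binary.shape, int8.dtype)
```
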
__version__ = '0.14.8'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
| __version__ = '0.14.7'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
# flake8: noqa
from . import utils
from .utils import get_audio_backend, list_audio_backends, set_audio_backend
utils._init_audio_backend()
| # flake8: noqa
from . import utils
from .utils import (
list_audio_backends,
get_audio_backend,
set_audio_backend,
)
utils._init_audio_backend()
|
"""
This examples trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it output a continuous labels 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:... | """
This examples trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it output a continuous labels 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:... |
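
As this pair's docstring says, a CrossEncoder scores a sentence pair directly rather than embedding each sentence. A minimal inference sketch (the pretrained checkpoint name is an assumption for illustration):

```python
from sentence_transformers import CrossEncoder

# Each input is a pair; the output is one continuous score per pair.
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
scores = model.predict([
    ["A man is eating food.", "A man is eating a piece of bread."],
    ["A man is eating food.", "The girl is carrying a baby."],
])
print(scores)  # continuous similarity scores in 0...1
```
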
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType
from ..utils.m... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType
from ..util... |
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyUrl
@pytest.mark.proto
def test_proto_any_url():
uri = parse_obj_as(AnyUrl, 'http://jina.ai/img.png')
uri._to_node_protobuf()
def test_json_schema():
... | import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyUrl
@pytest.mark.proto
def test_proto_any_url():
uri = parse_obj_as(AnyUrl, 'http://jina.ai/img.png')
uri._to_node_protobuf()
def test_json_schema():
... |
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is than pooled,
for example with mean-pooling.
"""
import sys
import traceback
from datasets import load_dataset
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer
from sentence_tran... | """
This example runs a CNN after the word embedding lookup. The output of the CNN is than pooled,
for example with mean-pooling.
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from s... |
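
The architecture this pair describes (token embeddings -> CNN -> pooling) can be assembled from sentence-transformers modules. A hedged sketch; the base model, channel count, and kernel sizes are assumptions rather than values from the row:

```python
from sentence_transformers import SentenceTransformer, models

word = models.Transformer("distilbert-base-uncased")  # illustrative base
cnn = models.CNN(
    in_word_embedding_dimension=word.get_word_embedding_dimension(),
    out_channels=256,
    kernel_sizes=[1, 3, 5],  # parallel convolutions, outputs concatenated
)
pooling = models.Pooling(cnn.get_word_embedding_dimension())  # mean-pooling
model = SentenceTransformer(modules=[word, cnn, pooling])
print(model.encode("A test sentence").shape)
```
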
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.acti... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.acti... |
import numpy as np
import pytest
from docarray.computation.numpy_backend import NumpyCompBackend
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((5)), 1),
... | import numpy as np
import pytest
from docarray.computation.numpy_backend import NumpyCompBackend
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((5)), 1),
... |
from pathlib import Path
import click
from rich.console import Console
from rich.theme import Theme
from .pkg import pkg
from .test import test
LLAMA_DEV_THEME = Theme(
{
"repr.path": "",
"repr.filename": "",
"repr.str": "",
"traceback.note": "cyan",
"info": "dim cyan",
... | from pathlib import Path
import click
from rich.console import Console
from rich.theme import Theme
from .pkg import pkg
from .test import test
LLAMA_DEV_THEME = Theme(
{
"repr.path": "",
"repr.filename": "",
"repr.str": "",
"traceback.note": "cyan",
"info": "dim cyan",
... |
"""Open Weather Map tool spec."""
from typing import Any, List
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class OpenWeatherMapToolSpec(BaseToolSpec):
"""Open Weather tool spec."""
spec_functions = ["weather_at_location", "forecast_tomorrow_at... | """Open Weather Map tool spec."""
from typing import Any, List
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class OpenWeatherMapToolSpec(BaseToolSpec):
"""Open Weather tool spec."""
spec_functions = ["weather_at_location", "forecast_tommorrow_a... |
import asyncio
import sys
import pytest
from llama_index.core import Document
from llama_index.graph_rag.cognee import CogneeGraphRAG
def test_smoke():
"""No-op test: CI will fail if no tests are collected."""
@pytest.mark.skipif(
sys.version_info < (3, 10), reason="mock strategy requires python3.10 or hig... | import asyncio
import sys
import pytest
from llama_index.core import Document
from llama_index.graph_rag.cognee import CogneeGraphRAG
def test_smoke():
"""No-op test: CI will fail if no tests are collected."""
@pytest.mark.skipif(
sys.version_info < (3, 10), reason="mock strategy requires python3.10 or hig... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import StarRocks
from langchain_community.vectorstores.starrocks import StarRocksSettings
# Create a way to dynamically look up deprecated imports.
# Used to consolidate lo... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import StarRocks
from langchain_community.vectorstores.starrocks import StarRocksSettings
# Create a way to dynamically look up deprecated imports.
# Used to consolidate lo... |
import aiohttp
import pytest
from jina import Executor, Flow, requests
from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet
from jina.clients.request.helper import _new_data_request
from jina.excepts import BadServer
from jina.logging.logger import JinaLogger
from jina.types.request.data import Data... | import aiohttp
import pytest
from jina import Executor, Flow, requests
from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet
from jina.clients.request.helper import _new_data_request
from jina.logging.logger import JinaLogger
from jina.types.request.data import DataRequest
logger = JinaLogger('client... |
import json
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import ConfigDict
from langchain_community.utilities.graphql import GraphQLAPIWrapper
class BaseGraphQLTool(BaseTool):
"""Base tool for querying a GraphQ... | import json
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import ConfigDict
from langchain_community.utilities.graphql import GraphQLAPIWrapper
class BaseGraphQLTool(BaseTool): # type: ignore[override]
"""Base ... |
import importlib.util
import os
import warnings
from functools import wraps
from typing import Optional
def eval_env(var, default):
"""Check if environment varable has True-y value"""
if var not in os.environ:
return default
val = os.environ.get(var, "0")
trues = ["1", "true", "TRUE", "on", "... | import importlib.util
import os
import warnings
from functools import wraps
from typing import Optional
def eval_env(var, default):
"""Check if environment varable has True-y value"""
if var not in os.environ:
return default
val = os.environ.get(var, "0")
trues = ["1", "true", "TRUE", "on", "... |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=601))
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config =... | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=601))
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config =... |
from unittest.mock import MagicMock
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.tools import FunctionTool
from llama_index.llms.oci_genai import OCIGenAI
def test_oci_genai_embedding_class():
names_of_base_classes = [b.__name__ for b in OCIGenAI.__mro__]
assert BaseLLM.__name__ i... | from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.oci_genai import OCIGenAI
def test_oci_genai_embedding_class():
names_of_base_classes = [b.__name__ for b in OCIGenAI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
|
_base_ = 'retinanet_r50_fpn_1x_coco.py'
# training schedule for 90k
train_cfg = dict(by_epoch=False, max_iters=90000)
val_cfg = dict(interval=10000)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',... | _base_ = 'retinanet_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[60000, 80000])
# Runner type
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000)
checkpoint_config = dict(interval=10000)
evalu... |
"""Tool for the SearchApi.io search API."""
from typing import Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.searchapi import SearchApiAPIWrap... | """Tool for the SearchApi.io search API."""
from typing import Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.searchapi import SearchApiAPIWrap... |