input (string, lengths 33–5k) | output (string, lengths 32–5k)
|---|---|
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pip... | _base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
... |
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Union,
Optional,
Dict,
)
import numpy as np
from .... import Document, DocumentArray
from ....math import ndarray
from ....math.helper import EPSILON
from ....math.ndarray import to_numpy_array
from ....score import NamedScore
fr... | from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Union,
)
import numpy as np
from .... import Document, DocumentArray
from ....math import ndarray
from ....math.helper import EPSILON
from ....math.ndarray import to_numpy_array
from ....score import NamedScore
from ....array.mixins.find... |
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import (AspectRatioBatchSampler,
TrackAspectRatioBatchSampler)
from .class_aware_sampler import ClassAwareSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
from .track_img_sampler import T... | # Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import AspectRatioBatchSampler
from .class_aware_sampler import ClassAwareSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
__all__ = [
'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',
'... |
from typing import Any
import pytest
from langchain_community.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def import_lancedb() -> Any:
try:
import lancedb
except ImportError as e:
raise ImportError(
"Could not import... | from typing import Any
import pytest
from langchain_community.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def import_lancedb() -> Any:
try:
import lancedb
except ImportError as e:
raise ImportError(
"Could not import... |
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, ApexOptimWrapper,
DefaultOptimWrapperConstructor, OptimWrapper,
OptimWrapperDict, build_optim_wrapper)
# yapf: disable
... | # Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, DefaultOptimWrapperConstructor,
OptimWrapper, OptimWrapperDict, build_optim_wrapper)
# yapf: disable
from .scheduler import (ConstantLR, Consta... |
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | # flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... |
_base_ = 'mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-270k_coco.py' # noqa
# training schedule for 90k
max_iters = 90000
# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, ... | _base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py'
# training schedule for 90k
max_iters = 90000
# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict... |
from jina import Executor, requests
from docarray import DocList
from docarray.documents import TextDoc
class MyExecutor(Executor):
@requests
def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[TextDoc]:
docs[0].text = 'hello, world!'
docs[1].text = 'goodbye, world!'
return docs | from jina import Executor, requests, DocumentArray
class MyExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
docs[0].text = 'hello, world!'
docs[1].text = 'goodbye, world!'
|
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
dept... | # model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
preprocess_cfg=preprocess_cfg,
type='FasterRCNN',
backbone=dict(
type='ResNet',
dept... |
import logging
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-... | from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling... |
from typing import Literal, Optional
from langchain_core.agents import AgentAction
def _escape(xml: str) -> str:
"""Replace XML tags with custom safe delimiters."""
replacements = {
"<tool>": "[[tool]]",
"</tool>": "[[/tool]]",
"<tool_input>": "[[tool_input]]",
"</tool_input>"... | from langchain_core.agents import AgentAction
def format_xml(
intermediate_steps: list[tuple[AgentAction, str]],
) -> str:
"""Format the intermediate steps as XML.
Args:
intermediate_steps: The intermediate steps.
Returns:
The intermediate steps as XML.
"""
log = ""
for a... |
"""Integration test for Stack Exchange."""
from langchain_community.utilities import StackExchangeAPIWrapper
def test_call() -> None:
"""Test that call runs."""
stackexchange = StackExchangeAPIWrapper()
output = stackexchange.run("zsh: command not found: python")
assert output != "hello"
def test_f... | """Integration test for Stack Exchange."""
from langchain_community.utilities import StackExchangeAPIWrapper
def test_call() -> None:
"""Test that call runs."""
stackexchange = StackExchangeAPIWrapper() # type: ignore[call-arg]
output = stackexchange.run("zsh: command not found: python")
assert outp... |
_base_ = '../gcnet/mask-rcnn_r101-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py'
# model settings
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='GenericRoIExtractor',
aggregation='sum',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_ch... | _base_ = '../gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py'
# model settings
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='GenericRoIExtractor',
aggregation='sum',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
... |
import random
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
from keras.src.utils.module_utils import tensorflow as tf
GLOBAL_RANDOM_SEED = "global_random_seed"
@keras_export("keras.utils.set_random_seed")
def set_ran... | import random
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.utils.module_utils import tensorflow as tf
@keras_export("keras.utils.set_random_seed")
def set_random_seed(seed):
"""Sets all random seeds (Python, NumPy, and backend framework, e.g. TF).... |
import torch
from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._tv_tensor import TVTensor
from ._video import Video
def wrap(wrappee, *, like, **kwargs):
"""[BETA] Convert a :class:`torch.Tens... | import torch
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._tv_tensor import TVTensor
from ._video import Video
def wrap(wrappee, *, like, **kwargs):
"""[BETA] Convert a :class:`torch.Tensor... |
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.memory import BaseMemory
from langchain_core.messages import SystemMessage
from langchain_core.prompts.chat import MessagesPlaceholder
from langchain_core.tools import BaseTool
from langchain.agents.agent... | from typing import Any, List, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.memory import BaseMemory
from langchain_core.messages import SystemMessage
from langchain_core.prompts.chat import MessagesPlaceholder
from langchain_core.tools import BaseTool
from langchain.agents... |
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# training schedule, voc dataset is repeated 3 times, in
# `_base_/datasets/voc0712.py`, so the actual epoch = 4 * 3 = 12
max_epoch... | _base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# training schedule, voc dataset is repeated 3 times, in
# `_base_/datasets/voc0712.py`, so the actual epoch = 4 * 3 = 12
max_epoch... |
from __future__ import annotations
import os
from copy import deepcopy
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, StaticEmbedding, Transformer
from sentence_transformers.util import is_datas... | from __future__ import annotations
import os
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
f... |
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.typing import NdArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: Image
def test_from_to_json():
da = DocumentArray[MyDoc](
[
MyDoc(embedding=[1, 2, 3, 4, 5], te... | import pytest
from docarray import BaseDocument
from docarray.typing import NdArray
from docarray.documents import Image
from docarray import DocumentArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: Image
def test_from_to_json():
da = DocumentArray[MyDoc](
[
... |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to... | import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to... |
"""Base classes for chain routing."""
from __future__ import annotations
from abc import ABC
from collections.abc import Mapping
from typing import Any, NamedTuple, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from pydantic impo... | """Base classes for chain routing."""
from __future__ import annotations
from abc import ABC
from collections.abc import Mapping
from typing import Any, NamedTuple, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from pydantic impo... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import elastic_transform
from keras.src.ops.image import extract_patches
from ke... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import extract_patches
from keras.src.ops.image import gaussian_blur
from keras.... |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If ex... | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If ex... |
from typing import Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.elasticsearch import ElasticsearchKVStore
class ElasticsearchIndexStore(KVIndexStore):
"""
Elasticsearch Index store.
Args:
elasticsearch_kvstore (Elastic... | from typing import Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.elasticsearch import ElasticsearchKVStore
class ElasticsearchIndexStore(KVIndexStore):
"""Elasticsearch Index store.
Args:
elasticsearch_kvstore (Elasticsearc... |
"""LLM Compiler agent pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.agent import AgentRunner
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.llms.llm import LLM
from llama_index.core.settings import S... | """LLM Compiler agent pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.agent import AgentRunner
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.llms.llm import LLM
from llama_index.core.settings import S... |
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
fil... | _base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
fil... |
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... | import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... |
from typing import Optional, Type, TypeVar, Union
from uuid import UUID
from pydantic import BaseConfig, parse_obj_as
from pydantic.fields import ModelField
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
T = TypeVar('T', bound='ID')
class ID(str, BaseNode):
"""
Repres... | from typing import TYPE_CHECKING, Optional, Type, TypeVar, Union
from uuid import UUID
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ID')
class ID(str, B... |
_base_ = './maskformer_r50_mstrain_16x1_75e_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size... | _base_ = './maskformer_r50_mstrain_16x1_75e_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size... |
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Tuple
from mmcv.runner import BaseModule
from torch import Tensor
from mmdet.core.utils import (InstanceList, OptConfigType, OptMultiConfig,
SampleList)
from mmdet.registry im... | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple, Union
from mmcv.runner import BaseModule
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from torch import Tensor
from mmdet.core import DetDataSample
from mm... |
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class TFDatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
def __init__(self, dataset, class_weight=None, distribution=None)... | from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class TFDatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
def __init__(self, dataset, class_weight=None, distribution=None)... |
import os
import json
import time
import pytest
from urllib import request
from jina import Flow
from jina.serve.runtimes.gateway.http.models import _to_camel_case
from docarray import Document
from jina import helper
from jina import Executor, requests
from tests import validate_callback
cur_dir = os.path.dirname(o... | import os
import json
import time
import pytest
from urllib import request
from jina import Flow
from jina.serve.runtimes.gateway.http.models import _to_camel_case
from jina import Document
from jina import helper
from jina import Executor, requests
from tests import validate_callback
cur_dir = os.path.dirname(os.pa... |
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.multi_modal_llms.nebius import NebiusMultiModal
def test_multi_modal_class():
names_of_base_classes = [b.__name__ for b in NebiusMultiModal.__mro__]
assert OpenAIMultiModal.__name__ in names_of_base_classes
| from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.multi_modal_llms.nebius import NebiusMultiModal
def test_multi_modal_class():
names_of_base_classes = [b.__name__ for b in NebiusMultiModal.__mro__]
assert MultiModalLLM.__name__ in names_of_base_classes
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
METAINFO = {
'CLASSES':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'dinin... | _base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
METAINFO = {
'CLASSES':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'dinin... |
"""
OpenAI Agent.
Simple wrapper around AgentRunner + OpenAIAgentWorker.
For the legacy implementation see:
```python
from llama_index.agent.legacy.openai.base import OpenAIAgent
```
"""
from typing import (
Any,
Dict,
List,
Callable,
Optional,
Type,
)
from llama_index.agent.openai.step impo... | """OpenAI Agent.
Simple wrapper around AgentRunner + OpenAIAgentWorker.
For the legacy implementation see:
```python
from llama_index.agent.legacy.openai.base import OpenAIAgent
```
"""
from typing import (
Any,
Dict,
List,
Callable,
Optional,
Type,
)
from llama_index.agent.openai.step impor... |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | from ..utils import is_torch_available
if is_torch_available():
from .faster_cache import FasterCacheConfig, apply_faster_cache
from .group_offloading import apply_group_offloading
from .hooks import HookRegistry, ModelHook
from .layerwise_casting import apply_layerwise_casting, apply_layerwise_castin... |
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdAr... | from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdAr... |
from keras.src.utils.module_utils import dmtree
def register_tree_node_class(cls):
return cls
def is_nested(structure):
return dmtree.is_nested(structure)
def traverse(func, structure, top_down=True):
return dmtree.traverse(func, structure, top_down=top_down)
def flatten(structure):
return dmtre... | from keras.src.utils.module_utils import dmtree
def register_tree_node_class(cls):
return cls
def is_nested(structure):
return dmtree.is_nested(structure)
def traverse(func, structure, top_down=True):
return dmtree.traverse(func, structure, top_down=top_down)
def flatten(structure):
return dmtre... |
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imp... | from typing import Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import (
ID,
AnyUrl,
Embedding,
ImageUrl,
NdArray,
TextUrl,
... |
import os
import pathlib
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class DTD(VisionDataset):
"""`Describable Textures Dataset (DTD) <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_.
Args... | import os
import pathlib
from typing import Callable, Optional
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class DTD(VisionDataset):
"""`Describable Textures Dataset (DTD) <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_.
Args:
ro... |
_base_ = 'ssd300_coco.py'
# model settings
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 2... | _base_ = 'ssd300_coco.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
... |
from __future__ import annotations
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
from sentence_transformers.util import is_training_available
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051... | from __future__ import annotations
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f... |
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .RegularizerLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegatives... | from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .RegularizerLoss import FlopsLoss, L0FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMult... |
from llama_index.core.exec_utils import _contains_protected_access
def test_contains_protected_access() -> None:
assert not _contains_protected_access("def _a(b): pass"), (
"definition of dunder function"
)
assert _contains_protected_access("a = _b(c)"), "call to protected function"
assert not... | from llama_index.core.exec_utils import _contains_protected_access
def test_contains_protected_access() -> None:
assert not _contains_protected_access(
"def _a(b): pass"
), "definition of dunder function"
assert _contains_protected_access("a = _b(c)"), "call to protected function"
assert not _... |
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
# MMEngine support the following two ways, users can choose
# according to convenience
# param_scheduler = [
# dict(
# type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), # noqa
# dict(
# type='MultiStepLR',
# begi... | _base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
# learning policy
lr_config = dict(step=[16, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
... | from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import ort_validate
__all__ = ['ort_validate']
| from .utils import ort_validate
__all__ = ['ort_validate']
|
"""Test yamlOutputParser"""
from enum import Enum
from typing import Optional
import pytest
from langchain_core.exceptions import OutputParserException
from pydantic import BaseModel, Field
from langchain.output_parsers.yaml import YamlOutputParser
class Actions(Enum):
SEARCH = "Search"
CREATE = "Create"
... | """Test yamlOutputParser"""
from enum import Enum
from typing import Optional
import pytest
from langchain_core.exceptions import OutputParserException
from pydantic import BaseModel, Field
from langchain.output_parsers.yaml import YamlOutputParser
class Actions(Enum):
SEARCH = "Search"
CREATE = "Create"
... |
import glob
import os
import cv2
import pytest
from jina import Document, DocumentArray
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='package')
def build_da():
def _build_da():
return DocumentArray(
[
Document(blob=cv2.imread(path), tags={'filena... | import glob
import os
import cv2
import pytest
from jina import DocumentArray, Document
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='package')
def build_da():
def _build_da():
return DocumentArray([
Document(blob=cv2.imread(path), tags={'filename': path.split('/... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
class TestRPN(TestCase):
@parameterized.... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from .utils import demo_mm_inputs, get_detector_cfg
class TestRPN(TestCase):
@parameterized.expand(... |
import os
from typing import BinaryIO, Optional, Union
import pyarrow as pa
import pyarrow.parquet as pq
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..uti... | import os
from typing import BinaryIO, Optional, Union
import pyarrow as pa
import pyarrow.parquet as pq
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..uti... |
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDoc):
"""
Document for handling text.
It can contain:
- a [... | from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDoc):
"""
Document for handling text.
It can contain:
- a [... |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... | """
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... |
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper
class BalanceSheetsSchema(BaseModel):
"""Input... | from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper
class BalanceSheetsSchema(BaseModel):
"""Input... |
__version__ = '0.1.0'
from docarray.array.array import DocumentArray
from docarray.document.document import BaseDocument
from docarray.predefined_document import Audio, Image, Mesh3D, PointCloud3D, Text
__all__ = [
'BaseDocument',
'DocumentArray',
'Image',
'Audio',
'Text',
'Mesh3D',
'Point... | __version__ = '0.1.0'
from docarray.array.array import DocumentArray
from docarray.document.document import BaseDocument
from docarray.predefined_document import Image, Mesh3D, PointCloud3D, Text
__all__ = ['BaseDocument', 'DocumentArray', 'Image', 'Text', 'Mesh3D', 'PointCloud3D']
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.utils import digit_version
from torch import Tensor
from mmdet.registry import MODELS
MODELS.register_module('Linear', module=nn.Linear)
@MODELS.register_module(name='NormedLinear')
class... | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from mmdet.registry import MODELS
MODELS.register_module('Linear', module=nn.Linear)
@MODELS.register_module(name='NormedLinear')
class NormedLinear(nn.Linear):
"""Normaliz... |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""Builder Config for AudioFolder."""
... | from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""Builder Config for AudioFolder."""
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCasca... | # Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCasca... |
import inspect
import re
from typing import Dict, List
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql imp... | import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from... |
from unittest.mock import MagicMock, patch
import pytest
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
STUB_MODEL_NAME = "placeholder_model"
@pytest.fixture(name="hf_inference_api")
def fixture_hf_inference_api() -> HuggingFaceInferen... | from unittest.mock import MagicMock, patch
import pytest
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
STUB_MODEL_NAME = "placeholder_model"
@pytest.fixture(name="hf_inference_api")
def fixture_hf_inference_api() -> HuggingFaceInferen... |
_base_ = './gfl_r50_fpn_1x_coco.py'
max_epochs = 24
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
... | _base_ = './gfl_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
# multi-scale training
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
... |
"""
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is than pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note, you can also pass BERT embeddings to the BiLSTM.
"""
import logging
import traceback
from datetime import datetime
fr... | """
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is than pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note, you can also pass BERT embeddings to the BiLSTM.
"""
import traceback
from datasets import load_dataset
from sentence_... |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple, Union
import cv2
import numpy as np
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.dl_utils import tensor2imgs
DATA_BATCH = Optional[Union[dict, tuple, list]]
... | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple, Union
import cv2
import numpy as np
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.dl_utils import tensor2imgs
DATA_BATCH = Optional[Union[dict, tuple, list]]
... |
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_similarity_metric import (
AnswerSimilarityMetric,
)
from tonic_validate.services.op... | from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_similarity_metric import (
AnswerSimilarityMetric,
)
from tonic_validate.services.op... |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import List, Optional
from mmengine.dataset import BaseDataset
from mmengine.fileio import load
from mmengine.utils import is_abs
from ..registry import DATASETS
@DATASETS.register_module()
class BaseDetDataset(BaseDataset):
"""Ba... | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import List, Optional
from mmengine.dataset import BaseDataset
from mmengine.fileio import load
from mmengine.utils import is_abs
from ..registry import DATASETS
@DATASETS.register_module()
class BaseDetDataset(BaseDataset):
"""Ba... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import mmcv
import numpy as np
try:
import imageio
except ImportError:
imageio = None
def parse_args():
parser = argparse.ArgumentParser(d... | import argparse
import os
import os.path as osp
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import mmcv
import numpy as np
try:
import imageio
except ImportError:
imageio = None
def parse_args():
parser = argparse.ArgumentParser(description='Create GIF for demo')
parser.add... |
from typing import Any, Union
from ..utils import add_end_docstrings
from .base import GenericTensor, Pipeline, build_pipeline_init_args
@add_end_docstrings(
build_pipeline_init_args(has_tokenizer=True, supports_binary_output=False),
r"""
tokenize_kwargs (`dict`, *optional*):
Addition... | from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings
from .base import GenericTensor, Pipeline, build_pipeline_init_args
@add_end_docstrings(
build_pipeline_init_args(has_tokenizer=True, supports_binary_output=False),
r"""
tokenize_kwargs (`dict`, *optional*):
... |
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
... | import pathlib
import pytest
from mktestdocs import check_md_file
# @pytest.mark.parametrize('fpath', pathlib.Path("docs").glob("**/*.md"), ids=str)
# to use later
@pytest.mark.parametrize(
'fpath', pathlib.Path('docs/user_guide').glob('**/*.md'), ids=str
)
def test_files_good(fpath):
check_md_file(fpath=fpa... |
import json
from jina.orchestrate.flow.base import Flow
from jina.orchestrate.deployments import Deployment
from jina.jaml import JAML
from jina.logging.predefined import default_logger
from jina.schemas import get_full_schema
from jina_cli.export import api_to_dict
def export_kubernetes(args):
"""Export to k8s ... | import json
from jina.orchestrate.flow.base import Flow
from jina.jaml import JAML
from jina.logging.predefined import default_logger
from jina.schemas import get_full_schema
from jina_cli.export import api_to_dict
def export_kubernetes(args):
"""Export to k8s yaml files
:param args: args from CLI
"""
... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
from docarray.typing.url.url_3d.point_cloud_url import PointCloud3DUrl
__all__ = ['Mesh3DUrl', 'PointCloud3DUrl']
|
# model settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
model = dict(
type='FasterRCNN',
img_norm_cfg=img_norm_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stag... | # model settings
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(... |
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.retrievers import BaseRetriever, RetrieverLike
from pydantic import ConfigDict
class C... | from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.retrievers import BaseRetriever, RetrieverLike
from pydantic import ConfigDict
class C... |
"""This module is deprecated and will be removed in a future release.
Please use LangChainTracer instead.
"""
from typing import Any
def get_headers(*args: Any, **kwargs: Any) -> Any:
"""Throw an error because this has been replaced by get_headers."""
msg = (
"get_headers for LangChainTracerV1 is no... | from typing import Any
def get_headers(*args: Any, **kwargs: Any) -> Any:
"""Throw an error because this has been replaced by get_headers."""
msg = (
"get_headers for LangChainTracerV1 is no longer supported. "
"Please use LangChainTracer instead."
)
raise RuntimeError(msg)
def LangC... |
import torch
from torchvision.prototype import features
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int, temporal_dim: int = -4) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape... | import torch
from torchvision.prototype import features
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int, temporal_dim: int = -4) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape... |
import io
import warnings
from abc import ABC
import numpy as np
from typing_extensions import TYPE_CHECKING
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
if TYPE_CHECKING:
from docarray.typing.bytes.image_bytes import Imag... | import io
import warnings
from abc import ABC
import numpy as np
from typing_extensions import TYPE_CHECKING
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
if TYPE_CHECKING:
from docarray.typing.bytes.image_bytes import Imag... |
"""
Utility Tools for the Portkey Class.
This file module contains a collection of utility functions designed to enhance
the functionality and usability of the Portkey class
"""
from typing import TYPE_CHECKING, List
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.llms.anthropic import Anth... | """
Utility Tools for the Portkey Class.
This file module contains a collection of utility functions designed to enhance
the functionality and usability of the Portkey class
"""
from typing import TYPE_CHECKING, List
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.llms.anthropic import Anth... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.preprocessing.sequence import (
TimeseriesGenerator as TimeseriesGenerator,
)
from keras.src.legacy.preprocessing.sequence import (
make_sampling_table as make_sampling... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.preprocessing.sequence import TimeseriesGenerator
from keras.src.legacy.preprocessing.sequence import make_sampling_table
from keras.src.legacy.preprocessing.sequence import sk... |
from llama_index.observability.otel.base import LlamaIndexOpenTelemetry
__all__ = [
"LlamaIndexOpenTelemetry",
]
| from llama_index.observability.otel.base import (
LlamaIndexOpenTelemetry
)
__all__ = [
"LlamaIndexOpenTelemetry",
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.acti... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.acti... |
"""Global Gemini Utilities (shared between Gemini LLM and Vertex)."""
from __future__ import annotations
from collections.abc import Sequence
from llama_index.core.base.llms.types import ChatMessage, MessageRole
ROLES_TO_GEMINI: dict[MessageRole, MessageRole] = {
MessageRole.USER: MessageRole.USER,
MessageR... | """Global Gemini Utilities (shared between Gemini LLM and Vertex)."""
from __future__ import annotations
from collections.abc import Sequence
from llama_index.core.base.llms.types import ChatMessage, MessageRole
ROLES_TO_GEMINI: dict[MessageRole, MessageRole] = {
MessageRole.USER: MessageRole.USER,
MessageR... |
# Copyright (c) OpenMMLab. All rights reserved.
from .history_buffer import HistoryBuffer
from .logger import MMLogger, print_log
from .message_hub import MessageHub
__all__ = ['HistoryBuffer', 'MessageHub', 'MMLogger', 'print_log']
| # Copyright (c) OpenMMLab. All rights reserved.
from .log_buffer import LogBuffer
from .logger import MMLogger, print_log
from .message_hub import MessageHub
__all__ = ['LogBuffer', 'MessageHub', 'MMLogger', 'print_log']
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import CocoDataset
class TestCocoDataset(unittest.TestCase):
def test_coco_dataset(self):
# test CocoDataset
metainfo = dict(classes=('bus', 'car'), task_name='new_task')
dataset = CocoDataset(
... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import CocoDataset
class TestCocoDataset(unittest.TestCase):
def test_coco_dataset(self):
# test CocoDataset
metainfo = dict(CLASSES=('bus', 'car'), task_name='new_task')
dataset = CocoDataset(
... |
__version__ = '0.12.3'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
| __version__ = '0.12.2'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
from typing import List
import datasets
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""BuilderConfig for ImageFolder."""
drop_labels: bool = None
drop_metadata: boo... | from typing import List
import datasets
from datasets.tasks import ImageClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""BuilderConfig for ImageFolder."""
... |
import asyncio
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class MergerRetriever(BaseRetriever):
"""Retriever that merges the results of mult... | import asyncio
from typing import List
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class MergerRetriever(BaseRetriever):
"""Retriever that me... |
from __future__ import annotations
from typing import Any, Union
from langchain_core.retrievers import (
BaseRetriever,
RetrieverOutput,
)
from langchain_core.runnables import Runnable, RunnablePassthrough
def create_retrieval_chain(
retriever: Union[BaseRetriever, Runnable[dict, RetrieverOutput]],
... | from __future__ import annotations
from typing import Any, Dict, Union
from langchain_core.retrievers import (
BaseRetriever,
RetrieverOutput,
)
from langchain_core.runnables import Runnable, RunnablePassthrough
def create_retrieval_chain(
retriever: Union[BaseRetriever, Runnable[dict, RetrieverOutput]]... |
from typing import Union
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import is_simple_tensor
def erase_image_tensor(
image: torch.Tensor, i: in... | from typing import Union
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Te... |
import copy as cp
import dataclasses
from dataclasses import fields
from functools import lru_cache
from typing import TYPE_CHECKING, Optional, Tuple, Dict
from docarray.dataclasses import is_multimodal
from docarray.helper import typename
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
@lru... | import copy as cp
from dataclasses import fields
from functools import lru_cache
from typing import TYPE_CHECKING, Optional, Tuple, Dict
from docarray.dataclasses import is_multimodal
from docarray.helper import typename
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
@lru_cache()
def _get_f... |
import torch
from docarray import BaseDoc
from docarray.typing import TorchTensor
def test_tensor_ops():
class A(BaseDoc):
tensor: TorchTensor[3, 224, 224]
class B(BaseDoc):
tensor: TorchTensor[3, 112, 224]
tensor = A(tensor=torch.ones(3, 224, 224)).tensor
tensord = A(tensor=torch.o... | import torch
from docarray import BaseDocument
from docarray.typing import TorchTensor
def test_tensor_ops():
class A(BaseDocument):
tensor: TorchTensor[3, 224, 224]
class B(BaseDocument):
tensor: TorchTensor[3, 112, 224]
tensor = A(tensor=torch.ones(3, 224, 224)).tensor
tensord = A... |
import numpy as np
import numpy.typing as npt
def oscillator_bank(
frequencies,
amplitudes,
sample_rate: float,
time_axis: int = -2,
):
"""Reference implementation of oscillator_bank"""
invalid = np.abs(frequencies) >= sample_rate / 2
if np.any(invalid):
amplitudes = np.where(inval... | import numpy as np
def oscillator_bank(
frequencies,
amplitudes,
sample_rate: float,
time_axis: int = -2,
):
"""Reference implementation of oscillator_bank"""
invalid = np.abs(frequencies) >= sample_rate / 2
if np.any(invalid):
amplitudes = np.where(invalid, 0.0, amplitudes)
pi... |
__version__ = '0.30.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
hand... | __version__ = '0.30.0a3'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
ha... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import ApproxMaxIoUAssigner
class TestApproxIoUAssigner(TestCase):
def test_approx_iou_assigner(self):
assigner = ApproxMaxIoU... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet.models.task_modules.assigners import ApproxMaxIoUAssigner
class TestApproxIoUAssigner(TestCase):
def test_approx_iou_assigner(self):
assigner = ApproxMaxIoUAssign... |
from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoMacOS,
skipIfNo... | from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoModule,
skipIfNoQengine,
skipIfNoSox... |
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class PILToTensor(Transform):
"""[BETA] Convert a ``PIL... | from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class PILToTensor(Transform):
"""[BETA] Convert a ``PIL... |
from llama_index.core.graph_stores.types import GraphStore
from llama_index.graph_stores.neptune import (
NeptuneAnalyticsGraphStore,
NeptuneDatabaseGraphStore,
)
from llama_index.graph_stores.neptune.base import NeptuneBaseGraphStore
def test_neptune_analytics_graph_store():
names_of_bases = [b.__name__ ... | from unittest.mock import MagicMock, patch
from llama_index.core.graph_stores.types import GraphStore
from llama_index.graph_stores.neptune import (
NeptuneAnalyticsGraphStore,
NeptuneDatabaseGraphStore,
)
@patch("llama_index.graph_stores.neptune.NeptuneAnalyticsGraphStore")
def test_neptune_analytics_graph_... |
"""Test PandasDataframeParser"""
from typing import Any
import pandas as pd
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers.pandas_dataframe import PandasDataFrameOutputParser
df = pd.DataFrame(
{
"chicken": [1, 2, 3, 4],
"veggies": [5, 4, 3, 2],
... | """Test PandasDataframeParser"""
from typing import Any, Dict
import pandas as pd
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers.pandas_dataframe import PandasDataFrameOutputParser
df = pd.DataFrame(
{
"chicken": [1, 2, 3, 4],
"veggies": [5, 4, 3, 2],
... |
"""
Experimental support for external memory
========================================
This is similar to the one in `quantile_data_iterator.py`, but for external memory
instead of Quantile DMatrix. The feature is not ready for production use yet.
.. versionadded:: 1.5.0
See :doc:`the tutorial </tutorials/exter... | """
Experimental support for external memory
========================================
This is similar to the one in `quantile_data_iterator.py`, but for external memory
instead of Quantile DMatrix. The feature is not ready for production use yet.
.. versionadded:: 1.5.0
See :doc:`the tutorial </tutorials/exter... |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compressi... | import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compressi... |
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... | import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... |
from typing import Literal
from langchain_core.documents import Document
from langchain_core.indexing.api import _get_document_with_hash
def test_hashed_document_hashing() -> None:
document = Document(
uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
hashed_docume... | import pytest
from langchain_core.documents import Document
from langchain_core.indexing.api import _HashedDocument
def test_hashed_document_hashing() -> None:
hashed_document = _HashedDocument( # type: ignore[call-arg]
uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
... |