| input (string, length 33–5k) | output (string, length 32–5k) |
|---|---|
import sqlite3
import warnings
from dataclasses import dataclass, field
from tempfile import NamedTemporaryFile
from typing import Iterable, Dict, Optional, TYPE_CHECKING, Union
from docarray.array.storage.sqlite.helper import initialize_table
from docarray.array.storage.base.backend import BaseBackendMixin
from docar... | import sqlite3
import warnings
from dataclasses import dataclass, field, asdict
from tempfile import NamedTemporaryFile
from typing import (
Iterable,
Dict,
Optional,
TYPE_CHECKING,
Union,
List,
Tuple,
)
from docarray.array.storage.sqlite.helper import initialize_table
from docarray.array.s... |
# Copyright (c) OpenMMLab. All rights reserved.
from .coco_api import COCO, COCOeval, COCOPanoptic
from .cocoeval_mp import COCOevalMP
__all__ = ['COCO', 'COCOeval', 'COCOPanoptic', 'COCOevalMP']
| # Copyright (c) OpenMMLab. All rights reserved.
from .coco_api import COCO, COCOeval, COCOPanoptic
__all__ = ['COCO', 'COCOeval', 'COCOPanoptic']
|
import tracemalloc
from functools import wraps
from docarray import DocArray
from docarray.documents import TextDoc
def get_test_da(n: int):
return DocArray[TextDoc](gen_text_docs(n))
def gen_text_docs(n: int):
for i in range(n):
yield TextDoc(text=f'text {i}')
def profile_memory(func):
"""De... | import tracemalloc
from functools import wraps
from docarray import DocumentArray
from docarray.documents import TextDoc
def get_test_da(n: int):
return DocumentArray[TextDoc](gen_text_docs(n))
def gen_text_docs(n: int):
for i in range(n):
yield TextDoc(text=f'text {i}')
def profile_memory(func):... |
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.n... | from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.n... |
import os
import subprocess
import time
from typing import List
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('doc... | import os
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('docker-compose-testing')
@pytest.fixture
def image_nam... |
import numpy as np
import torch
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageBytes,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
Torc... | import numpy as np
import torch
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from ... |
import os
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Dict
import orjson
from pydantic import BaseModel, Field
from rich.console import Console
from docarray.base_doc.base_node import BaseNode
from docarray.base_doc.io.json import orjson_dumps_and_decode
from docarray.base_doc.mixins import IOMixi... | import os
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Dict
import orjson
from pydantic import BaseModel, Field
from rich.console import Console
from docarray.base_doc.base_node import BaseNode
from docarray.base_doc.io.json import orjson_dumps_and_decode
from docarray.base_doc.mixins import IOMixi... |
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=Fals... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=Fals... |
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | # flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... |
"""Flat reader."""
from fsspec import AbstractFileSystem
from fsspec.implementations.local import LocalFileSystem
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class FlatReader(BaseReader):
... | """Flat reader."""
from fsspec import AbstractFileSystem
from fsspec.implementations.local import LocalFileSystem
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class FlatReader(BaseReader):
"... |
import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
MessageRole,
)
from llama_index.llms.modelscope.base import ModelScopeLLM
@pytest.fixture()
def modelscope_llm():
return ModelScopeLLM()
@pytest.fixture()
def prompt():
return "Hi, my name is"
@pytest.fixture()
def messages(... | import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
MessageRole,
)
from llama_index.llms.modelscope.base import ModelScopeLLM
@pytest.fixture()
def modelscope_llm():
return ModelScopeLLM()
@pytest.fixture()
def prompt():
return "Hi, my name is"
@pytest.fixture()
def messages(... |
"""Test Cohere API wrapper."""
from pathlib import Path
from pydantic import SecretStr
from pytest import MonkeyPatch
from langchain_community.llms.cohere import Cohere
from langchain_community.llms.loading import load_llm
from tests.integration_tests.llms.utils import assert_llm_equality
def test_cohere_call() ->... | """Test Cohere API wrapper."""
from pathlib import Path
from pydantic import SecretStr
from pytest import MonkeyPatch
from langchain_community.llms.cohere import Cohere
from langchain_community.llms.loading import load_llm
from tests.integration_tests.llms.utils import assert_llm_equality
def test_cohere_call() ->... |
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
CT will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_ct_from_file.py path/to/sentences.txt
"""
import gzip
import... | """
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
CT will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_ct_from_file.py path/to/sentences.txt
"""
import gzip
import... |
from typing import Dict
from hypothesis import given, note, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
pytestmark = tm.timeout(20)
parameter_strategy = strategies.fixed_dictionaries({
'booster': strategies.just('gblinear'),
'eta': strategies.floats(0.01, 0.25),
'tolera... | from hypothesis import given, note, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
pytestmark = tm.timeout(20)
parameter_strategy = strategies.fixed_dictionaries({
'booster': strategies.just('gblinear'),
'eta': strategies.floats(0.01, 0.25),
'tolerance': strategies.floats(1... |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
import asyncio
import pytest
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import (
VectorStoreQuery,
VectorStoreQueryMode,
)
from vespa.application import ApplicationPackage
from llama_index.vector_stores.vespa import VespaVectorStore, hybrid_template
try:
# Shoul... | import asyncio
import pytest
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import (
VectorStoreQuery,
VectorStoreQueryMode,
)
from vespa.application import ApplicationPackage
from llama_index.vector_stores.vespa import VespaVectorStore, hybrid_template
try:
# Shoul... |
from ._hdemucs import HDemucs, hdemucs_high, hdemucs_low, hdemucs_medium
from .conformer import Conformer
from .conv_tasnet import ConvTasNet
from .deepspeech import DeepSpeech
from .emformer import Emformer
from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT
from .rnnt_decoder import Hypothesis, RNNTBeamSe... | from .conformer import Conformer
from .conv_tasnet import ConvTasNet
from .deepspeech import DeepSpeech
from .emformer import Emformer
from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT
from .rnnt_decoder import Hypothesis, RNNTBeamSearch
from .tacotron2 import Tacotron2
from .wav2letter import Wav2Letter
... |
"""Loads RST files."""
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredRSTLoader(UnstructuredFileLoader):
"""Load `RST` files using `Unstructured`.
... | """Loads RST files."""
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredRSTLoader(UnstructuredFileLoader):
"""Load `RST` files using `Unstructured`.
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashi... | # Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .data... |
import base64
import os
import pytest
import requests
from llama_index.core.llms import LLM
from llama_index.core.schema import ImageNode
from llama_index.multi_modal_llms.gemini import GeminiMultiModal
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in GeminiMultiModal.__mro__]
assert ... | import base64
import os
import pytest
import requests
from llama_index.core.llms import LLM
from llama_index.core.schema import ImageNode
from llama_index.multi_modal_llms.gemini import GeminiMultiModal
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in GeminiMultiModal.__mro__]
assert ... |
from __future__ import annotations
from collections.abc import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__... | from __future__ import annotations
from collections.abc import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... |
def get_full_schema() -> dict:
"""Get full schema
:return: the full schema for Jina core as a dict.
"""
from jina import __version__
from jina.importer import IMPORTED
from jina.schemas.deployment import schema_deployment
from jina.schemas.executor import schema_all_executors
from jina.s... | def get_full_schema() -> dict:
"""Get full schema
:return: the full schema for Jina core as a dict.
"""
from jina import __version__
from jina.importer import IMPORTED
from jina.schemas.executor import schema_all_executors
from jina.schemas.flow import schema_flow
from jina.schemas.meta ... |
"""
This is a simple application for a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
w... | """
This is a simple application for a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
w... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms import Compose
from mmengine.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class PipelineSwitchHook(Hook):
"""Switch data pipeline at switch_epoch.
Args:
switch_epoch (int): switch pipeline at this epo... | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms import Compose
from mmengine.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class PipelineSwitchHook(Hook):
"""Switch data pipeline at switch_epoch.
Args:
switch_epoch (int): switch pipeline at this epo... |
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model ... | _base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model ... |
_base_ = './faster-rcnn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',... | _base_ = './faster_rcnn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',... |
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeXt
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block
def test_renext_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pyt... | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeXt
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block
def test_renext_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pyt... |
# dataset settings
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='dis... | # dataset settings
_base_ = 'coco_instance.py'
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
_delete_=True,
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type=dataset_... |
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
import mmcv
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector)
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_ar... | # Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
import mmcv
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector)
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_ar... |
import json
from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HttpMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
... | import json
from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HttpMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from .convfc_bbox_head import ConvFCBBoxHead
@HEADS.register_module()
class SCNetBBoxHead(ConvFCBBoxHead):
"""BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
This inherits ``ConvFCBBoxHead`` with modified forwar... | from mmdet.models.builder import HEADS
from .convfc_bbox_head import ConvFCBBoxHead
@HEADS.register_module()
class SCNetBBoxHead(ConvFCBBoxHead):
"""BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
This inherits ``ConvFCBBoxHead`` with a modified forward() function, allowing us
to get intermediate s... |
import numpy as np
from docarray import BaseDocument
from docarray.typing import Embedding
def test_set_embedding():
class MyDocument(BaseDocument):
embedding: Embedding
d = MyDocument(embedding=np.zeros((3, 224, 224)))
assert isinstance(d.embedding, np.ndarray)
assert (d.embedding == np.ze... | import numpy as np
from docarray import Document
from docarray.typing import Embedding
def test_set_embedding():
class MyDocument(Document):
embedding: Embedding
d = MyDocument(embedding=np.zeros((3, 224, 224)))
assert isinstance(d.embedding, np.ndarray)
assert (d.embedding == np.zeros((3, ... |
from collections.abc import Sequence
from typing import Any, Optional
from langchain_qdrant.sparse_embeddings import SparseEmbeddings, SparseVector
class FastEmbedSparse(SparseEmbeddings):
"""An interface for sparse embedding models to use with Qdrant."""
def __init__(
self,
model_name: str ... | from typing import Any, List, Optional, Sequence
from langchain_qdrant.sparse_embeddings import SparseEmbeddings, SparseVector
class FastEmbedSparse(SparseEmbeddings):
"""An interface for sparse embedding models to use with Qdrant."""
def __init__(
self,
model_name: str = "Qdrant/bm25",
... |
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audi... | from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.t... |
import torch
from torchvision.transforms.functional import InterpolationMode
def get_module(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.transforms.v2
return torchvision.transforms.v2
else:
import torchvision.t... | import torch
from torchvision.transforms.functional import InterpolationMode
def get_module(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.transforms.v2
return torchvision.transforms.v2
else:
import torchvision.t... |
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | # flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... |
try:
import sklearn
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
except ImportError:
sklearn = None
class BaseEstimator:
pass
class TransformerMixin:
pass
def assert_sklearn_installed(symbol_name):
if sklearn is None:
raise Impo... | from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.utils._array_api import get_namespace
def _check_model(model):
"""Check whether the model need sto be compiled."""
# compile model if user gave us an un-compiled model
if ... |
"""O365 tools."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
O365CreateDraftMessage,
O365SearchEmails,
O365SearchEvents,
O365SendEvent,
O365SendMessage,
)
# Create a way to... | """O365 tools."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
O365CreateDraftMessage,
O365SearchEmails,
O365SearchEvents,
O365SendEvent,
O365SendMessage,
)
# Create a way to... |
"""Vectara RAG Pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.schema import TextNode
from llama_index.indices.managed.vectara import VectaraIndex
class VectaraRagPack(BaseLlamaPack):
"""Vectara RAG pack."""
def __init__... | """Vectara RAG Pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.schema import TextNode
from llama_index.indices.managed.vectara import VectaraIndex
class VectaraRagPack(BaseLlamaPack):
"""Vectara RAG pack."""
def __init_... |
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import dtype_policies
from keras.src import layers
from keras.src import testing
class ZeroPadding2DTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
{"data_format": "channels_f... | import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
class ZeroPadding2DTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
{"data_format": "channels_first"},
{"data_format": "chan... |
from PIL import Image
from sentence_transformers import SentenceTransformer, models, util
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPModel, CLIPProcessor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip... | from sentence_transformers import SentenceTransformer, util, models
from PIL import ImageFile, Image
import numpy as np
import requests
###########
image = Image.open('two_dogs_in_snow.jpg')
from transformers import CLIPProcessor, CLIPModel
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
proces... |
"""Test file reader."""
import json
import sys
from tempfile import TemporaryDirectory
import pytest
from llama_index.core.readers.json import JSONReader
def test_basic() -> None:
"""Test JSON reader in basic mode."""
with TemporaryDirectory() as tmp_dir:
file_name = f"{tmp_dir}/test1.json"
... | """Test file reader."""
from tempfile import TemporaryDirectory
from llama_index.core.readers.json import JSONReader
def test_basic() -> None:
"""Test JSON reader in basic mode."""
with TemporaryDirectory() as tmp_dir:
file_name = f"{tmp_dir}/test1.json"
with open(file_name, "w") as f:
... |
from docarray.base_document.document import BaseDocument
def test_base_document_init():
doc = BaseDocument()
assert doc.id is not None
| from docarray.document.document import BaseDocument
def test_base_document_init():
doc = BaseDocument()
assert doc.id is not None
|
_base_ = './mask-rcnn_hrnetv2p-w32-1x_coco.py'
model = dict(
backbone=dict(
extra=dict(
stage2=dict(num_channels=(18, 36)),
stage3=dict(num_channels=(18, 36, 72)),
stage4=dict(num_channels=(18, 36, 72, 144))),
init_cfg=dict(
type='Pretrained', checkpoi... | _base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py'
model = dict(
backbone=dict(
extra=dict(
stage2=dict(num_channels=(18, 36)),
stage3=dict(num_channels=(18, 36, 72)),
stage4=dict(num_channels=(18, 36, 72, 144))),
init_cfg=dict(
type='Pretrained', checkpoi... |
"""LLM Compiler Output Parser."""
import re
from typing import Any, Dict, List, Sequence
from llama_index.core.tools import BaseTool
from llama_index.core.types import BaseOutputParser
from .schema import JoinerOutput, LLMCompilerParseResult
from .utils import get_graph_dict
THOUGHT_PATTERN = r"Thought: ([^\n]*)"
A... | """LLM Compiler Output Parser."""
import re
from typing import Any, Dict, List, Sequence
from llama_index.core.tools import BaseTool
from llama_index.core.types import BaseOutputParser
from .schema import JoinerOutput, LLMCompilerParseResult
from .utils import get_graph_dict
THOUGHT_PATTERN = r"Thought: ([^\n]*)"
A... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Document, Flow
from ...torch_object_detection_segmenter import TorchObjectDetectionSegmenter
def test_exec():
f = Flow().add(uses=TorchObjectDetectionSegmenter)
with f:
resp = ... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Flow, Document
from ...torch_object_detection_segmenter import TorchObjectDetectionSegmenter
def test_exec():
f = Flow().add(uses=TorchObjectDetectionSegmenter)
with f:
resp = f... |
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writ... | # Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writ... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | # Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... |
from typing import Any, Dict
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.llms.openai import BaseOpenAI
from langchain_community.utils.openai import is_openai_v1
DEFAULT_BASE_URL = "https://text.octoai.run/v1/"
D... | from typing import Any, Dict
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.llms.openai import BaseOpenAI
from langchain_community.utils.openai import is_openai_v1
DEFAULT_BASE_URL = "https://text.octoai.run/v1/"
D... |
import importlib.util
import os
import warnings
from functools import wraps
from typing import Optional
def eval_env(var, default):
"""Check if environment varable has True-y value"""
if var not in os.environ:
return default
val = os.environ.get(var, "0")
trues = ["1", "true", "TRUE", "on", "... | import importlib.util
import os
import warnings
from functools import wraps
from typing import Optional
def eval_env(var, default):
"""Check if environment varable has True-y value"""
if var not in os.environ:
return default
val = os.environ.get(var, "0")
trues = ["1", "true", "TRUE", "on", "... |
"""
Getting started with categorical data
=====================================
Experimental support for categorical data.
Previously, users needed to run an encoder themselves before passing the data into XGBoost,
which creates a sparse matrix and potentially increases memory usage. This demo
showcases the experimental... | """
Getting started with categorical data
=====================================
Experimental support for categorical data.
Previously, users needed to run an encoder themselves before passing the data into XGBoost,
which creates a sparse matrix and potentially increases memory usage. This demo
showcases the experimental... |
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
model = dict(
data_preprocessor=dict(
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
... | _base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
img_norm_cfg = dict(
mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 64... |
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class TFDatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
def __init__(self, dataset, class_weight=None, distribution=None)... | from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class TFDatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
def __init__(self, dataset, class_weight=None, distribution=None)... |
from ._effector import AudioEffector
from ._playback import play_audio
from ._stream_reader import StreamReader
from ._stream_writer import CodecConfig, StreamWriter
__all__ = [
"AudioEffector",
"StreamReader",
"StreamWriter",
"CodecConfig",
"play_audio",
]
| from ._playback import play_audio
from ._stream_reader import StreamReader
from ._stream_writer import StreamWriter
__all__ = [
"StreamReader",
"StreamWriter",
"play_audio",
]
|
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
from .utils import ... | from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import TorchaudioTestCase, skipIfNoModule
from .utils import ... |
import copy
import warnings
from collections.abc import Iterable, Iterator, Sized
from typing import TypeVar
from torch.utils.data.datapipes.datapipe import IterDataPipe
_T = TypeVar("_T")
__all__ = ["IterableWrapperIterDataPipe"]
class IterableWrapperIterDataPipe(IterDataPipe[_T]):
r"""
Wraps an iterable... | # mypy: allow-untyped-defs
import copy
import warnings
from torch.utils.data.datapipes.datapipe import IterDataPipe
__all__ = ["IterableWrapperIterDataPipe"]
class IterableWrapperIterDataPipe(IterDataPipe):
r"""
Wraps an iterable object to create an IterDataPipe.
Args:
iterable: Iterable objec... |
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/lice... | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/lice... |
from .DenoisingAutoEncoderDataset import DenoisingAutoEncoderDataset
from .NoDuplicatesDataLoader import NoDuplicatesDataLoader
from .ParallelSentencesDataset import ParallelSentencesDataset
from .SentenceLabelDataset import SentenceLabelDataset
from .SentencesDataset import SentencesDataset
__all__ = [
"Denoising... | from .DenoisingAutoEncoderDataset import DenoisingAutoEncoderDataset
from .NoDuplicatesDataLoader import NoDuplicatesDataLoader
from .ParallelSentencesDataset import ParallelSentencesDataset
from .SentencesDataset import SentencesDataset
from .SentenceLabelDataset import SentenceLabelDataset
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
fr... | from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
fr... |
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmdet.datasets import CocoDataset
from mmdet.visualization import get_palette, jitter_color, palette_val
def test_palette():
assert palette_val([(1, 2, 3)])[0] == (1 / 255, 2 / 255, 3 / 255)
# test list
palette = [(1, 0, 0), (0, 1, ... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.datasets import CocoDataset
from mmdet.visualization import get_palette, palette_val
def test_palette():
assert palette_val([(1, 2, 3)])[0] == (1 / 255, 2 / 255, 3 / 255)
# test list
palette = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
palette_ = get_... |
_base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvisio... | _base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvisio... |
"""News article reader using Newspaper."""
import logging
from importlib.util import find_spec
from typing import Any, Generator, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class NewsArticleReader(BaseReader):
"""
... | """News article reader using Newspaper."""
import logging
from importlib.util import find_spec
from typing import Any, Generator, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class NewsArticleReader(BaseReader):
"""Si... |
"""Base callback handler that can be used to handle callbacks in langchain."""
from __future__ import annotations
from langchain_core.callbacks import (
AsyncCallbackHandler,
BaseCallbackHandler,
BaseCallbackManager,
CallbackManagerMixin,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
... | """Base callback handler that can be used to handle callbacks in langchain."""
from __future__ import annotations
from langchain_core.callbacks import (
AsyncCallbackHandler,
BaseCallbackHandler,
BaseCallbackManager,
CallbackManagerMixin,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
... |
import argparse
import pytest
from jina.parsers.hubble.new import mixin_hub_new_parser
def test_new_parser():
parser = argparse.ArgumentParser(
epilog=f'Test', description='Test Hub Command Line Interface'
)
mixin_hub_new_parser(parser)
args = parser.parse_args([])
assert not args.dock... | import argparse
import pytest
from jina.parsers.hubble.new import mixin_hub_new_parser
def test_new_parser():
parser = argparse.ArgumentParser(
epilog=f'Test', description='Test Hub Command Line Interface'
)
mixin_hub_new_parser(parser)
args = parser.parse_args([])
assert not args.add_... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric i... | # Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric i... |
from abc import ABC, abstractmethod
from typing import Callable, List, Sequence, Optional, Union, Any
from llama_index.core.agent.workflow.workflow_events import (
AgentOutput,
ToolCallResult,
)
from llama_index.core.bridge.pydantic import (
BaseModel,
Field,
ConfigDict,
field_validator,
)
from... | from abc import ABC, abstractmethod
from typing import Callable, List, Sequence, Optional, Union
from llama_index.core.agent.workflow.workflow_events import (
AgentOutput,
ToolCallResult,
)
from llama_index.core.bridge.pydantic import (
BaseModel,
Field,
ConfigDict,
field_validator,
)
from llam... |
default_scope = 'mmdet'
default_hooks = dict(
optimizer=dict(type='OptimizerHook', grad_clip=None),
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=di... | default_scope = 'mmdet'
default_hooks = dict(
optimizer=dict(type='OptimizerHook', grad_clip=None),
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=di... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestDy... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestDy... |
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pickle
from inspect import signature
import pytest
from sklearn.utils.deprecation import _is_deprecated, deprecated
@deprecated("qwerty")
class MockClass1:
pass
class MockClass2:
@deprecated("mockclass2_method")
de... | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pickle
import pytest
from sklearn.utils.deprecation import _is_deprecated, deprecated
@deprecated("qwerty")
class MockClass1:
pass
class MockClass2:
@deprecated("mockclass2_method")
def method(self):
pass
... |
from langchain_core._api.path import as_import_path, get_relative_path
__all__ = ["as_import_path", "get_relative_path"]
| from langchain_core._api.path import as_import_path, get_relative_path
__all__ = ["get_relative_path", "as_import_path"]
|
import numpy as np
import torch
import torchaudio.prototype.transforms as T
from scipy import signal
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
def _get_ratio(mat):
return (mat.sum() / mat.numel()).item()
class TransformsTestImpl(TestBaseMixin):
... | import numpy as np
import torch
import torchaudio.prototype.transforms as T
from scipy import signal
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class TransformsTestImpl(TestBaseMixin):
@nested_params(
[(10, 4), (4, 3, 1, 2), (2,), ()],
[(100, 43), (21, 45)],
... |
from __future__ import annotations
from typing import Any, Optional
from langchain_core.outputs import LLMResult
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler... | from __future__ import annotations
from typing import Any, Optional
from langchain_core.outputs import LLMResult
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler... |
"""Default prompt for ReAct agent."""
from pathlib import Path
# TODO: have formatting instructions be a part of react output parser
with (
Path(__file__).parents[0] / Path("templates") / Path("system_header_template.md")
).open("r") as f:
__BASE_REACT_CHAT_SYSTEM_HEADER = f.read()
REACT_CHAT_SYSTEM_HEADER =... | """Default prompt for ReAct agent."""
from pathlib import Path
# TODO: have formatting instructions be a part of react output parser
with (
Path(__file__).parents[0] / Path("templates") / Path("system_header_template.md")
).open("r") as f:
__BASE_REACT_CHAT_SYSTEM_HEADER = f.read()
REACT_CHAT_SYSTEM_HEADER = ... |
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
img_scales = [(1333, 800), (666, 400), (2000, 1200)]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[[
dict... | tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
img_scales = [(1333, 800), (666, 400), (2000, 1200)]
tta_pipeline = [
dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
dict(
type='TestTimeAug',
transforms... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import DoctranTextTranslator
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling opti... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import DoctranTextTranslator
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling opti... |
"""Retriever tool."""
from __future__ import annotations
from functools import partial
from typing import TYPE_CHECKING, Literal, Optional, Union
from pydantic import BaseModel, Field
from langchain_core.prompts import (
BasePromptTemplate,
PromptTemplate,
aformat_document,
format_document,
)
from l... | from __future__ import annotations
from functools import partial
from typing import TYPE_CHECKING, Literal, Optional, Union
from pydantic import BaseModel, Field
from langchain_core.prompts import (
BasePromptTemplate,
PromptTemplate,
aformat_document,
format_document,
)
from langchain_core.tools.sim... |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmdet.models.dense_heads.fcos_head import FCOSHead
from mmdet.registry import MODELS
@MODELS.register_module()
class NASFCOSHead(FCOSHead):
"""Anchor-free head used in `NASFCOS <https://... | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmdet.models.dense_heads.fcos_head import FCOSHead
from mmdet.registry import MODELS
@MODELS.register_module()
class NASFCOSHead(FCOSHead):
"""Anchor-free head used in `NASFCOS <https://... |
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.core.base.llms.types import ChatMessage, LLMMetadata
from llama_index.core.callbacks import CallbackManager
from llama_index.core.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE
from llama_index.core.base.llms.generic_utils import ge... | from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.core.base.llms.types import ChatMessage, LLMMetadata
from llama_index.core.callbacks import CallbackManager
from llama_index.core.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE
from llama_index.core.base.llms.generic_utils import ge... |
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.numpy import core
from keras.src.backend.numpy import image
from keras.src.backend.numpy import linalg
from keras.src.backend.numpy import math
from keras.src.backend.numpy import nn
from keras.src.backend.numpy import numpy
from keras.sr... | from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.numpy import core
from keras.src.backend.numpy import image
from keras.src.backend.numpy import linalg
from keras.src.backend.numpy import math
from keras.src.backend.numpy import nn
from keras.src.backend.numpy import numpy
from keras.sr... |
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""
SPLADE Pooling module for creating the sparse embeddings.
This module implements the SPLADE pooling mechanism that:
1. Takes token logits from a mask... | from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""
SPLADE Pooling module for creating the sparse embeddings.
This module implements the SPLADE pooling mechanism that:
1. Takes token logits from a mask... |
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, levels_to_images, mask2nda... | # Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, levels_to_images, mask2nda... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_scholar.tool import GoogleScholarQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling ... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_scholar.tool import GoogleScholarQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling ... |
_base_ = './ga-retinanet_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
| _base_ = './ga_retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
"""Cassandra-based chat message history, based on cassIO."""
from __future__ import annotations
import json
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence
from langchain_community.utilities.cassandra import SetupMode
if TYPE_CHECKING:
from cassandra.cluster import Se... | """Cassandra-based chat message history, based on cassIO."""
from __future__ import annotations
import json
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence
from langchain_community.utilities.cassandra import SetupMode
if TYPE_CHECKING:
from cassandra.cluster import Se... |
_base_ = './mask-rcnn_swin-t-p4-w7_fpn_amp-ms-crop-3x_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
model = dict(
backbone=dict(
depths=[2, 2, 18, 2],
init_cfg=dict(type='Pretrained', checkpoint=pretrained)))
| _base_ = './mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
model = dict(
backbone=dict(
depths=[2, 2, 18, 2],
init_cfg=dict(type='Pretrained', checkpoint=pretrained)))
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import torch
from torch.distributed.tensor._op_schema import (
OpSchema,
OpSpec,
OpStrategy,
StrategyType,
)
from torch.distributed.tensor._ops.utils import is_tensor_partial, register_op_strategy
aten = torch.ops.aten
@register_op_strategy(
[
... | # Copyright (c) Meta Platforms, Inc. and affiliates
import torch
from torch.distributed.tensor._op_schema import (
OpSchema,
OpSpec,
OpStrategy,
StrategyType,
)
from torch.distributed.tensor._ops.utils import is_tensor_partial, register_op_strategy
aten = torch.ops.aten
@register_op_strategy(
[
... |
# Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.distributed as torch_dist
import torch.multiprocessing as mp
import mmengine.dist as dist
def _test_get_backend_non_dist():
assert dist.get_backend() is None
def _test_get_world_size_non_dist():
assert dist.g... | # Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.distributed as torch_dist
import torch.multiprocessing as mp
import mmengine.dist as dist
def _test_get_backend_non_dist():
assert dist.get_backend() is None
def _test_get_world_size_non_dist():
assert dist.g... |
from xgboost import dask as dxgb
from xgboost import testing as tm
import dask.array as da
import dask.distributed
def train_result(client, param, dtrain, num_rounds):
result = dxgb.train(
client,
param,
dtrain,
num_rounds,
verbose_eval=False,
evals=[(dtrain, "trai... | from xgboost import dask as dxgb
from xgboost import testing as tm
from hypothesis import given, strategies, assume, settings, note
import dask.array as da
import dask.distributed
def train_result(client, param, dtrain, num_rounds):
result = dxgb.train(
client,
param,
dtrain,
num... |
"""
Collection of examples for using sklearn interface
==================================================
For an introduction to XGBoost's scikit-learn estimator interface, see
:doc:`/python/sklearn_estimator`.
Created on 1 Apr 2015
@author: Jamie Hall
"""
import pickle
import numpy as np
from sklearn.datasets imp... | """
Collection of examples for using sklearn interface
==================================================
For an introduction to XGBoost's scikit-learn estimator interface, see
:doc:`/python/sklearn_estimator`.
Created on 1 Apr 2015
@author: Jamie Hall
"""
import pickle
import numpy as np
from sklearn.datasets impo... |
from typing import Any, Dict, List, Optional, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class OracleAutonomousDatabaseLoader(BaseLoader):
"""
Load from Oracle ADB
Autonomous Database connection can be made by either connection_s... | from typing import Any, Dict, List, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class OracleAutonomousDatabaseLoader(BaseLoader):
"""
Load from Oracle ADB
Autonomous Database connection can be made by either connection_string
... |
from keras.src import backend
from keras.src.utils.module_utils import tensorflow as tf
def get_tensor_spec(t, dynamic_batch=False, name=None):
"""Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`."""
if isinstance(t, tf.TypeSpec):
spec = t
elif isinstance(t, tf.__internal__.Composite... | from keras.src import backend
from keras.src.utils.module_utils import tensorflow as tf
def get_tensor_spec(t, dynamic_batch=False, name=None):
"""Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`."""
if isinstance(t, tf.TypeSpec):
spec = t
elif isinstance(t, tf.__internal__.Composite... |
# Copyright (c) OpenMMLab. All rights reserved.
import math
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmdet.registry import MODELS
from .detectors_resnet import Bottleneck as _Bottleneck
from .detectors_resnet import DetectoRS_ResNet
class Bottleneck(_Bottleneck):
expansion = 4
def __ini... | # Copyright (c) OpenMMLab. All rights reserved.
import math
from mmcv.cnn import build_conv_layer, build_norm_layer
from ..builder import BACKBONES
from .detectors_resnet import Bottleneck as _Bottleneck
from .detectors_resnet import DetectoRS_ResNet
class Bottleneck(_Bottleneck):
expansion = 4
def __init_... |
"""
This script finds the person responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'.
Note: we only ping the person who pulls the PR, not the reviewers, as the reviewers can sometimes be external
to torchaudio with no labeling responsibility, so we don't want t... | """
This script finds the person responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'.
Note: we only ping the person who pulls the PR, not the reviewers, as the reviewers can sometimes be external
to torchaudio with no labeling responsibility, so we don't want t... |
# Copyright (c) OpenMMLab. All rights reserved.
import os
import sys
from tempfile import TemporaryDirectory
from unittest.mock import Mock, patch
from mmengine.hooks import CheckpointHook
sys.modules['file_client'] = sys.modules['mmengine.fileio.file_client']
class MockPetrel:
_allow_symlink = False
def ... | # Copyright (c) OpenMMLab. All rights reserved.
import os
import sys
from tempfile import TemporaryDirectory
from unittest.mock import Mock, patch
from mmengine.hooks import CheckpointHook
sys.modules['file_client'] = sys.modules['mmengine.fileio.file_client']
class MockPetrel:
_allow_symlink = False
def ... |
from torchaudio._internal.module_utils import dropping_class_support
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AddNoise,
AmplitudeToDB,
ComputeDeltas,
Convolve,
Deemphasis,
Fade,
FFTConvolve,
FrequencyMasking,
GriffinLim,
InverseMelScal... | from torchaudio._internal.module_utils import dropping_support
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AddNoise,
AmplitudeToDB,
ComputeDeltas,
Convolve,
Deemphasis,
Fade,
FFTConvolve,
FrequencyMasking,
GriffinLim,
InverseMelScale,
... |
import json
import os
from typing import Dict
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super(LayerNorm, se... | import json
import os
from typing import Dict
import torch
from torch import Tensor, nn
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super(LayerNorm, self).__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: Dict[st... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks.lr_updater import (CosineAnnealingLrUpdaterHook,
annealing_cos)
from mmdet.registry import HOOKS
@HOOKS.register_module()
class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook):
"""YOLOX learning rate... | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks import HOOKS
from mmcv.runner.hooks.lr_updater import (CosineAnnealingLrUpdaterHook,
annealing_cos)
@HOOKS.register_module()
class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook):
"""YOLOX learning ra... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.bytes.image_bytes import ImageBytes
from docarray.typing.bytes.video_bytes import VideoBytes
__all__ = ['ImageBytes', 'VideoBytes', 'AudioBytes']
|