| input (string, 33–5k chars) | output (string, 32–5k chars) |
|---|---|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `Senten... | """
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `Senten... |
from typing import Dict, List
import requests
HEALTH_CHECK_OP = '%2Fgrpc.health.v1.Health%2FCheck'
def parse_string_jaeger_tags(jaeger_tags: List) -> Dict[str, str]:
"""Parse jaeger tags into a dictionary"""
return {i['key']: i['value'] for i in jaeger_tags if i['type'] == 'string'}
def get_last_health_ch... | import requests
from typing import List, Dict
HEALTH_CHECK_OP = '%2Fgrpc.health.v1.Health%2FCheck'
def parse_string_jaeger_tags(jaeger_tags: List) -> Dict[str, str]:
"""Parse jaeger tags into a dictionary"""
return {i['key']: i['value'] for i in jaeger_tags if i['type'] == 'string'}
def get_last_health_che... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import STFT
from ... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
f... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@HEADS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion ... | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@HEADS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head wit... |
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .c... | from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .cor... |
from __future__ import annotations
from enum import Enum
from typing import Any, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE =... | from __future__ import annotations
from enum import Enum
from typing import Any, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE =... |
import pytest
from jina import Client
from jina.enums import ProtocolType
@pytest.mark.parametrize(
'protocol, gateway_type',
[
('http', ProtocolType.HTTP),
('grpc', ProtocolType.GRPC),
('ws', ProtocolType.WEBSOCKET),
(None, None),
],
)
@pytest.mark.parametrize('tls', [Tru... | import pytest
from jina import Client
from jina.enums import GatewayProtocolType
@pytest.mark.parametrize(
'protocol, gateway_type',
[
('http', GatewayProtocolType.HTTP),
('grpc', GatewayProtocolType.GRPC),
('ws', GatewayProtocolType.WEBSOCKET),
(None, None),
],
)
@pytest.... |
"""Standard LangChain interface tests"""
from typing import Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_mistralai import ChatMistralAI
... | """Standard LangChain interface tests"""
from typing import Optional, Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_mistralai import Chat... |
import numpy as np
import pytest
import torch
from docarray import BaseDocument
from docarray.typing import AnyTensor, NdArray, TorchTensor
try:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp # type: ignore
from docarray.typing import TensorFlowTensor
except (ImportError, Ty... | import numpy as np
import torch
from docarray import BaseDocument
from docarray.typing import AnyTensor, NdArray, TorchTensor
def test_set_tensor():
class MyDocument(BaseDocument):
tensor: AnyTensor
d = MyDocument(tensor=np.zeros((3, 224, 224)))
assert isinstance(d.tensor, NdArray)
assert i... |
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sklearn.metrics import ndcg_score
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of docume... | import csv
import logging
import os
from typing import Optional
import numpy as np
from sklearn.metrics import ndcg_score
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of documents, it ... |
import subprocess
import pytest
from dpr_text import DPRTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') f... | import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...dpr_text import DPRTextEncoder
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here... |
#!/usr/bin/env python
# Sorts what's new entries with per-module headings.
# Pass what's new entries on stdin.
import re
import sys
from collections import defaultdict
LABEL_ORDER = ["MajorFeature", "Feature", "Efficiency", "Enhancement", "Fix", "API"]
def entry_sort_key(s):
if s.startswith("- |"):
retu... | #!/usr/bin/env python
# Sorts what's new entries with per-module headings.
# Pass what's new entries on stdin.
import re
import sys
from collections import defaultdict
LABEL_ORDER = ["MajorFeature", "Feature", "Efficiency", "Enhancement", "Fix", "API"]
def entry_sort_key(s):
if s.startswith("- |"):
retu... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.saving.serialization import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.legacy.saving.serialization import (
serialize_keras_object as seri... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.saving.serialization import deserialize_keras_object
from keras.src.legacy.saving.serialization import serialize_keras_object
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor, VideoBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.... | from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor, VideoBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.... |
import argparse
from abc import ABC
from typing import TYPE_CHECKING, Optional, Union
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
if TYPE_CHECKING:
import asyncio
import multiprocessing
import threading
class GatewayRuntime(AsyncNewLoopRuntime, ABC):
"""
The Runtime from which th... | import argparse
from abc import ABC
from typing import TYPE_CHECKING, Optional, Union
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
if TYPE_CHECKING:
import asyncio
import multiprocessing
import threading
class GatewayRuntime(AsyncNewLoopRuntime, ABC):
"""
The Runtime from which th... |
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_oobleck imp... | from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import Auto... |
__version__ = '0.14.2'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
| __version__ = '0.14.1'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import (
AlibabaCloudOpenSearch,
AlibabaCloudOpenSearchSettings,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import (
AlibabaCloudOpenSearch,
AlibabaCloudOpenSearchSettings,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for... |
"""Init params."""
from llama_index.finetuning.embeddings.adapter import EmbeddingAdapterFinetuneEngine
from llama_index.finetuning.embeddings.sentence_transformer import (
SentenceTransformersFinetuneEngine,
)
__all__ = ["EmbeddingAdapterFinetuneEngine", "SentenceTransformersFinetuneEngine"]
| """Init params."""
from llama_index.finetuning.embeddings.adapter import EmbeddingAdapterFinetuneEngine
from llama_index.finetuning.embeddings.sentence_transformer import (
SentenceTransformersFinetuneEngine,
)
__all__ = ["EmbeddingAdapterFinetuneEngine", "SentenceTransformersFinetuneEngine"]
|
import functools
from collections import defaultdict
import torch
from torch._export.passes._node_metadata_hook import (
_node_metadata_hook,
_set_node_metadata_hook,
)
from torch._library.fake_profile import OpProfile, TensorMetadata
def insert_custom_op_guards(gm: torch.fx.GraphModule, ops_to_guard: set[st... | import functools
from collections import defaultdict
import torch
from torch._export.passes._node_metadata_hook import (
_node_metadata_hook,
_set_node_metadata_hook,
)
from torch._library.fake_profile import OpProfile, TensorMetadata
def insert_custom_op_guards(gm: torch.fx.GraphModule, ops_to_guard: set[st... |
# coding=utf-8
# Copyright 2025 Advanced Micro Devices, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/... | # coding=utf-8
# Copyright 2025 Advanced Micro Devices, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/... |
"""Snowflake Reader."""
import logging
from typing import Any, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from sqlalchemy import create_engine, text
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker
logger = logging.getL... | """Snowflake Reader."""
import logging
from typing import Any, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from sqlalchemy import create_engine, text
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker
logger = logging.getL... |
import warnings
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.LeakyReLU")
class LeakyReLU(Layer):
"""Leaky version of a Rectified Linear Unit activation layer.
This layer allows a small gradient when the u... | import warnings
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.LeakyReLU")
class LeakyReLU(Layer):
"""Leaky version of a Rectified Linear Unit activation layer.
This layer allows a small gradient when the u... |
_base_ = './cascade-mask-rcnn_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
| _base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of N... | from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of N... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional
from jina import Document, DocumentArray
from jina.logging.logger import JinaLogger
from pymongo import MongoClient
from pymongo.errors import BulkWriteError
class MongoHandler:
def ... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional
from pymongo import MongoClient
from pymongo.errors import BulkWriteError
from jina.logging.logger import JinaLogger
from jina import Document, DocumentArray
class MongoHandler:
def ... |
# dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dic... | # dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
d... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser... |
import ast
from typing import List, Optional
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class PythonFileToolSpec(BaseToolSpec):
spec_functions = ["function_definitions", "get_function", "get_functions"]
def __init__(self, file_name: str) -> None:
f = open(file_name).read()
... | import ast
from typing import List, Optional
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class PythonFileToolSpec(BaseToolSpec):
spec_functions = ["function_definitions", "get_function", "get_functions"]
def __init__(self, file_name: str) -> None:
f = open(file_name).read()
... |
"""Simple reader that reads wikipedia."""
from typing import Any, List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class WikipediaReader(BasePydanticReader):
"""
Wikipedia reader.
Reads a page.
"""
is_remote: bool = True
def _... | """Simple reader that reads wikipedia."""
from typing import Any, List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class WikipediaReader(BasePydanticReader):
"""Wikipedia reader.
Reads a page.
"""
is_remote: bool = True
def __init... |
from io import BytesIO
from typing import TYPE_CHECKING, Any, List, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from doca... | from io import BytesIO
from typing import TYPE_CHECKING, Any, List, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from doca... |
_base_ = 'tridentnet_r50-caffe_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 7... | _base_ = 'tridentnet_r50_caffe_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 7... |
"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, a... | """LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, a... |
import torch
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
import v2_extras
return torchvision.transforms.v2, torchvision.datapoints, v2_extras
... | from collections import defaultdict
import torch
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
import v2_extras
return torchvision.transforms.v2, ... |
"""Module for helper functions for clients."""
from typing import Optional, Tuple
from jina._docarray import Document, DocumentArray, docarray_v2
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
def _new_data_request_from_batch(
batch,
data_type: DataInputType,
endpoin... | """Module for helper functions for clients."""
from typing import Tuple, Optional
from docarray import Document, DocumentArray
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
def _new_data_request_from_batch(
batch, data_type: DataInputType, endpoint: str, target: Optional[st... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import GFLHead, LDHead
def test_ld_head_loss():
"""Tests vfnet head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'... | import mmcv
import torch
from mmdet.models.dense_heads import GFLHead, LDHead
def test_ld_head_loss():
"""Tests vfnet head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmc... |
import warnings
from typing import Any
from langchain_core.memory import BaseMemory
from pydantic import field_validator
from langchain.memory.chat_memory import BaseChatMemory
class CombinedMemory(BaseMemory):
"""Combining multiple memories' data together."""
memories: list[BaseMemory]
"""For tracking... | import warnings
from typing import Any
from langchain_core.memory import BaseMemory
from pydantic import field_validator
from langchain.memory.chat_memory import BaseChatMemory
class CombinedMemory(BaseMemory):
"""Combining multiple memories' data together."""
memories: list[BaseMemory]
"""For tracking... |
from pathlib import Path
import numpy as np
import pytest
from custom_image_torch_encoder import CustomImageTorchEncoder
from jina import Document, DocumentArray, Executor
@pytest.fixture
def encoder():
model_dir = Path(__file__).parents[1] / 'model'
return CustomImageTorchEncoder(
model_definition_f... | from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...custom_image_torch_encoder import CustomImageTorchEncoder
@pytest.fixture
def encoder():
model_dir = Path(__file__).parents[1] / 'model'
return CustomImageTorchEncoder(
model_definiti... |
"""Test LASER embeddings."""
import pytest
from langchain_community.embeddings.laser import LaserEmbeddings
@pytest.mark.filterwarnings("ignore::UserWarning:")
@pytest.mark.parametrize("lang", [None, "lus_Latn", "english"])
def test_laser_embedding_documents(lang: str) -> None:
"""Test laser embeddings for docu... | """Test LASER embeddings."""
import pytest
from langchain_community.embeddings.laser import LaserEmbeddings
@pytest.mark.filterwarnings("ignore::UserWarning:")
@pytest.mark.parametrize("lang", [None, "lus_Latn", "english"])
def test_laser_embedding_documents(lang: str) -> None:
"""Test laser embeddings for docu... |
"""Vector stores."""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.vectorstores.base import VST, VectorStore, VectorStoreRetriever
from langchain_core.vectorstores.in_memory import InMemoryVectorStore
__all__ = (
"VST",
"I... | """Vector stores."""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.vectorstores.base import VST, VectorStore, VectorStoreRetriever
from langchain_core.vectorstores.in_memory import InMemoryVectorStore
__all__ = (
"VectorStore"... |
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from sklearn import metrics
from sklearn.ensemble import (
BaggingClassifier,
BaggingRegressor,
IsolationForest,
StackingClassifier,
StackingRegressor,
)
from sklearn.utils._testing import assert_docstring... | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from sklearn import metrics
from sklearn.ensemble import StackingClassifier, StackingRegressor
from sklearn.utils._testing import assert_docstring_consistency, skip_if_no_numpydoc
CLASS_DOCSTRING_CONSISTENCY_CASES = [
{
... |
import os
from torchaudio.datasets import snips
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
_SAMPLE_RATE = 16000
_SPEAKERS = [
"Aditi",
"Amy",
"Brian",
"Emma",
"Geraint",
"Ivy",
"Joanna",
"Joey",
"Justin",
"Kendra",
... | import os
from torchaudio.datasets import snips
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
_SAMPLE_RATE = 16000
_SPEAKERS = [
"Aditi",
"Amy",
"Brian",
"Emma",
"Geraint",
"Ivy",
"Joanna",
"Joey",
"Justin",
"Kendra",
... |
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
from docarray.typing import AudioNdArray
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.u... | import warnings
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import AUDIO_FILE_... |
import multiprocessing
import random
import time
from functools import partial
import pytest
from jina import Client, Document, DocumentArray, Executor, Flow, requests
from jina.types.request.data import Response
NUM_REQUESTS = 5
class MyExecutor(Executor):
@requests(on='/ping')
def ping(self, **kwargs):
... | import pytest
from jina import Flow, Executor, Client, requests, DocumentArray, Document
import multiprocessing
import random
import time
from functools import partial
from jina.types.request.data import Response
NUM_REQUESTS = 5
class MyExecutor(Executor):
@requests(on='/ping')
def ping(self, **kwargs):
... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.dense_heads import YOLOXHead
def test_yolox_head_loss():
"""Tests yolox head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'... | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.dense_heads import YOLOXHead
def test_yolox_head_loss():
"""Tests yolox head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'... |
"""
This is a simple application for a sparse encoder: computing embeddings.
We have multiple sentences and want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
w... | """
This is a simple application for a sparse encoder: computing embeddings.
We have multiple sentences and want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
w... |
from typing import Any, Callable, Optional, Tuple
import torch
from .. import transforms
from .vision import VisionDataset
class FakeData(VisionDataset):
"""A fake dataset that returns randomly generated images and returns them as PIL images
Args:
size (int, optional): Size of the dataset. Default:... | from typing import Any, Callable, Optional, Tuple
import torch
from .. import transforms
from .vision import VisionDataset
class FakeData(VisionDataset):
"""A fake dataset that returns randomly generated images and returns them as PIL images
Args:
size (int, optional): Size of the dataset. Default:... |
"""langchain-core version information and utilities."""
VERSION = "0.3.63"
| """langchain-core version information and utilities."""
VERSION = "0.3.62"
|
"""Utils for LLM Compiler."""
import ast
import re
from typing import Any, Dict, List, Sequence, Tuple, Union
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import BaseTool, adapt_to_async_tool
from .schema import (
LLMCompilerParseResult,
LLMCompilerTask,
)
... | """Utils for LLM Compiler."""
import ast
import re
from typing import Any, Dict, List, Sequence, Tuple, Union
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import BaseTool, adapt_to_async_tool
from .schema import (
LLMCompilerParseResult,
LLMCompilerTask,
)
#... |
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoBytes,
VideoNdArray,
VideoTorc... | from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoBytes,
VideoNdArray,
VideoTorc... |
import os
from pathlib import Path
from typing import Any, Callable, Optional, Union
import torch.utils.data as data
from ..utils import _log_api_usage_once
class VisionDataset(data.Dataset):
"""
Base Class For making datasets which are compatible with torchvision.
It is necessary to override the ``__ge... | import os
from pathlib import Path
from typing import Any, Callable, List, Optional, Tuple, Union
import torch.utils.data as data
from ..utils import _log_api_usage_once
class VisionDataset(data.Dataset):
"""
Base Class For making datasets which are compatible with torchvision.
It is necessary to overri... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmengine.model import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class TripletLoss(BaseModule):
"""Triplet loss with hard positive/negative mining.
Reference:
Hermans et al. In Defense... | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmengine.model import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class TripletLoss(BaseModule):
"""Triplet loss with hard positive/negative mining.
Reference:
Hermans et al. In Defense... |
import numpy as np
import pytest
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import ops
from keras.src import testing
from keras.src.models import Sequential
class TimeDistributedTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
d... | import numpy as np
import pytest
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import ops
from keras.src import testing
class TimeDistributedTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_l... |
from typing import Optional
import numpy as np
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.typing import AnyTensor, ImageUrl
from jina import Deployment, Executor, Flow, requests
def test_different_document_schema():
class Image(BaseDocument):
t... | from typing import Optional
import numpy as np
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.typing import AnyTensor, ImageUrl
from jina import Deployment, Executor, Flow, requests
def test_different_document_schema():
class Image(BaseDocument):
tens... |
"""Tests for the InMemoryStore class."""
import pytest
from langchain_core.stores import InMemoryStore
from langchain_tests.integration_tests.base_store import (
BaseStoreAsyncTests,
BaseStoreSyncTests,
)
class TestInMemoryStore(BaseStoreSyncTests):
@pytest.fixture
def three_values(self) -> tuple[st... | """Tests for the InMemoryStore class."""
from typing import Tuple
import pytest
from langchain_core.stores import InMemoryStore
from langchain_tests.integration_tests.base_store import (
BaseStoreAsyncTests,
BaseStoreSyncTests,
)
class TestInMemoryStore(BaseStoreSyncTests):
@pytest.fixture
def thre... |
import warnings
from typing import Optional, Tuple, TypeVar
from docarray.typing import AudioNdArray
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
... | import warnings
from typing import Optional, Tuple, TypeVar
from docarray.typing import AudioNdArray
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handli... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handli... |
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .faster_rcnn import FasterRCNN
@DETECTORS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone,
... | from ..builder import DETECTORS
from .faster_rcnn import FasterRCNN
@DETECTORS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
... |
_base_ = './mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
... | _base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FOVEA(SingleStageDetector):
"""Implementation of `FoveaBox <https://arxiv.org/abs/1904.03... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FOVEA(SingleStageDetector):
"""Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_"""
def __init__(self,
backbone,
... |
from .functional import add_noise, convolve, fftconvolve
__all__ = ["add_noise", "convolve", "fftconvolve"]
| from .functional import convolve, fftconvolve
__all__ = ["convolve", "fftconvolve"]
|
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 6, 40, 3],
mlp_ratios=(4, 4, 4, 4),
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b5.pth')),
neck=dict(in_channe... | _base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 6, 40, 3],
mlp_ratios=(4, 4, 4, 4),
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b5.pth')),
neck=dict(in_channe... |
_base_ = [
'mmdet::_base_/models/mask-rcnn_r50_fpn.py',
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
# please install the mmpretrain
# import mmpretrain.models to trigger register_module in mmpretrain
custom_imports = dict(
... | _base_ = [
'mmdet::_base_/models/mask-rcnn_r50_fpn.py',
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
# please install the mmclassification dev-1.x branch
# import mmcls.models to trigger register_module in mmcls
custom_imports... |
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
SparseEncoderTrainingArguments extends :class:`~transformers.Tr... | from __future__ import annotations
from dataclasses import dataclass, field
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
SparseEncoderTrainingArguments extends :class:`~transfor... |
import asyncio
import time
import pytest
from jina import Document
from jina.clients.request import request_generator
from jina.serve.stream.helper import AsyncRequestsIterator, _RequestsCounter
def slow_blocking_generator():
for i in range(2):
yield Document(id=str(i))
time.sleep(2)
@pytest.m... | import asyncio
import time
import pytest
from jina import Document
from jina.clients.request import request_generator
from jina.serve.stream.helper import AsyncRequestsIterator, RequestsCounter
def slow_blocking_generator():
for i in range(2):
yield Document(id=str(i))
time.sleep(2)
@pytest.ma... |
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natura... | """
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natura... |
from __future__ import annotations
from typing_extensions import deprecated
from sentence_transformers import InputExample
from sentence_transformers.cross_encoder.evaluation.CEClassificationEvaluator import CEClassificationEvaluator
@deprecated(
"This evaluator has been deprecated in favor of the more general ... | from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sklearn.metrics import average_precision_score
from sentence_transformers import InputExample
from sentence_transformers.evaluation import BinaryClassificationEvaluator
logger = logging.getLogger(__name__)
class CEBinar... |
from __future__ import annotations
import json
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_t... | from __future__ import annotations
import json
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_t... |
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for indivi... | """
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for indivi... |
"""base multi modal retriever."""
from abc import abstractmethod
from typing import List
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.image_retriever import BaseImageRetriever
from llama_index.core.indices.query.schema import QueryType
from llama_index.core.schema import NodeWit... | """base multi modal retriever."""
from abc import abstractmethod
from typing import List
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.image_retriever import BaseImageRetriever
from llama_index.core.indices.query.schema import QueryType
from llama_index.core.schema import NodeWit... |
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=601)))
# Using 32 GPUS while training
optimizer = dict(type='SGD', lr=0.08, momen... | _base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=601)))
# Using 32 GPUS while training
optimizer = dict(type='SGD', lr=0.08, momen... |
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdAr... | from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdAr... |
# Copyright (c) OpenMMLab. All rights reserved.
from ._fast_stop_training_hook import FastStopTrainingHook # noqa: F401,F403
from ._utils import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results, demo_track_inputs,
get_detector_cfg, get_roi_head_cfg, random_boxes,
... | # Copyright (c) OpenMMLab. All rights reserved.
from ._fast_stop_training_hook import FastStopTrainingHook # noqa: F401,F403
from ._utils import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results, get_detector_cfg,
get_roi_head_cfg, replace_to_ceph)
__all__ = [
... |
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from rich.console import Console
from docarray.base_document.abstract_document import AbstractDocument
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjs... | import os
from typing import Type
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from rich.console import Console
from docarray.base_document.abstract_document import AbstractDocument
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjs... |
"""Test LLM program."""
from unittest.mock import MagicMock
import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
LLMMetadata,
)
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.bridge.pydantic import BaseModel
from typing import List, Optional, Union, Any
from ... | """Test LLM program."""
from unittest.mock import MagicMock
import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
LLMMetadata,
)
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.bridge.pydantic import BaseModel
from typing import List, Optional, Union, Any
from ... |
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseTranslationEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SP... | from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseTranslationEvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransfor... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FCOS(SingleStageDetector):
"""Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FCOS(SingleStageDetector):
"""Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_"""
def __init__(self,
backbone,
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting imp... | # Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting imp... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import UnstructuredMarkdownLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling opti... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import UnstructuredMarkdownLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling opti... |
import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
... | import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
... |
"""Standard LangChain interface tests"""
import os
from typing import Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")... | """Standard LangChain interface tests"""
import os
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API... |
import os
from pathlib import Path
from typing import Any, Callable, Optional, Union
from .folder import default_loader
from .utils import check_integrity, download_and_extract_archive, download_url
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virginia.edu/~... | import os
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from .folder import default_loader
from .utils import check_integrity, download_and_extract_archive, download_url
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virgini... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.wrappers.sklearn_wrapper import (
SKLearnClassifier as SKLearnClassifier,
)
from keras.src.wrappers.sklearn_wrapper import (
SKLearnRegressor as SKLearnRegressor,
)
from keras... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.wrappers.sklearn_wrapper import SKLearnClassifier
from keras.src.wrappers.sklearn_wrapper import SKLearnRegressor
from keras.src.wrappers.sklearn_wrapper import SKLearnTransformer
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmcv import ConfigDict
from mmdet.models.dense_heads import CenterNetHead
def test_center_head_loss():
"""Tests center head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3)... | import numpy as np
import torch
from mmcv import ConfigDict
from mmdet.models.dense_heads import CenterNetHead
def test_center_head_loss():
"""Tests center head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape'... |
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: in... | import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: in... |
# CREDITS: https://github.com/openai/CLIP
import gzip
import html
from functools import lru_cache
from pathlib import Path
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return str(Path(__file__).parents[2] / '.cache/bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
"""
... | # CREDITS: https://github.com/openai/CLIP
import gzip
import html
from functools import lru_cache
from pathlib import Path
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return str(Path(__file__).parents[2] / '.cache/bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
"""
... |
from typing import Dict, Optional, Sequence
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from transformers import CLIPModel, CLIPTokenizer
class CLIPTextEncoder(Executor):
"""Encode text into embeddings using a CLIP model.
... | import os
from typing import Dict, List, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
from transformers import CLIPTokenizer, CLIPModel
class CLIPTextEncoder(Executor):
"""... |
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RU... | # Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, EVALUATORS, HOOKS, LOOPS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS,... |
from typing import Any, List, Optional, Type
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.graph_stores.types import PropertyGraphStore
from llama_index.core.indices.property_graph.sub_retrievers.base import BasePGRetriever
from llama_index.core.llms import LLM
from llama_index.core.prom... | from typing import Any, List, Optional, Type
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.graph_stores.types import PropertyGraphStore
from llama_index.core.indices.property_graph.sub_retrievers.base import BasePGRetriever
from llama_index.core.llms import LLM
from llama_index.core.prom... |
from langchain_core.agents import AgentAction
def format_log_to_str(
intermediate_steps: list[tuple[AgentAction, str]],
observation_prefix: str = "Observation: ",
llm_prefix: str = "Thought: ",
) -> str:
"""Construct the scratchpad that lets the agent continue its thought process.
Args:
i... | from typing import List, Tuple
from langchain_core.agents import AgentAction
def format_log_to_str(
intermediate_steps: List[Tuple[AgentAction, str]],
observation_prefix: str = "Observation: ",
llm_prefix: str = "Thought: ",
) -> str:
"""Construct the scratchpad that lets the agent continue its thoug... |
import os
import subprocess
directory = os.path.dirname(os.path.realpath(__file__))
def run(*command: str) -> None:
print(f">>>>> Running poetry run {' '.join(command)}")
subprocess.run(["poetry", "run"] + list(command), cwd=directory, check=True)
def lint():
try:
run("ruff", "check", ".", "--e... | import os
import subprocess
directory = os.path.dirname(os.path.realpath(__file__))
def run(*command: str) -> None:
print(f">>>>> Running poetry run {' '.join(command)}")
subprocess.run(["poetry", "run"] + list(command), cwd=directory, check=True)
def lint():
try:
run("ruff", "check", ".", "--e... |
from ...utils import is_flax_available, is_torch_available
if is_torch_available():
from .controlnet import ControlNetModel, ControlNetOutput
from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
from .controlnet_hunyuan import (
HunyuanControlNetOutput,
... | from ...utils import is_flax_available, is_torch_available
if is_torch_available():
from .controlnet import ControlNetModel, ControlNetOutput
from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
from .controlnet_hunyuan import (
HunyuanControlNetOutput,
... |
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.10.0"
@keras_export("keras.version")
def version():
return __version__
| from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.9.0"
@keras_export("keras.version")
def version():
return __version__
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
import json
import logging
from typing import List
from langchain_core._api import deprecated
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
DEFAULT_CONNECTION_ST... | import json
import logging
from typing import List
from langchain_core._api import deprecated
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
DEFAULT_CONNECTION_ST... |
from collections.abc import Sequence
from typing import Callable
from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Run... | from collections.abc import Sequence
from typing import Callable
from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Run... |
from llama_index.core.node_parser.text.sentence_window import (
SentenceWindowNodeParser,
)
from llama_index.core.schema import Document
def test_split_and_window() -> None:
document = Document(text="This is a test 1. This is a test 2. This is a test 3.")
node_parser = SentenceWindowNodeParser.from_defau... | from llama_index.core.node_parser.text.sentence_window import (
SentenceWindowNodeParser,
)
from llama_index.core.schema import Document
def test_split_and_window() -> None:
document = Document(text="This is a test 1. This is a test 2. This is a test 3.")
node_parser = SentenceWindowNodeParser.from_defau... |
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import HnswDocumentIndex
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class NestedDoc(BaseDoc):
... | import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import HnswDocumentIndex
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class NestedDoc(BaseDoc):
... |
import pytest
import torchaudio
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH
from torchaudio.prototype.pipelines import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
@pytest.mark.parametrize(
"bundle,lang,expected",
[
(EMFORMER_RNNT_BASE_LIBRISPEECH, "en", "i have that curio... | import pytest
import torchaudio
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH
@pytest.mark.parametrize(
"bundle,lang,expected",
[(EMFORMER_RNNT_BASE_LIBRISPEECH, "en", "i have that curiosity beside me at this moment")],
)
def test_rnnt(bundle, sample_speech, expected):
feature_extractor ... |