| input (string, 33–5k chars) | output (string, 32–5k chars) |
|---|---|
from typing import Any, cast
import torch
from torch import nn
from .base_structured_sparsifier import BaseStructuredSparsifier
from .parametrization import FakeStructuredSparsity
class LSTMSaliencyPruner(BaseStructuredSparsifier):
"""
Prune packed LSTM weights based on saliency.
For each layer {k} insi... | # mypy: allow-untyped-defs
from typing import cast
import torch
from .base_structured_sparsifier import BaseStructuredSparsifier, FakeStructuredSparsity
class LSTMSaliencyPruner(BaseStructuredSparsifier):
"""
Prune packed LSTM weights based on saliency.
For each layer {k} inside a LSTM, we have two pack... |
__version__ = '0.14.10'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
| __version__ = '0.14.9'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
from collections.abc import Iterable
from pathlib import Path
from typing import Any
from tomlkit import dump, inline_table, load
from tomlkit.items import InlineTable
def _get_dep_inline_table(path: Path) -> InlineTable:
dep = inline_table()
dep.update({"path": str(path), "develop": True})
return dep
... | from pathlib import Path
from typing import Any, Dict, Iterable, Tuple
from tomlkit import dump, inline_table, load
from tomlkit.items import InlineTable
def _get_dep_inline_table(path: Path) -> InlineTable:
dep = inline_table()
dep.update({"path": str(path), "develop": True})
return dep
def add_depend... |
"""Create Package variants for PyPI distribution."""
import argparse
import os
from test_utils import PY_PACKAGE
IN_PATH = os.path.join(PY_PACKAGE, "pyproject.toml.in")
OUT_PATH = os.path.join(PY_PACKAGE, "pyproject.toml")
NCCL_WHL = """ \"nvidia-nccl-cu12 ; platform_system == 'Linux' and platform_machine != 'aa... | """Create Package variants for PyPI distribution."""
import argparse
import os
from test_utils import PY_PACKAGE
IN_PATH = os.path.join(PY_PACKAGE, "pyproject.toml.in")
OUT_PATH = os.path.join(PY_PACKAGE, "pyproject.toml")
CHOICES = ["default", "cpu", "manylinux2014"]
NCCL_WHL = """ \"nvidia-nccl-cu12 ; platfo... |
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
prompt_template = """Use the following pieces of ... | # flake8: noqa
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
prompt_template = """Use the follow... |
import logging
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
logger = log... | import logging
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
logger = log... |
from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers import Wrapper
from keras.src.layers.input_spec import InputSpec
from keras.src.utils.numerical_utils import normalize
@keras_export("keras.layers.SpectralNormalization")
class SpectralNorm... | from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers import Wrapper
from keras.src.layers.input_spec import InputSpec
from keras.src.utils.numerical_utils import normalize
@keras_export("keras.layers.SpectralNormalization")
class SpectralNorm... |
from typing import TYPE_CHECKING
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .linear import LinearOAuthHandler
from .notion import NotionOAuthHandler
from .twitter import TwitterOAuthHandler
if TYPE_CHECKING:
from ..providers import ProviderName
from .base import BaseOAu... | from typing import TYPE_CHECKING
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .notion import NotionOAuthHandler
from .twitter import TwitterOAuthHandler
if TYPE_CHECKING:
from ..providers import ProviderName
from .base import BaseOAuthHandler
# --8<-- [start:HANDLERS_BY_... |
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_la... | from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_la... |
"""Usage utilities."""
from typing import Callable
def _dict_int_op(
left: dict,
right: dict,
op: Callable[[int, int], int],
*,
default: int = 0,
depth: int = 0,
max_depth: int = 100,
) -> dict:
if depth >= max_depth:
msg = f"{max_depth=} exceeded, unable to combine dicts."
... | from typing import Callable
def _dict_int_op(
left: dict,
right: dict,
op: Callable[[int, int], int],
*,
default: int = 0,
depth: int = 0,
max_depth: int = 100,
) -> dict:
if depth >= max_depth:
msg = f"{max_depth=} exceeded, unable to combine dicts."
raise ValueError(m... |
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
"""Product extraction pack."""
import asyncio
from typing import Any, Dict
from llama_index.core import SimpleDirectoryReader
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.program.multi_modal_llm_program import (
M... | """Product extraction pack."""
import asyncio
from typing import Any, Dict
from llama_index.core import SimpleDirectoryReader
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.program.multi_modal_llm_program import (
Mu... |
import json
from typing import AsyncGenerator, Dict
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.llms.bedrock import (
ALTERNATION_ERROR,
Bedrock,
_human_assistant_format,
)
TEST_CASES = {
"""Hey""": """
Human: Hey
Assistant:""",
"""
Human: Hello
Assistant... | import json
from typing import AsyncGenerator, Dict
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.llms.bedrock import (
ALTERNATION_ERROR,
Bedrock,
_human_assistant_format,
)
TEST_CASES = {
"""Hey""": """
Human: Hey
Assistant:""",
"""
Human: Hello
Assistant... |
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Minimum")
class Minimum(Merge):
"""Computes elementwise minimum on a list of inputs.
It takes as input a list of tensors, all of the same shape,
and re... | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Minimum")
class Minimum(Merge):
"""Computes elementwise minimum on a list of inputs.
It takes as input a list of tensors, all of the same shape,
and re... |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... | """
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections import Sequence
from pathlib import Path
import mmcv
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections import Sequence
from pathlib import Path
import mmcv
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_... |
_base_ = './yolact_r50_1xb8-55e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| _base_ = './yolact_r50_1x8_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
from typing import Any, Iterable, Protocol, Sequence, runtime_checkable
import uuid
from llama_index.core.schema import Document as LIDocument
from llama_index.core.node_parser import NodeParser
from docling_core.transforms.chunker import BaseChunker, HierarchicalChunker
from docling_core.types import DoclingDocument... | from typing import Any, Iterable, Protocol, Sequence, runtime_checkable
import uuid
from llama_index.core.schema import Document as LIDocument
from llama_index.core.node_parser import NodeParser
from docling_core.transforms.chunker import BaseChunker, HierarchicalChunker
from docling_core.types import DoclingDocument... |
"""Test LLM Bash functionality."""
import os
import sys
from unittest.mock import patch
import pytest
from langchain.chains.llm import LLMChain
from langchain.evaluation.loading import load_evaluator
from langchain.evaluation.qa.eval_chain import (
ContextQAEvalChain,
CotQAEvalChain,
QAEvalChain,
_pa... | """Test LLM Bash functionality."""
import os
import sys
from unittest.mock import patch
import pytest
from langchain.chains.llm import LLMChain
from langchain.evaluation.loading import load_evaluator
from langchain.evaluation.qa.eval_chain import (
ContextQAEvalChain,
CotQAEvalChain,
QAEvalChain,
_pa... |
from contextlib import nullcontext
from sentence_transformers.evaluation import SentenceEvaluator
import logging
import os
import csv
from typing import List, Optional
logger = logging.getLogger(__name__)
class MSEEvaluator(SentenceEvaluator):
"""
Computes the mean squared error (x100) between the computed ... | from sentence_transformers.evaluation import SentenceEvaluator
import logging
import os
import csv
from typing import List
logger = logging.getLogger(__name__)
class MSEEvaluator(SentenceEvaluator):
"""
Computes the mean squared error (x100) between the computed sentence embedding
and some target senten... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | import random
import string
import pytest
@pytest.fixture(scope='function')
def tmp_index_name():
letters = string.ascii_lowercase
random_string = ''.join(random.choice(letters) for _ in range(15))
return random_string
|
from io import BytesIO
from typing import TYPE_CHECKING, List, NamedTuple, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.bytes.base_bytes import BaseBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor import AudioNdArray, NdArray, VideoNdArr... | from io import BytesIO
from typing import TYPE_CHECKING, Any, List, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from doca... |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If ex... | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If ex... |
"""
Elasticsearch (or Opensearch) reader over REST api.
This only uses the basic search api, so it will work with Elasticsearch and Opensearch.
"""
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_... | """
Elasticsearch (or Opensearch) reader over REST api.
This only uses the basic search api, so it will work with Elasticsearch and Opensearch.
"""
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama... |
import argparse
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a Deployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: ar... | import argparse
from jina.enums import ProtocolType
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a Deployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`Netwo... |
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.5'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versi... | # Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.4'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versi... |
"""
This script trains sentence transformers with a triplet loss function.
As corpus, we use the wikipedia sections dataset that was described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
import logging
import traceback
from datetime import datetime
from datasets import load_da... | """
This script trains sentence transformers with a triplet loss function.
As corpus, we use the wikipedia sections dataset that was described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
import logging
import traceback
from datetime import datetime
from datasets import load_da... |
__version__ = '0.13.14'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
| __version__ = '0.13.13'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__: List[str] = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List ... | # coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__: List[str] = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List ... |
from dataclasses import dataclass, fields
import numpy as np
import pytest
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
RegressorMixin,
TransformerMixin,
)
from sklearn.pipeline import Pipeline
from sklearn.utils import (
Tags,
get_tags,
)
from sklearn.utils.estimator_checks impo... | from dataclasses import dataclass, fields
import numpy as np
import pytest
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
RegressorMixin,
TransformerMixin,
)
from sklearn.pipeline import Pipeline
from sklearn.utils import (
Tags,
get_tags,
)
from sklearn.utils.estimator_checks impo... |
import os
import sys
import pytest
import torch
import torchaudio
from torchaudio.prototype.pipelines import CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "examples"))
from source_separation.utils.metrics import sdr
@pytes... | import os
import sys
import pytest
import torch
import torchaudio
from torchaudio.prototype.pipelines import CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB_PLUS
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "examples"))
from source_separation.utils.metrics import sdr
@pytest.mark.parametrize(
... |
from abc import abstractmethod
import logging
from typing import Any, Dict, List, Optional
from llama_index.core.graph_stores.types import GraphStore
from .neptune import refresh_schema
logger = logging.getLogger(__name__)
class NeptuneBaseGraphStore(GraphStore):
"""
This is an abstract base class that repre... | from abc import abstractmethod
import logging
from typing import Any, Dict, List, Optional
from llama_index.core.graph_stores.types import GraphStore
from .neptune import refresh_schema
logger = logging.getLogger(__name__)
class NeptuneBaseGraphStore(GraphStore):
"""This is an abstract base class that represents... |
# pylint: disable=invalid-name,unused-import
"""For compatibility and optional dependencies."""
import importlib.util
import logging
import sys
import types
from typing import Any, Sequence, cast
import numpy as np
from ._typing import _T
assert sys.version_info[0] == 3, "Python 2 is no longer supported."
def py_s... | # pylint: disable=invalid-name,unused-import
"""For compatibility and optional dependencies."""
import importlib.util
import logging
import sys
import types
from typing import Any, Sequence, cast
import numpy as np
from ._typing import _T
assert sys.version_info[0] == 3, "Python 2 is no longer supported."
def py_s... |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.device import is_npu_available
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_... | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.device import is_npu_available
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_... |
import json
import os
import pytest
from jina import __version__
from jina.hubble import HubExecutor
from jina.hubble.hubio import HubIO
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master', (True, ... | import json
import os
import pytest
from jina import __version__
from jina.hubble import HubExecutor
from jina.hubble.hubio import HubIO
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master', (True, ... |
from langchain_core.documents import BaseDocumentTransformer, Document
__all__ = ["BaseDocumentTransformer", "Document"]
| from langchain_core.documents import BaseDocumentTransformer, Document
__all__ = ["Document", "BaseDocumentTransformer"]
|
import logging
import os
import torch
from torchaudio._internal import download_url_to_file, module_utils as _mod_utils
def _get_chars():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"a... | import logging
import os
import torch
from torchaudio._internal import (
download_url_to_file,
module_utils as _mod_utils,
)
def _get_chars():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ... |
def list_all_runtimes():
"""List all public runtimes that can be used directly with :class:`jina.orchestrate.pods.Pod`
# noqa: DAR101
# noqa: DAR201
"""
from jina.serve.runtimes.base import BaseRuntime
from jina.serve.runtimes.worker import WorkerRuntime
return [
k
for k, s... | def list_all_runtimes():
"""List all public runtimes that can be used directly with :class:`jina.orchestrate.pods.Pod`
# noqa: DAR101
# noqa: DAR201
"""
from jina.serve.runtimes.base import BaseRuntime
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime
from jina.serve.runtimes.... |
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFuncti... | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFuncti... |
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
... | from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
m... |
# Copyright (c) OpenMMLab. All rights reserved.
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .faster_rcnn import FasterRCNN
@MODELS.register_module()
class TridentFasterRCNN(FasterRCNN):
"... | # Copyright (c) OpenMMLab. All rights reserved.
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .faster_rcnn import FasterRCNN
@MODELS.register_module()
class TridentFasterRCNN(FasterRCNN):
... |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... | import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... |
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor i... | from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.vide... |
import re
from io import BytesIO
from pathlib import Path
from typing import Any, Type
import numpy as np
import pytest
from langchain_core.documents.base import Blob
from langchain_core.language_models import FakeMessagesListChatModel
from langchain_core.messages import ChatMessage
from langchain_community.document_... | import re
from pathlib import Path
from typing import Any, Type
import pytest
from langchain_core.documents.base import Blob
from langchain_core.language_models import FakeMessagesListChatModel
from langchain_core.messages import ChatMessage
from langchain_community.document_loaders.parsers.images import (
LLMIma... |
from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class ComparisonOperator(Enum):
EQUAL = "=="
NOT_EQUAL = "!="
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUAL = ">="
L... | from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class ComparisonOperator(Enum):
EQUAL = "=="
NOT_EQUAL = "!="
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUAL = ">="
L... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
import spacy
from jina import Document, DocumentArray
try:
from spacy_text_encoder import SpacyTextEncoder
except:
from ...spacy_text_encoder import SpacyTextEncoder
cur_dir = ... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
import spacy
from jina import Document, DocumentArray
try:
from spacy_text_encoder import SpacyTextEncoder
except:
from jinahub.encoder.spacy_text_encoder import SpacyTextEncode... |
import warnings
from typing import Any
from langchain_core.memory import BaseMemory
from pydantic import field_validator
from langchain.memory.chat_memory import BaseChatMemory
class CombinedMemory(BaseMemory):
"""Combining multiple memories' data together."""
memories: list[BaseMemory]
"""For tracking... | import warnings
from typing import Any
from langchain_core.memory import BaseMemory
from pydantic import field_validator
from langchain.memory.chat_memory import BaseChatMemory
class CombinedMemory(BaseMemory):
"""Combining multiple memories' data together."""
memories: list[BaseMemory]
"""For tracking... |
from ...models.controlnets.multicontrolnet import MultiControlNetModel
from ...utils import deprecate, logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(MultiControlNetModel):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `MultiControlNetModel` from `diffus... | from ...models.controlnets.multicontrolnet import MultiControlNetModel
from ...utils import deprecate, logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(MultiControlNetModel):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `MultiControlNetModel` from `diffus... |
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, ... | _base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, ... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.export.saved_model import ExportArchive
| """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.export.export_lib import ExportArchive
|
# Copyright (c) Meta Platforms, Inc. and affiliates
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.meta import LlamaLLM
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in LlamaLLM.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
| from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.meta import LlamaLLM
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in LlamaLLM.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
|
_base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
ga... | _base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
ga... |
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision... | from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedI... |
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
... | _base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='FCOS',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='FCOS',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
... |
from typing import TYPE_CHECKING
import tensorflow as tf
if TYPE_CHECKING: # pragma: no cover
from tensorflow import Tensor
import numpy
def _get_tf_device(device: str):
return tf.device('/GPU:0') if device == 'cuda' else tf.device('/CPU:0')
def cosine(
x_mat: 'Tensor', y_mat: 'Tensor', eps: floa... | from typing import TYPE_CHECKING
import tensorflow as tf
if TYPE_CHECKING:
from tensorflow import Tensor
import numpy
def _get_tf_device(device: str):
return tf.device('/GPU:0') if device == 'cuda' else tf.device('/CPU:0')
def cosine(
x_mat: 'Tensor', y_mat: 'Tensor', eps: float = 1e-7, device: st... |
import importlib
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py310
@pytest.fixture(
name="client",
params=[
"tutorial003_05",
pytest.param("tutorial003_05_py310", marks=needs_py310),
],
)
def get_client(request: pytest.FixtureRequest):
mod = imp... | from fastapi.testclient import TestClient
from docs_src.response_model.tutorial003_05 import app
client = TestClient(app)
def test_get_portal():
response = client.get("/portal")
assert response.status_code == 200, response.text
assert response.json() == {"message": "Here's your interdimensional portal."... |
from dataclasses import dataclass
from typing import List, Union
import numpy as np
import PIL.Image
import torch
from diffusers.utils import BaseOutput, get_logger
logger = get_logger(__name__)
@dataclass
class CosmosPipelineOutput(BaseOutput):
r"""
Output class for Cosmos any-to-world/video pipelines.
... | from dataclasses import dataclass
import torch
from diffusers.utils import BaseOutput
@dataclass
class CosmosPipelineOutput(BaseOutput):
r"""
Output class for Cosmos pipelines.
Args:
frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]):
List of video outputs - It ca... |
from __future__ import annotations
from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
__all__ = ["WordTokenizer", "WhitespaceTokenizer", "PhraseTokenizer", "ENGLISH_STOP_WORDS"]
| from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
__all__ = ["WordTokenizer", "WhitespaceTokenizer", "PhraseTokenizer", "ENGLISH_STOP_WORDS"]
|
"""
The pre-trained models produce embeddings of size 512 - 1024. However, when storing a large
number of embeddings, this requires quite a lot of memory / storage.
In this example, we reduce the dimensionality of the embeddings to e.g. 128 dimensions. This significantly
reduces the required memory / storage while mai... | """
The pre-trained models produce embeddings of size 512 - 1024. However, when storing a large
number of embeddings, this requires quite a lot of memory / storage.
In this example, we reduce the dimensionality of the embeddings to e.g. 128 dimensions. This significantly
reduces the required memory / storage while mai... |
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
... | from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
... |
import numpy as np
import keras
from keras import Model
from keras import initializers
from keras import layers
from keras import losses
from keras import metrics
from keras import ops
from keras import optimizers
class MyDense(layers.Layer):
def __init__(self, units, name=None):
super().__init__(name=na... | import numpy as np
import keras
from keras import Model
from keras import initializers
from keras import layers
from keras import losses
from keras import metrics
from keras import ops
from keras import optimizers
class MyDense(layers.Layer):
def __init__(self, units, name=None):
super().__init__(name=na... |
import numpy as np
import torch
from docarray import Document, DocumentArray, Image, Text
from docarray.typing import (
AnyTensor,
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArray... | import numpy as np
import torch
from docarray import Document, DocumentArray, Image, Text
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
Tensor,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmb... |
__version__ = '0.13.31'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
| __version__ = '0.13.30'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
import functools
from typing import (
Optional,
TYPE_CHECKING,
Iterable,
Callable,
Dict,
)
from docarray.array.storage.base.backend import BaseBackendMixin
from docarray import Document
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import (
DocumentArraySourceType,
)
... | import functools
from typing import (
Optional,
TYPE_CHECKING,
Iterable,
Callable,
Dict,
)
from docarray.array.storage.base.backend import BaseBackendMixin
from docarray import Document
if TYPE_CHECKING:
from docarray.typing import (
DocumentArraySourceType,
)
def needs_id2offset... |
_base_ = './faster-rcnn_r50-caffe_c4-1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
... | _base_ = './faster-rcnn_r50-caffe_c4-1x_coco.py'
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=... |
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this cl... | from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this cl... |
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.4'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versi... | # Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.3'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versi... |
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_chann... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_chann... |
from typing import Dict
import torch.nn.functional as F
from torch import Tensor, nn
class Normalize(nn.Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self) -> None:
super(Normalize, self).__init__()
def forward(self, features: Dict[str, Tensor]) -> Dict[str, Tensor... | from typing import Dict
import torch.nn.functional as F
from torch import Tensor, nn
class Normalize(nn.Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self):
super(Normalize, self).__init__()
def forward(self, features: Dict[str, Tensor]):
features.update({"... |
# -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
**Author**: `Moto Hira <moto@meta.com>`__
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transfo... | # -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transforms as T
print(torch.__version__)
print(tor... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...transformer_tf_text_encode import TransformerTFTextEncoder
target_dim = 768
@pytest.fixtur... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...transformer_tf_text_encode import TransformerTFTextEncoder
target_dim = 768
@pytest.fixture... |
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoNdArray,
VideoTorchTenso... | from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoNdArray,
VideoTorchTenso... |
"""Test the criteria eval chain."""
import pytest
from langchain.evaluation.criteria.eval_chain import (
_SUPPORTED_CRITERIA,
Criteria,
CriteriaEvalChain,
CriteriaResultOutputParser,
LabeledCriteriaEvalChain,
)
from langchain.evaluation.schema import StringEvaluator
from tests.unit_tests.llms.fake... | """Test the criteria eval chain."""
import pytest
from langchain.evaluation.criteria.eval_chain import (
_SUPPORTED_CRITERIA,
Criteria,
CriteriaEvalChain,
CriteriaResultOutputParser,
LabeledCriteriaEvalChain,
)
from langchain.evaluation.schema import StringEvaluator
from tests.unit_tests.llms.fake... |
_base_ = 'deformable-detr_refine_r50_16xb2-50e_coco.py'
model = dict(as_two_stage=True)
| _base_ = 'deformable-detr_refine_r50_16xb2-50e_coco.py'
model = dict(bbox_head=dict(as_two_stage=True))
|
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit, UsageTransactionMetadata
from ba... | from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit
from backend.data.execution impor... |
import logging
import tempfile
import typing
import autogpt_libs.auth.depends
import fastapi
import fastapi.responses
import prisma.enums
import backend.server.v2.store.db
import backend.server.v2.store.exceptions
import backend.server.v2.store.model
import backend.util.json
logger = logging.getLogger(__name__)
rou... | import logging
import tempfile
import typing
import autogpt_libs.auth.depends
import fastapi
import fastapi.responses
import prisma.enums
import backend.server.v2.store.db
import backend.server.v2.store.exceptions
import backend.server.v2.store.model
import backend.util.json
logger = logging.getLogger(__name__)
rou... |
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from ..builder import PIPELINES
@PIPELINES.register_module()
class InstaBoost:
r"""Data augmentation method in `InstaBoost: Boosting Instance
Segmentation Via Probability Map Guided Copy-Pasting
<https://arxiv.org/abs/1908.07801>`_.
... | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from ..builder import PIPELINES
@PIPELINES.register_module()
class InstaBoost:
r"""Data augmentation method in `InstaBoost: Boosting Instance
Segmentation Via Probability Map Guided Copy-Pasting
<https://arxiv.org/abs/1908.07801>`_.
... |
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.document.io.json import orjson_dumps
from docarray.typing import AudioNdArray, AudioTorchTensor, AudioUrl
from tests import TOYDATA_DIR
AUD... | from typing import Optional
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.document.io.json import orjson_dumps
from docarray.typing import AudioNdArray, AudioTorchTensor, AudioUrl
from tests import TOYDATA_DIR
AUDIO_FILES = [
... |
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=Tru... | _base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=Tru... |
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0,... | import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim):
""... |
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
import torchaudio
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
_C... | import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
import torchaudio
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
_C... |
import os
import random
import time
from typing import Dict
import numpy as np
import pytest
from jina import Document, Flow, DocumentArray, requests
from jina_commons.indexers.dump import dump_docs
from jinahub.indexers.searcher.compound.NumpyLMDBSearcher.npfile import NumpyLMDBSearcher
from jinahub.indexers.storage... | import os
import random
import time
from typing import Dict
import numpy as np
import pytest
from jina import Document, Flow, DocumentArray, requests
from jina_commons.indexers.dump import dump_docs
from jinahub.indexers.searcher.compound.NumpyLMDBSearcher import NumpyLMDBSearcher
from jinahub.indexers.storage.LMDBSt... |
from typing import (
Union,
TYPE_CHECKING,
TypeVar,
Sequence,
Optional,
List,
Dict,
Generator,
Iterable,
Tuple,
ForwardRef,
)
if TYPE_CHECKING:
import scipy.sparse
import tensorflow
import torch
import numpy as np
from PIL.Image import Image as PILImage
... | from typing import (
Union,
TYPE_CHECKING,
TypeVar,
Sequence,
Optional,
List,
Dict,
Generator,
Iterable,
Tuple,
ForwardRef,
)
if TYPE_CHECKING:
import scipy.sparse
import tensorflow
import torch
import numpy as np
from PIL.Image import Image as PILImage
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.file_management.toolkit import (
FileManagementToolkit,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprec... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.file_management.toolkit import (
FileManagementToolkit,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprec... |
from typing import Optional
import numpy as np
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import AnyTensor, ImageUrl
from jina import Deployment, Executor, Flow, requests
def test_different_document_schema():
class Image(BaseDoc):
tensor: Optional[... | from typing import Optional
import numpy as np
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import AnyTensor, ImageUrl
from jina import Deployment, Executor, Flow, requests
def test_different_document_schema():
class Image(BaseDoc):
tensor: Optional[... |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If ex... | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If ex... |
import json
import pytest
import types
from requests import Response
from unittest import mock
from typing import Optional, Type
from llama_index.core.embeddings import BaseEmbedding
from llama_index.embeddings.siliconflow import SiliconFlowEmbedding
class MockAsyncResponse:
def __init__(self, json_data) -> None:... | import json
import pytest
import types
from requests import Response
from unittest import mock
from typing import Optional, Type
from llama_index.core.embeddings import BaseEmbedding
from llama_index.embeddings.siliconflow import SiliconFlowEmbedding
class MockAsyncResponse:
def __init__(self, json_data) -> None:... |
import os
import time
import pytest
@pytest.fixture(scope='function', autouse=True)
def patched_random_port(mocker):
print('using random port fixture...')
used_ports = set()
from jina.helper import random_port
def _random_port():
for i in range(10):
_port = random_port()
... | import os
import time
import pytest
@pytest.fixture(scope='function', autouse=True)
def patched_random_port(mocker):
print('using random port fixture...')
used_ports = set()
from jina.helper import random_port
def _random_port():
for i in range(10):
_port = random_port()
... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock, patch
from mmdet.engine.hooks import YOLOXModeSwitchHook
class TestYOLOXModeSwitchHook(TestCase):
@patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')
def test_is_model_wrapper_and_p... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock, patch
from mmdet.engine.hooks import YOLOXModeSwitchHook
class TestYOLOXModeSwitchHook(TestCase):
@patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')
def test_is_model_wrapper_and_p... |
from llama_index.vector_stores.faiss.base import FaissVectorStore
from llama_index.vector_stores.faiss.map_store import FaissMapVectorStore
__all__ = ["FaissVectorStore", "FaissMapVectorStore"]
| from llama_index.vector_stores.faiss.base import FaissVectorStore
__all__ = ["FaissVectorStore"]
|
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
ImageBlock,
LLMMetadata,
MessageRole,
TextBlock,
AudioBlock,
DocumentBlock,
)
from l... | from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
ImageBlock,
LLMMetadata,
MessageRole,
TextBlock,
AudioBlock,
)
from llama_index.core.llm... |
"""Configuration for unit tests."""
from collections.abc import Sequence
from importlib import util
import pytest
from pytest import Config, Function, Parser
def pytest_addoption(parser: Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
ac... | """Configuration for unit tests."""
from importlib import util
from typing import Dict, Sequence
import pytest
from pytest import Config, Function, Parser
def pytest_addoption(parser: Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
actio... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import numpy as np
import torch
from mmdet.datasets import OpenImagesDataset
from mmdet.evaluation import OpenImagesMetric
from mmdet.utils import register_all_modules
class TestOpenImagesMetric(unittest.TestCase):
def _create_dummy_results(self):... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
import numpy as np
import torch
from mmdet.datasets import OpenImagesDataset
from mmdet.evaluation import OpenImagesMetric
from mmdet.utils import register_all_modules
class TestOpenImagesMetric(unittest.TestCase):
def _create_dummy_results(self):... |
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.38"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_in... | """Top-level imports for LlamaIndex."""
__version__ = "0.12.37"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_in... |
"""
This file runs Masked Language Model. You provide a training file. Each line is interpreted as a sentence / paragraph.
Optionally, you can also provide a dev file.
The fine-tuned model is stored in the output/model_name folder.
Usage:
python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt... | """
This file runs Masked Language Model. You provide a training file. Each line is interpreted as a sentence / paragraph.
Optionally, you can also provide a dev file.
The fine-tuned model is stored in the output/model_name folder.
Usage:
python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt... |
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.9.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versio... | # Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.4'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versio... |
import pytest
from jina import Client
from jina.enums import GatewayProtocolType
@pytest.mark.parametrize(
'protocol, gateway_type',
[
('http', GatewayProtocolType.HTTP),
('grpc', GatewayProtocolType.GRPC),
('ws', GatewayProtocolType.WEBSOCKET),
(None, None),
],
)
@pytest.... | import pytest
from jina import Client
from jina.enums import GatewayProtocolType
@pytest.mark.parametrize(
'protocol, gateway_type',
[
('http', GatewayProtocolType.HTTP),
('grpc', GatewayProtocolType.GRPC),
('ws', GatewayProtocolType.WEBSOCKET),
(None, None),
],
)
@pytest.... |
from docarray.typing.id import ID
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import Vi... | from docarray.typing.id import ID
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from do... |