| input (string, lengths 33–5k) | output (string, lengths 32–5k) |
|---|---|
# TODO: Awaiting refactoring
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# Set evaluation interval
evaluation = dict(interval=2)
# Set checkpoint interval
checkpoint_config = dict(interval=... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# Set evaluation interval
evaluation = dict(interval=2)
# Set checkpoint interval
checkpoint_config = dict(interval=4)
# yapf:disable
log_config... |
import pytest
from typing import List
from unittest.mock import patch, MagicMock
from llama_index.embeddings.ibm import WatsonxEmbeddings
class TestWasonxLLMInference:
TEST_URL = "https://us-south.ml.cloud.ibm.com"
TEST_APIKEY = "apikey"
TEST_PROJECT_ID = "project_id"
TEST_MODEL = "test_model"
... | import pytest
from typing import List
from unittest.mock import patch, MagicMock
from llama_index.embeddings.ibm import WatsonxEmbeddings
class TestWasonxLLMInference:
TEST_URL = "https://us-south.ml.cloud.ibm.com"
TEST_APIKEY = "apikey"
TEST_PROJECT_ID = "project_id"
TEST_MODEL = "test_model"
... |
"""
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Be... | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Be... |
"""
The :mod:`sklearn._loss` module includes loss function classes suitable for
fitting classification and regression tasks.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from .loss import (
AbsoluteError,
HalfBinomialLoss,
HalfGammaLoss,
HalfMultinomialLoss,
H... | """
The :mod:`sklearn._loss` module includes loss function classes suitable for
fitting classification and regression tasks.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from .loss import (
AbsoluteError,
HalfBinomialLoss,
HalfGammaLoss,
HalfMultinomialLoss,
H... |
import os
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredPowerPointLoader(UnstructuredFileLoader):
"""Load `Microsoft PowerPoint` files using `Unstructu... | import os
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredPowerPointLoader(UnstructuredFileLoader):
"""Load `Microsoft PowerPoint` files using `Unstructu... |
from workflows.decorators import StepConfig # noqa
from workflows.decorators import step as upstream_step # noqa
from typing import Callable, Any
def step(*args: Any, **kwargs: Any) -> Callable:
# Remove old, unused parameter
kwargs.pop("pass_context", None)
return upstream_step(*args, **kwargs)
| from typing import TYPE_CHECKING, Any, Callable, List, Optional, Type
from llama_index.core.bridge.pydantic import BaseModel, ConfigDict
from .errors import WorkflowValidationError
from .utils import (
ServiceDefinition,
inspect_signature,
is_free_function,
validate_step_signature,
)
from .resource im... |
from typing import Any
from langchain_core.memory import BaseMemory
class ReadOnlySharedMemory(BaseMemory):
"""Memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> list[str]:
"""Return memory variables."""
return self.... | from typing import Any, Dict, List
from langchain_core.memory import BaseMemory
class ReadOnlySharedMemory(BaseMemory):
"""Memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
... |
import abc
import importlib
import pathlib
from typing import Any, Collection, Dict, Iterator, List, Optional, Sequence, Union
from torchdata.datapipes.iter import IterDataPipe
from torchvision.datasets.utils import verify_str_arg
from ._resource import OnlineResource
class Dataset(IterDataPipe[Dict[str, Any]], abc... | import abc
import importlib
import pathlib
from typing import Any, Collection, Dict, Iterator, List, Optional, Sequence, Union
from torch.utils.data import IterDataPipe
from torchvision.datasets.utils import verify_str_arg
from ._resource import OnlineResource
class Dataset(IterDataPipe[Dict[str, Any]], abc.ABC):
... |
import copy
import warnings
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
... | import copy
import warnings
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
... |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... | """
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... |
# Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... |
import io
from typing import Tuple, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.bytes.base_bytes import BaseBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio import AudioNdArray
from docarray.utils._internal.misc import import_libr... | import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio... |
from __future__ import annotations
from .CrossEncoder import CrossEncoder
__all__ = ["CrossEncoder"]
| from .CrossEncoder import CrossEncoder
__all__ = ["CrossEncoder"]
|
__version__ = '0.37.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()... | __version__ = '0.37.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()... |
from __future__ import annotations
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f... | import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main"... |
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jin... | import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jin... |
"""Internal representation of a structured query language."""
from __future__ import annotations
from abc import ABC, abstractmethod
from enum import Enum
from typing import TYPE_CHECKING, Any, Optional, Union
from pydantic import BaseModel
if TYPE_CHECKING:
from collections.abc import Sequence
class Visitor(... | """Internal representation of a structured query language."""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Sequence
from enum import Enum
from typing import Any, Optional, Union
from pydantic import BaseModel
class Visitor(ABC):
"""Defines interface for IR ... |
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _extract_tar, _load_waveform
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
SAMPLE_... | import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform, extract_archive
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
SAMP... |
from collections.abc import Generator
from langchain_huggingface.llms import HuggingFacePipeline
def test_huggingface_pipeline_streaming() -> None:
"""Test streaming tokens from huggingface_pipeline."""
llm = HuggingFacePipeline.from_model_id(
model_id="openai-community/gpt2",
task="text-gene... | from collections.abc import Generator
from langchain_huggingface.llms import HuggingFacePipeline
def test_huggingface_pipeline_streaming() -> None:
"""Test streaming tokens from huggingface_pipeline."""
llm = HuggingFacePipeline.from_model_id(
model_id="gpt2", task="text-generation", pipeline_kwargs=... |
# Copyright (c) OpenMMLab. All rights reserved.
import json
import os.path as osp
from typing import List, Optional
from mmengine.fileio import get_local_path
from mmdet.registry import DATASETS
from .base_det_dataset import BaseDetDataset
@DATASETS.register_module()
class ODVGDataset(BaseDetDataset):
"""object... | # Copyright (c) OpenMMLab. All rights reserved.
import json
import os.path as osp
from typing import List, Optional
from mmengine.fileio import get_local_path
from mmdet.registry import DATASETS
from .base_det_dataset import BaseDetDataset
@DATASETS.register_module()
class ODVGDataset(BaseDetDataset):
"""object... |
import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
... | import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling opti... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling opti... |
"""
This script contains an example of how to perform semantic search with PyTorch. It performs exact nearest neighbor search.
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions (we only use about 100k):
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pai... | """
This script contains an example of how to perform semantic search with PyTorch. It performs exact nearest neighbor search.
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions (we only use about 100k):
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pai... |
import asyncio
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
__all__ = ['WebSocketGatewayRuntime']
from jina.serve.runtimes.gateway.websocket.gateway import WebSocketGateway
class WebSocketGatewayRuntime(GatewayRuntime):
"""Runtime ... | import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
__all__ = ['WebSocketGatewayRuntime']
class WebSocketGatewayRuntime(Gatewa... |
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675... |
# Copyright (c) OpenMMLab. All rights reserved.
"""Get image metas on a specific dataset.
Here is an example of how to run this script.
Example:
python tools/misc/get_image_metas.py ${CONFIG} \
--out ${OUTPUT FILE NAME}
"""
import argparse
import csv
import os.path as osp
from multiprocessing import Pool
import mmc... | # Copyright (c) OpenMMLab. All rights reserved.
"""Get image metas on a specific dataset.
Here is an example of how to run this script.
Example:
python tools/misc/get_image_metas.py ${CONFIG} \
--out ${OUTPUT FILE NAME}
"""
import argparse
import csv
import os.path as osp
from multiprocessing import Pool
import mmc... |
"""
This file contains some utility functions used to find parallel sentences
in two monolingual corpora.
Code in this file has been adapted from the LASER repository:
https://github.com/facebookresearch/LASER
"""
import gzip
import lzma
import time
import faiss
import numpy as np
######## Functions to find and... | """
This file contains some utility functions used to find parallel sentences
in two monolingual corpora.
Code in this file has been adapted from the LASER repository:
https://github.com/facebookresearch/LASER
"""
import gzip
import lzma
import time
import faiss
import numpy as np
######## Functions to find and... |
_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
depths=depths, init_cfg=dict(type='Pretrained',
... | _base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
depths=depths, init_cfg=dict(type='Pretrained',
... |
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor, VideoBytes
from docarray.typing.tensor.abstract_tensor import Abstract... | from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor, VideoBytes
from docarray.typing.tensor.abstract_tensor import Abstract... |
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=1)))
metainfo = {
'CLASSES': ('person', ),
'PALETTE': [
(220, 20, 60),
]
}
train_dataloader = dict(dataset=dict(metainfo=metainfo))
val_dataloader = dict(dataset=dict(metainfo=metainfo))
test_... | _base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=1)))
classes = ('person', )
data = dict(
train=dict(classes=classes),
val=dict(classes=classes),
test=dict(classes=classes))
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/fa... |
"""Standard LangChain interface tests."""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain... | """Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_... |
"""Test tool spec."""
from typing import List, Tuple, Union
import pytest
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.tools.types import ToolMetadata
from llama_index.core.workflow import Context
class FooSchema(BaseMode... | """Test tool spec."""
from typing import List, Tuple, Union
import pytest
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.tools.types import ToolMetadata
from llama_index.core.workflow import Context
class FooSchema(BaseMode... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Union[dict, tuple, list]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory ... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmengine.registry import HOOKS
from mmengine.structures import BaseDataElement
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Rele... |
from typing import Any, Iterable, Protocol, Sequence, runtime_checkable
import uuid
from llama_index.core.schema import Document as LIDocument
from llama_index.core.node_parser import NodeParser
from docling_core.transforms.chunker import BaseChunker, HierarchicalChunker
from docling_core.types import DoclingDocument... | from typing import Any, Iterable, Protocol, Sequence, runtime_checkable
import uuid
from llama_index.core.schema import Document as LIDocument
from llama_index.core.node_parser import NodeParser
from docling_core.transforms.chunker import BaseChunker, HierarchicalChunker
from docling_core.types import DoclingDocument... |
import multiprocessing
import time
import grpc
import pytest
import requests
from jina import __jina_env__, __version__
from jina.proto import jina_pb2, jina_pb2_grpc
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import _generate_pod... | import multiprocessing
import time
import grpc
import pytest
import requests
from jina import __jina_env__, __version__
from jina.parsers import set_pod_parser
from jina.proto import jina_pb2, jina_pb2_grpc
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntim... |
"""XGBoost: eXtreme Gradient Boosting library.
Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md
"""
from . import tracker # noqa
from . import collective, dask
from .core import Booster, DataIter, DMatrix, QuantileDMatrix, _py_version, build_info
from .tracker import RabitTracker # noqa
fro... | """XGBoost: eXtreme Gradient Boosting library.
Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md
"""
from . import tracker # noqa
from . import collective, dask
from .core import (
Booster,
DataIter,
DeviceQuantileDMatrix,
DMatrix,
QuantileDMatrix,
_py_version,
bui... |
import logging
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-... | from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling... |
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
... | import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
... |
from pathlib import Path
from typing import Any, Optional, TypedDict
from tomlkit import load
def get_package_root(cwd: Optional[Path] = None) -> Path:
# traverse path for routes to host (any directory holding a pyproject.toml file)
package_root = Path.cwd() if cwd is None else cwd
visited: set[Path] = s... | from pathlib import Path
from typing import Any, Optional, TypedDict
from tomlkit import load
def get_package_root(cwd: Optional[Path] = None) -> Path:
# traverse path for routes to host (any directory holding a pyproject.toml file)
package_root = Path.cwd() if cwd is None else cwd
visited: set[Path] = s... |
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = Ty... | import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = Ty... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
Consider the case of r... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
Consider the case of r... |
"""**Prompt** is the input to the model.
Prompt is often constructed
from multiple components and prompt values. Prompt classes and functions make constructing
and working with prompts easy.
**Class hierarchy:**
.. code-block::
BasePromptTemplate --> PipelinePromptTemplate
StringProm... | """**Prompt** is the input to the model.
Prompt is often constructed
from multiple components and prompt values. Prompt classes and functions make constructing
and working with prompts easy.
**Class hierarchy:**
.. code-block::
BasePromptTemplate --> PipelinePromptTemplate
StringProm... |
import json
from collections.abc import Sequence
from langchain_core.agents import AgentAction
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolMessage,
)
from langchain.agents.output_parsers.tools import ToolAgentAction
def _create_tool_message(
agent_action: ToolAgentAction,
o... | import json
from collections.abc import Sequence
from langchain_core.agents import AgentAction
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolMessage,
)
from langchain.agents.output_parsers.tools import ToolAgentAction
def _create_tool_message(
agent_action: ToolAgentAction, obser... |
"""Setup script."""
import os
import pathlib
from setuptools import find_packages
from setuptools import setup
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_p... | """Setup script."""
import os
import pathlib
from setuptools import find_packages
from setuptools import setup
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_p... |
import torchaudio
_STREAM_READER = [
"StreamReader",
"StreamReaderSourceStream",
"StreamReaderSourceAudioStream",
"StreamReaderSourceVideoStream",
"StreamReaderOutputStream",
]
_STREAM_WRITER = [
"StreamWriter",
]
_LAZILY_IMPORTED = _STREAM_READER + _STREAM_WRITER
def __getattr__(name: str... | import torchaudio
_LAZILY_IMPORTED = [
"StreamReader",
"StreamReaderSourceStream",
"StreamReaderSourceAudioStream",
"StreamReaderSourceVideoStream",
"StreamReaderOutputStream",
]
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
torchaudio._extension._init_ffmpeg()
fr... |
_base_ = './vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_e... | _base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
model = dict(
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
n... |
import json
from typing import (
Any,
Union,
)
from langchain_core._api import deprecated
from pydantic import PrivateAttr
from langchain_anthropic.chat_models import ChatAnthropic
SYSTEM_PROMPT_FORMAT = """In this environment you have access to a set of tools you can use to answer the user's question.
You ... | import json
from typing import (
Any,
Dict,
List,
Union,
)
from langchain_core._api import deprecated
from pydantic import PrivateAttr
from langchain_anthropic.chat_models import ChatAnthropic
SYSTEM_PROMPT_FORMAT = """In this environment you have access to a set of tools you can use to answer the us... |
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample ... | from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample i... |
from collections import defaultdict
import torch
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
import v2_extras
return torchvision.transforms.v2, ... | import torch
import transforms as T
class SegmentationPresetTrain:
def __init__(self, *, base_size, crop_size, hflip_prob=0.5, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
min_size = int(0.5 * base_size)
max_size = int(2.0 * base_size)
trans = [T.RandomResize(min_size, max_size... |
import logging
import os
from argparse import ArgumentParser
import sentencepiece as spm
from average_checkpoints import ensemble
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy
from... | import logging
import os
import pathlib
from argparse import ArgumentParser
import sentencepiece as spm
from average_checkpoints import ensemble
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import D... |
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_head import BBoxHead
from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
Shared4Conv1FCBBoxHead)
from .dii_head import DIIHead
from .double_bbox_head import DoubleConvFCBBoxHead
from .multi_instance_bbox_head import ... | # Copyright (c) OpenMMLab. All rights reserved.
from .bbox_head import BBoxHead
from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
Shared4Conv1FCBBoxHead)
from .dii_head import DIIHead
from .double_bbox_head import DoubleConvFCBBoxHead
from .sabl_head import SABLHead
from .... |
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import ... | import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import ... |
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
... | _base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(require... |
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.storage.registry import _REGISTRY
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with weaviate as storag... | from typing import Union, Iterable
from ..base.seqlike import BaseSequenceLikeMixin
from ..registry import _REGISTRY
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with weaviate as storage"""
def __eq__(self, other):
"""... |
from . import InputExample
import csv
import gzip
import os
class STSDataReader:
"""Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
Default values expect a tab-separated file with the first & second column the sentence pair and third column t... | from . import InputExample
import csv
import gzip
import os
class STSDataReader:
"""
Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
Default values expect a tab-separated file with the first & second column the sentence pair and third col... |
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = No... | checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = No... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling op... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling op... |
"""Tests for tf.distribute related functionality under tf implementation."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.python.eager import context
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.backend... | """Tests for tf.distribute related functionality under tf implementation."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.python.eager import context
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.backend... |
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primar... | from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primar... |
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
from jina.parsers.base import set_base_parser
from jina.parsers.helper import _chf
def mixin_hub_usage_parser(parser):
"""Add the arguments for hub pull to the parser
:param parser: the parser to configure
"""
parser.add_argument(
'--no-usage',
action='store_true',
default=False,
... | from jina.parsers.base import set_base_parser
from jina.parsers.helper import _chf
def mixin_hub_usage_parser(parser):
"""Add the arguments for hub pull to the parser
:param parser: the parser to configure
"""
parser.add_argument(
'--no-usage',
action='store_true',
default=False,
... |
"""Test openai embeddings."""
import numpy as np
import pytest
from langchain_community.embeddings.openai import OpenAIEmbeddings
@pytest.mark.scheduled
def test_openai_embedding_documents() -> None:
"""Test openai embeddings."""
documents = ["foo bar"]
embedding = OpenAIEmbeddings()
output = embedd... | """Test openai embeddings."""
import numpy as np
import pytest
from langchain_community.embeddings.openai import OpenAIEmbeddings
@pytest.mark.scheduled
def test_openai_embedding_documents() -> None:
"""Test openai embeddings."""
documents = ["foo bar"]
embedding = OpenAIEmbeddings()
output = embedd... |
from typing import Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: need to comply with TorchScript syntax -- need annotation and no f-string nor global
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
i = s.find_best_audio_stream... | from typing import Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: need to comply with TorchScript syntax -- need annotation and no f-string nor global
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
i = s.find_best_audio_stream... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledis... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledis... |
import numpy as np
from docarray.array import DocumentArray
from docarray.document import BaseDocument
from docarray.typing import NdArray
def test_get_bulk_attributes_function():
class Mmdoc(BaseDocument):
text: str
tensor: NdArray
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=... | import numpy as np
from docarray.array import DocumentArray
from docarray.document import BaseDocument
from docarray.typing import Tensor
def test_get_bulk_attributes_function():
class Mmdoc(BaseDocument):
text: str
tensor: Tensor
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'... |
from abc import abstractmethod
import pytest
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_tests.base import BaseStandardTests
class RetrieversIntegrationTests(BaseStandardTests):
"""Base class for retrievers integration tests."""
@property... | from abc import abstractmethod
import pytest
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_tests.base import BaseStandardTests
class RetrieversIntegrationTests(BaseStandardTests):
"""
Base class for retrievers integration tests.
"""
... |
from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
)
@pytest.f... | from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
)
@pytest.f... |
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_typ... | from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_typ... |
from urllib.parse import quote
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
class Fa... | from urllib.parse import quote
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class Fa... |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import BaseDataElement as PixelData
from mmengine.data import InstanceData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.s... | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import BaseDataElement as PixelData
from mmengine.data import InstanceData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.s... |
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"])
class MaxPooling1D(BasePooling):
"""Max pooling operation for 1D temporal data.
Downsamples the input representation by taking the... | from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"])
class MaxPooling1D(BasePooling):
"""Max pooling operation for 1D temporal data.
Downsamples the input representation by taking the... |
from __future__ import annotations
from collections.abc import Iterable
from enum import Enum
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses import (
FlopsLoss,
SparseDistillKLDivLoss,
SparseMarginMSELoss,
SparseMultipleNegativesRankingLoss,
)
from sentence_transf... | from __future__ import annotations
from collections.abc import Iterable
from enum import Enum
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses import (
FlopsLoss,
SparseDistillKLDivLoss,
SparseMarginMSELoss,
SparseMultipleNegativesRankingLoss,
)
from sentence_transf... |
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | # flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... |
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
cla... | from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
cla... |
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOO... | # Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOO... |
"""Callback Handler that writes to a file."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
from typing_extensions import override
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
i... | """Callback Handler that writes to a file."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
if TYPE_CHECKING:
from langchain_core... |
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, levels_to_images, mask2nda... | # Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, levels_to_images, mask2nda... |
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import InformationRetrievalEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunc... | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import InformationRetrievalEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunc... |
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... |
# mypy: allow-untyped-defs
import warnings
import torch
import torch.distributed.algorithms.model_averaging.averagers as averagers
class PostLocalSGDOptimizer(torch.optim.Optimizer):
r"""
Wraps an arbitrary :class:`torch.optim.Optimizer` and runs `post-local SGD <https://arxiv.org/abs/1808.07217>`_.
This... | # mypy: allow-untyped-defs
import warnings
import torch
import torch.distributed.algorithms.model_averaging.averagers as averagers
class PostLocalSGDOptimizer(torch.optim.Optimizer):
r"""
Wraps an arbitrary :class:`torch.optim.Optimizer` and runs `post-local SGD <https://arxiv.org/abs/1808.07217>`_.
This... |
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It ca... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It ca... |
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseMSEEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE mod... | from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseMSEEvaluator,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(s... |
# coding: utf-8
"""Get the most recent status of workflow for the current PR.
[usage]
python get_workflow_status.py TRIGGER_PHRASE
TRIGGER_PHRASE: Code phrase that triggers workflow.
"""
import json
from os import environ
from sys import argv, exit
from time import sleep
from urllib import request
def get_runs... | # coding: utf-8
"""Get the most recent status of workflow for the current PR.
[usage]
python get_workflow_status.py TRIGGER_PHRASE
TRIGGER_PHRASE: Code phrase that triggers workflow.
"""
import json
from os import environ
from sys import argv, exit
from time import sleep
from urllib import request
def get_runs(... |
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import HnswDocumentIndex
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
class MyDoc(BaseDoc):
tens: NdArray
def test_configure_dim(tmp_path):
class Schema(Ba... | import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import HnswDocumentIndex
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
class MyDoc(BaseDoc):
tens: NdArray
def test_configure_dim(tmp_path):
class Schema(Ba... |
from enum import Enum
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.utils import pre_init
class EnumOutputParser(BaseOutputParser[Enum]):
"""Parse an output that is one of a set of values."""
enum: type[Enum]
""... | from enum import Enum
from typing import Dict, List, Type
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.utils import pre_init
class EnumOutputParser(BaseOutputParser[Enum]):
"""Parse an output that is one of a set of val... |
import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import is_simple_tensor
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchv... | import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import is_simple_tensor
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchv... |
from llama_index.core.base.llms.types import (
LLMMetadata,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.llms.openai_like import OpenAILike
class LlamaAPI(OpenAILike):
"""
LlamaAPI LLM.
Examples:
`pip install llama-index-llms-llama-api`
```python
from... | from llama_index.core.base.llms.types import (
LLMMetadata,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.llms.openai_like import OpenAILike
class LlamaAPI(OpenAILike):
"""LlamaAPI LLM.
Examples:
`pip install llama-index-llms-llama-api`
```python
from llam... |
from docarray.array.array.array import DocArray
from docarray.array.stacked.array_stacked import DocArrayStacked
__all__ = ['DocArray', 'DocArrayStacked']
| from docarray.array.array.array import DocumentArray
from docarray.array.stacked.array_stacked import DocumentArrayStacked
__all__ = ['DocumentArray', 'DocumentArrayStacked']
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src.testing import test_case
class SpatialDropoutTest(test_case.TestCase):
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_1d(self):
self.run_layer_test(
layers.SpatialD... | import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src.testing import test_case
class SpatialDropoutTest(test_case.TestCase):
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_1d(self):
self.run_layer_test(
layers.SpatialD... |
_base_ = './gfl_r50_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(... | _base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=... |
import os
import platform
import sys
import pkg_resources
from setuptools import find_packages, setup
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux") and pla... | import os
import sys
import pkg_resources
from setuptools import find_packages, setup
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux"):
triton_requirement... |
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from pydantic import BaseModel
# Ignoring type because below is valid... | from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from pydantic import BaseModel
# Ignoring type because below is valid... |
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, m... | # Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, m... |
from langchain_core.globals import get_debug, set_debug
def test_debug_is_settable_via_setter() -> None:
from langchain_core import globals as globals_
from langchain_core.callbacks.manager import _get_debug
previous_value = globals_._debug
previous_fn_reading = _get_debug()
assert previous_value... | from langchain_core.globals import get_debug, set_debug
def test_debug_is_settable_via_setter() -> None:
from langchain_core import globals
from langchain_core.callbacks.manager import _get_debug
previous_value = globals._debug
previous_fn_reading = _get_debug()
assert previous_value == previous_... |
from backend.app import run_processes
from backend.executor import DatabaseManager, ExecutionScheduler
from backend.notifications.notifications import NotificationManager
from backend.server.rest_api import AgentServer
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
... | from backend.app import run_processes
from backend.executor import DatabaseManager, ExecutionScheduler
from backend.server.rest_api import AgentServer
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(
DatabaseManager(),
ExecutionSchedule... |
import sys
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.head.request_handling import HeaderRequestHandler
from jina.parsers import set_pod_parser
def run(*args, **kwargs):
runtime_args = set_pod_parser().parse_args(args)
runtime_args.host = runtime_args.host[0]
run... | import sys
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.head.request_handling import HeaderRequestHandler
from jina.parsers import set_pod_parser
def run(*args, **kwargs):
runtime_args = set_pod_parser().parse_args(args)
runtime_args.host = runtime_args.host[0]
run... |
import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]... | import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]... |
# mypy: allow-untyped-defs
import functools
from collections.abc import Hashable
from dataclasses import fields
class _UnionTag(str):
__slots__ = ("_cls",)
_cls: Hashable
@staticmethod
def create(t, cls):
tag = _UnionTag(t)
assert not hasattr(tag, "_cls")
tag._cls = cls
... | # mypy: allow-untyped-defs
import functools
from collections.abc import Hashable
from dataclasses import fields
class _UnionTag(str):
__slots__ = ("_cls",)
_cls: Hashable
@staticmethod
def create(t, cls):
tag = _UnionTag(t)
assert not hasattr(tag, "_cls")
tag._cls = cls
... |
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"])
class MaxPooling1D(BasePooling):
"""Max pooling operation for 1D temporal data.
Downsamples the input representation by taking the... | from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"])
class MaxPooling1D(BasePooling):
"""Max pooling operation for 1D temporal data.
Downsamples the input representation by taking the... |