| input | output |
|---|---|
"""Test Fireworks API wrapper.
In order to run this test, you need to have an Fireworks api key.
You can get it by registering for free at https://api.fireworks.ai/.
A test key can be found at https://api.fireworks.ai/settings/api-keys
You'll then need to set FIREWORKS_API_KEY environment variable to your api key.
""... | """Test Fireworks API wrapper.
In order to run this test, you need to have an Fireworks api key.
You can get it by registering for free at https://api.fireworks.ai/.
A test key can be found at https://api.fireworks.ai/settings/api-keys
You'll then need to set FIREWORKS_API_KEY environment variable to your api key.
""... |
from typing import Optional, List
import httpx
from httpx import Timeout
from llama_index.core.base.embeddings.base import BaseEmbedding, Embedding
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks.base import CallbackManager
DEFAULT_REQUEST_TIMEOUT = 30.0
class LlamafileEmbedding(... | from typing import Optional, List
import httpx
from httpx import Timeout
from llama_index.core.base.embeddings.base import BaseEmbedding, Embedding
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks.base import CallbackManager
DEFAULT_REQUEST_TIMEOUT = 30.0
class LlamafileEmbedding(... |
import os
import warnings
from modulefinder import Module
import torch
from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Check if torchvision is being impor... | import os
import warnings
from modulefinder import Module
import torch
from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Check if torchvision is being impor... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import MagicMock, Mock
import torch
from torch import nn
from mmengine.hooks import OptimizerHook
class TestOptimizerHook:
def test_after_train_iter(self):
class Model(nn.Module):
def __init__(self):
super(... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from torch import nn
from mmengine.hooks import OptimizerHook
class TestOptimizerHook:
def test_after_train_iter(self):
class Model(nn.Module):
def __init__(self):
super().__init__(... |
from __future__ import annotations
from .CSRSparsity import CSRSparsity
from .MLMTransformer import MLMTransformer
from .SpladePooling import SpladePooling
from .TopKActivation import TopKActivation
__all__ = ["CSRSparsity", "TopKActivation", "MLMTransformer", "SpladePooling"]
| from __future__ import annotations
from .CSRSparsity import CSRSparsity
from .MLMTransformer import MLMTransformer
from .SpladePooling import SpladePooling
from .TopKActivation import TopKActivation
__all__ = ["CSRSparsity", "TopKActivation", "MLMTransformer", "SpladePooling"]
# TODO : Add in models the possibility t... |
import os
import pytest as pytest
from jina import Flow, DocumentArray, Document
from ..redis_storage import RedisStorage
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.mark.parametrize('docker_compose', [compose_yml], indirec... | import os
import pytest as pytest
from jina import Flow, DocumentArray, Document
from .. import RedisStorage
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_co... |
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive
from .vision import VisionDataset
class SUN397(VisionDataset):
"""`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_.
The SUN397 or Scene UNderst... | from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive
from .vision import VisionDataset
class SUN397(VisionDataset):
"""`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_.
The SUN397 or Scene UNderst... |
"""
=========================
Caching nearest neighbors
=========================
This example demonstrates how to precompute the k nearest neighbors before
using them in KNeighborsClassifier. KNeighborsClassifier can compute the
nearest neighbors internally, but precomputing them can have several benefits,
such as fi... | """
=========================
Caching nearest neighbors
=========================
This examples demonstrates how to precompute the k nearest neighbors before
using them in KNeighborsClassifier. KNeighborsClassifier can compute the
nearest neighbors internally, but precomputing them can have several benefits,
such as f... |
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
frozen_stages=-1,
zero_init_residua... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
frozen_stages=-1,
zero_init_residua... |
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py'
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterB... | _base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_270k_coco.py'
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterBasedR... |
from typing import List, Sequence
from llama_index.core.agent.workflow.base_agent import BaseWorkflowAgent
from llama_index.core.agent.workflow.single_agent_workflow import SingleAgentRunnerMixin
from llama_index.core.agent.workflow.workflow_events import (
AgentInput,
AgentOutput,
AgentStream,
ToolCal... | from typing import List, Sequence
from llama_index.core.agent.workflow.base_agent import BaseWorkflowAgent
from llama_index.core.agent.workflow.single_agent_workflow import SingleAgentRunnerMixin
from llama_index.core.agent.workflow.workflow_events import (
AgentInput,
AgentOutput,
AgentStream,
ToolCal... |
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class IterTime... | # Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class IterTimerHook(H... |
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'b... | # Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'b... |
from abc import abstractmethod
from typing import Iterable, Iterator
from qdrant_client import QdrantClient
from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models.models import (
PointIdsList,
PointsList,
ScrollRequest,
PointStruct,
)
from docarray import Document
... | from abc import abstractmethod
from typing import Iterable, Iterator
from qdrant_client import QdrantClient
from qdrant_openapi_client.exceptions import UnexpectedResponse
from qdrant_openapi_client.models.models import (
PointIdsList,
PointsList,
ScrollRequest,
PointStruct,
)
from docarray import Doc... |
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: Spars... | from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: Spars... |
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .c... | # Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .c... |
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet.registry import TASK_UTILS
from .random_sampler import RandomSampler
@TASK_UTILS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
"""Instance balanced sampler that samples equal number of positive samples... | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from ..builder import BBOX_SAMPLERS
from .random_sampler import RandomSampler
@BBOX_SAMPLERS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
"""Instance balanced sampler that samples equal number of positive sample... |
from backend.blocks.nvidia._auth import (
NvidiaCredentials,
NvidiaCredentialsField,
NvidiaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.type import Medi... | from backend.blocks.nvidia._auth import (
NvidiaCredentials,
NvidiaCredentialsField,
NvidiaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
from backend.util.type import Medi... |
_base_ = './mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
... | _base_ = './mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py'
# learning policy
lr_config = dict(step=[20, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union
from docarray.array.mixins import ParallelMixin, GroupMixin
from docarray.helper import protocol_and_compress_from_file_path
if TYPE_CHECKING: # pragma: no cover
from docarray import Document, DocumentArray
class DocumentArrayLoader(Par... | from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union
from docarray.array.mixins import ParallelMixin, GroupMixin
from docarray.helper import protocol_and_compress_from_file_path
if TYPE_CHECKING:
from docarray import Document, DocumentArray
class DocumentArrayLoader(ParallelMixin, GroupMix... |
"""Psychic reader."""
import logging
import os
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class PsychicReader(BaseReader):
"""
Psychic reader.
Psychic is a platform that allows... | """Psychic reader."""
import logging
import os
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class PsychicReader(BaseReader):
"""
Psychic reader.
Psychic is a platform that allows ... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a co... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a co... |
from typing import Any, Dict, Optional
from elasticsearch import AsyncElasticsearch, Elasticsearch
from logging import getLogger
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.vector_stores.utils import metadata_dict_to_node
logger = getLogger(__name__)
def get_user_agent() -> str:
... | from typing import Any, Dict, Optional
from elasticsearch import AsyncElasticsearch, Elasticsearch
def get_user_agent() -> str:
"""Get user agent for Elasticsearch client."""
import llama_index.core
version = getattr(llama_index.core, "__version__", "")
return f"llama_index-py-vs/{version}"
def ge... |
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import PointCloud3D
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
impor... | import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import PointCloud3D
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
... | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
so... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import bounding_boxes
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_ker... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.var... |
from langchain_core.runnables.history import (
GetSessionHistoryCallable,
MessagesOrDictWithMessages,
RunnableWithMessageHistory,
)
__all__ = [
"GetSessionHistoryCallable",
"MessagesOrDictWithMessages",
"RunnableWithMessageHistory",
]
| from langchain_core.runnables.history import (
GetSessionHistoryCallable,
MessagesOrDictWithMessages,
RunnableWithMessageHistory,
)
__all__ = [
"RunnableWithMessageHistory",
"GetSessionHistoryCallable",
"MessagesOrDictWithMessages",
]
|
from pathlib import Path
from typing import Dict, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_URL = "https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zip"
_... | from pathlib import Path
from typing import Dict, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
extract_archive,
)
_URL = "https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VC... |
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import (
BaseCumulativeTransformOutputParser,
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
BaseTransformOutputParser,
StrOutputParser,
)
from langchain_core.output_parsers.base im... | from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import (
BaseCumulativeTransformOutputParser,
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
BaseTransformOutputParser,
StrOutputParser,
)
from langchain_core.output_parsers.base im... |
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "naver/effici... | import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "naver/effici... |
from docarray.array.any_array import AnyDocArray
from docarray.array.doc_list.doc_list import DocList
from docarray.array.doc_vec.doc_vec import DocVec
__all__ = ['DocList', 'DocVec', 'AnyDocArray']
| from docarray.array.document import DocumentArray
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from mmengine.data import BaseDataElement
from mmengine.hooks import NaiveVisualizationHook
class TestNaiveVisualizationHook:
def test_after_train_iter(self):
naive_visualization_hook = NaiveVisualizationHook()
... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from mmengine.data import BaseDataSample
from mmengine.hooks import NaiveVisualizationHook
class TestNaiveVisualizationHook:
def test_after_train_iter(self):
naive_visualization_hook = NaiveVisualizationHook()
... |
from typing import Optional
import pytest
import torch
from docarray import BaseDocument, DocumentArray
from docarray.array.abstract_array import AnyDocumentArray
from docarray.documents import TextDoc
from docarray.typing import TorchTensor
num_docs = 5
num_sub_docs = 2
num_sub_sub_docs = 3
@pytest.fixture
def mu... | from typing import Optional
import pytest
import torch
from docarray import BaseDocument, DocumentArray
from docarray.array.abstract_array import AnyDocumentArray
from docarray.documents import Text
from docarray.typing import TorchTensor
num_docs = 5
num_sub_docs = 2
num_sub_sub_docs = 3
@pytest.fixture
def multi... |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import subprocess
def is_installed(package: str) -> bool:
"""Check package whether installed.
Args:
package (str): Name of package to be checked.
"""
# When executing `import mmengine.runner`,
# pkg_resources will be im... | # Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
import subprocess
def is_installed(package: str) -> bool:
"""Check package whether installed.
Args:
package (str): Name of package to be checked.
"""
# When executing `import mmengine.runner`,
# pkg_res... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g learning r... |
"""
This examples trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it output a continuous labels 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for indivi... | """
This examples trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it output a continuous labels 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for indivi... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembled... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembled... |
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualiz... | # Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .optimizer_hook... |
# mypy: allow-untyped-defs
import torch.distributed as dist
from torch._C._distributed_c10d import FakeProcessGroup
class FakeStore(dist.Store):
"""
A fake store is a fake Key-Value store simply for initialization usage
the of fake process group, one can either use FakeStore or HashStore.
"""
def _... | # mypy: allow-untyped-defs
import torch.distributed as dist
from torch._C._distributed_c10d import FakeProcessGroup
class FakeStore(dist.Store):
"""
A fake store is a fake Key-Value store simply for initialization usage
the of fake process group, one can either use FakeStore or HashStore.
"""
def _... |
import logging
import os
import threading
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns... | import logging
import os
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns a prefix string ... |
import json
import numpy as np
import xgboost as xgb
rng = np.random.RandomState(1994)
class TestGPUTrainingContinuation:
def test_training_continuation(self):
kRows = 64
kCols = 32
X = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
dtrain = xgb.DMatrix(X, y)
... | import json
import numpy as np
import xgboost as xgb
rng = np.random.RandomState(1994)
class TestGPUTrainingContinuation:
def test_training_continuation(self):
kRows = 64
kCols = 32
X = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
dtrain = xgb.DMatrix(X, y)
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Clickhouse, ClickhouseSettings
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling opti... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Clickhouse, ClickhouseSettings
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling opti... |
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... | import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... |
"""Table node mapping."""
import uuid
from typing import Any, Dict, Optional, Sequence
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.objects.base_node_mapping import (
DEFAULT_PERSIST_DIR,
DEFAULT_PERSIST_FNAME,
BaseObjectNodeMapping,
)
from llama_index.core.schema import Ba... | """Table node mapping."""
from typing import Any, Dict, Optional, Sequence
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.objects.base_node_mapping import (
DEFAULT_PERSIST_DIR,
DEFAULT_PERSIST_FNAME,
BaseObjectNodeMapping,
)
from llama_index.core.schema import BaseNode, Text... |
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='Random... | # dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomResize', scale=[(2048, 800), (2048, 1024)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetIn... |
import torch
import torchaudio.prototype.transforms as T
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class Transforms(TestBaseMixin):
@nested_params(
["Convolve", "FFTConvolve"],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
... | import torch
import torchaudio.prototype.transforms as T
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class Transforms(TestBaseMixin):
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
... |
"""Fake Embedding class for testing purposes."""
import math
from langchain_core.embeddings import Embeddings
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""R... | """Fake Embedding class for testing purposes."""
import math
from langchain_core.embeddings import Embeddings
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""R... |
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
from .utils import ... | from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import TorchaudioTestCase, skipIfNoModule
from .utils import ... |
from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from... | from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies import deserialize
from keras.src.dtype_policies import get
from keras.src.dtype_policies import serialize
from keras.src.dtype_policies.dtype_policy import DTypePolicy... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies import deserialize
from keras.src.dtype_policies import get
from keras.src.dtype_policies import serialize
from keras.src.dtype_policies.dtype_policy import DTypePolicy... |
from fastapi.testclient import TestClient
from docs_src.configure_swagger_ui.tutorial002 import app
client = TestClient(app)
def test_swagger_ui():
response = client.get("/docs")
assert response.status_code == 200, response.text
assert '"syntaxHighlight": false' not in response.text, (
"not used... | from fastapi.testclient import TestClient
from docs_src.configure_swagger_ui.tutorial002 import app
client = TestClient(app)
def test_swagger_ui():
response = client.get("/docs")
assert response.status_code == 200, response.text
assert (
'"syntaxHighlight": false' not in response.text
), "no... |
import json
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.utilities.jira import JiraAPIWrapper
@pytest.fixture
def mock_jira(): # type: ignore
with patch("atlassian.Jira") as mock_jira:
yield mock_jira
@pytest.mark.requires("atlassian")
class TestJiraAPIWrapper:
... | from unittest.mock import MagicMock, patch
import pytest
from langchain_community.utilities.jira import JiraAPIWrapper
@pytest.fixture
def mock_jira(): # type: ignore
with patch("atlassian.Jira") as mock_jira:
yield mock_jira
@pytest.mark.requires("atlassian")
class TestJiraAPIWrapper:
def test_j... |
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from typing... | """**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from typing... |
from typing import Union, TextIO, BinaryIO, TYPE_CHECKING, Type
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class CommonIOMixin:
"""The common IO helper function for arrays."""
def save(
self,
file: Union[str, TextIO, BinaryIO],
file_format: str = 'binary'... | from typing import Union, TextIO, BinaryIO, TYPE_CHECKING, Type
if TYPE_CHECKING:
from docarray.typing import T
class CommonIOMixin:
"""The common IO helper function for arrays."""
def save(
self,
file: Union[str, TextIO, BinaryIO],
file_format: str = 'binary',
encoding: ... |
from typing import Any, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing.tensor.tensor import AnyTensor
T = TypeVar('T', bound='VerticesAndFaces')
class VerticesAndFaces(BaseDoc):
"""
Document for handling 3D mesh tensor data.
A VerticesAndFaces Document can contain an An... | from typing import Any, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing.tensor.tensor import AnyTensor
T = TypeVar('T', bound='VerticesAndFaces')
class VerticesAndFaces(BaseDocument):
"""
Document for handling 3D mesh tensor data.
A VerticesAndFaces Document ca... |
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Permute")
class Permute(Layer):
"""Permutes the dimensions of... | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Permute")
class Permute(Layer):
"""Permutes the dimensions of... |
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint, update_data_root
from .setup_env import setup_multi_processes
__all__ = [
'get_roo... | # Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint, update_data_root
from .setup_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest... |
from typing import Any
def __getattr__(name: str = "") -> Any:
msg = (
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SE... | from typing import Any
def __getattr__(name: str = "") -> Any:
raise AttributeError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/... |
"""Message responsible for deleting other messages."""
from typing import Any, Literal
from langchain_core.messages.base import BaseMessage
class RemoveMessage(BaseMessage):
"""Message responsible for deleting other messages."""
type: Literal["remove"] = "remove"
"""The type of the message (used for se... | """Message responsible for deleting other messages."""
from typing import Any, Literal
from langchain_core.messages.base import BaseMessage
class RemoveMessage(BaseMessage):
"""Message responsible for deleting other messages."""
type: Literal["remove"] = "remove"
"""The type of the message (used for se... |
# Copyright (c) OpenMMLab. All rights reserved.
from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
get_classes, imagenet_det_classes,
imagenet_vid_classes, oid_challenge_classes,
oid_v6_classes, voc_classes)
from .ev... | # Copyright (c) OpenMMLab. All rights reserved.
from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
get_classes, imagenet_det_classes,
imagenet_vid_classes, voc_classes)
from .eval_hooks import DistEvalHook, EvalHook
from .mean_ap import avera... |
from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def splade_bert_tiny_model_reused() -> SparseEnc... | from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture()
def csr_bert_tiny_model() -> SparseEncoder:
return SparseEn... |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""`FeatureConnector` for translations with fixed languages per example.
Here for ... | from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""`FeatureConnector` for translations with fixed languages per example.
Here for ... |
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked... | import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked... |
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_executio... | from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incom... |
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.6.0"
@keras_export("keras.version")
def version():
return __version__
| from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.5.0"
@keras_export("keras.version")
def version():
return __version__
|
import importlib
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py310, needs_pydanticv2
@pytest.fixture(
name="client",
params=[
"tutorial001",
pytest.param("tutorial001_py310", marks=needs_py310),
],
)
def get_client(request: pytest.FixtureRequest):
... | import pytest
from fastapi.testclient import TestClient
from ...utils import needs_pydanticv2
@pytest.fixture(name="client")
def get_client():
from docs_src.schema_extra_example.tutorial001 import app
client = TestClient(app)
return client
@needs_pydanticv2
def test_post_body_example(client: TestClien... |
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
reg_decoded_bbox=True,
loss_bbox=dict(type='GIoULoss', loss_weight=10.0))))
| _base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
reg_decoded_bbox=True,
loss_bbox=dict(type='GIoULoss', loss_weight=10.0))))
|
import pytest
from docarray import Document
from docarray.array.memory import DocumentArrayInMemory
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import Docum... | import pytest
from docarray import Document
from docarray.array.memory import DocumentArrayInMemory
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import Docum... |
import os
from typing import Dict
DEPLOYMENT_FILES = [
'statefulset-executor',
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir,... | import os
from typing import Dict
DEPLOYMENT_FILES = [
'statefulset-executor',
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir,... |
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, Tuple, Type, TypeVar
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT... | import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, Tuple, Type, TypeVar
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT... |
"""Function Message."""
from typing import Any, Literal
from typing_extensions import override
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class FunctionMessage(BaseMessage):
"""Message for passing th... | """Function Message."""
from typing import Any, Literal
from typing_extensions import override
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class FunctionMessage(BaseMessage):
"""Message for passing th... |
from typing import List
import numpy as np
from torch.utils.data import Dataset
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from sentence_transformers.readers.InputExample import InputExample
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset... | from torch.utils.data import Dataset
from typing import List
from ..readers.InputExample import InputExample
import numpy as np
import nltk
from nltk.tokenize.treebank import TreebankWordDetokenizer
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the for... |
from pathlib import Path
from typing import Callable
import numpy as np
import pytest
import torchaudio
from jina import Document, DocumentArray
from vad_speech_segmenter import VADSpeechSegmenter
@pytest.fixture(scope='module')
def segmenter(tmpdir_factory) -> 'VADSpeechSegmenter':
workspace = tmpdir_factory.mk... | from pathlib import Path
from typing import Callable
import numpy as np
import pytest
import torchaudio
from jina import Document, DocumentArray
from ..vad_speech_segmenter import VADSpeechSegmenter
@pytest.fixture(scope='module')
def segmenter(tmpdir_factory) -> 'VADSpeechSegmenter':
workspace = tmpdir_factory... |
"""Standard LangChain interface tests for Responses API"""
from pathlib import Path
from typing import cast
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_openai import ChatOpenAI
from tests.integration_tests.chat_models.test_base_s... | """Standard LangChain interface tests for Responses API"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_openai import ChatOpenAI
from tests.integration_tests.chat_models.test_base_standard import TestOpenAIStandard
class TestOpenAIResponses(TestOpenAIStandard):
@property... |
from typing import Any, Dict, List, Optional, Union
from huggingface_hub.utils import get_session
from .. import config
from ..exceptions import DatasetsError
from .file_utils import (
get_authentication_headers_for_url,
)
from .logging import get_logger
logger = get_logger(__name__)
class DatasetViewerError(... | from typing import Any, Dict, List, Optional, Union
from huggingface_hub.utils import get_session
from .. import config
from ..exceptions import DatasetsError
from .file_utils import (
get_authentication_headers_for_url,
)
from .logging import get_logger
logger = get_logger(__name__)
class DatasetViewerError(... |
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_pr... | from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_pr... |
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .gtzan import GTZAN
from .librilight_limited import LibriLightLimited
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
from .libritts import LIBRITTS
from .ljspeech imp... | from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .gtzan import GTZAN
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
from .libritts import LIBRITTS
from .ljspeech import LJSPEECH
from .quesst14 import QUESST14
from .... |
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.vide... | from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import Audio
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_t... |
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... | import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... |
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .custom import CustomDataset
from .dataset_wrappers import (ClassBalancedDataset, ConcatData... | # Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .custom import CustomDataset
from .dataset_wrappers import (ClassBalancedD... |
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .make_divisible import make_divisible
from .misc import (... | # Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .make_divisible import make_divisible
from .misc import (... |
from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway
__all__ = ['HTTPGateway']
class HTTPGateway(FastAPIBaseGateway):
"""
:class:`HTTPGateway` is a FastAPIBaseGateway that uses the default FastAPI app
"""
@property
def app(self):
"""Get the default base API app for HTT... | from jina.serve.runtimes.gateway.http.gateway import HTTPGateway
__all__ = ['HTTPGateway']
|
import json
import os
import zlib
from typing import Callable, TextIO
def exact_div(x, y):
assert x % y == 0
return x // y
def str2bool(string):
str2val = {"True": True, "False": False}
if string in str2val:
return str2val[string]
else:
raise ValueError(f"Expected one of {set(str... | import zlib
from typing import Iterator, TextIO
def exact_div(x, y):
assert x % y == 0
return x // y
def str2bool(string):
str2val = {"True": True, "False": False}
if string in str2val:
return str2val[string]
else:
raise ValueError(f"Expected one of {set(str2val.keys())}, got {st... |
# Copyright (c) OpenMMLab. All rights reserved.
import random
from typing import Any, Sequence, Tuple
import numpy as np
import torch
from .base_data_element import BaseDataElement
DATA_BATCH = Sequence[Tuple[Any, BaseDataElement]]
def worker_init_fn(worker_id: int, num_workers: int, rank: int,
... | # Copyright (c) OpenMMLab. All rights reserved.
import random
from typing import Any, Sequence, Tuple
import numpy as np
import torch
from .base_data_sample import BaseDataSample
DATA_BATCH = Sequence[Tuple[Any, BaseDataSample]]
def worker_init_fn(worker_id: int, num_workers: int, rank: int,
see... |
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(plugins=[
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
... | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(plugins=[
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
... |
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/spl... | import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/spl... |
import numpy as np
import pytest
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
@pytest.mark.tensorflow
@pyte... | import numpy as np
import pytest
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
@pytest.mark.tensorflow
@pyte... |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit
from backend.data.execution impor... | from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit
from backend.data.execution impor... |
"""
This example trains a SparseEncoder for the Natural Questions (NQ) dataset.
The training script fine-tunes a SparseEncoder using the Splade loss function for retrieval.
It loads a subset of the Natural Questions dataset, splits it into training and evaluation subsets,
and trains the model as a retriever. After trai... | """
This example trains a SparseEncoder for the Natural Questions (NQ) task.
The training script fine-tunes a SparseEncoder using the Splade loss function for retrieval.
It loads a subset of the Natural Questions dataset, splits it into training and evaluation subsets,
and trains the model as a retriever. After trainin... |
"""
This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It generates sentence embeddings
that can be compared using cosine-similarity to measure the similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_... | """
This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It generates sentence embeddings
that can be compared using cosine-similarity to measure the similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_... |
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor import * # noqa: F401, F403
from .bbox import * # noqa: F401, F403
from .data_structures import * # noqa: F401, F403
from .evaluation import * # noqa: F401, F403
from .hook import * # noqa: F401, F403
from .mask import * # noqa: F401, F403
from .optimiz... | # Copyright (c) OpenMMLab. All rights reserved.
from .anchor import * # noqa: F401, F403
from .bbox import * # noqa: F401, F403
from .data_structures import * # noqa: F401, F403
from .evaluation import * # noqa: F401, F403
from .hook import * # noqa: F401, F403
from .mask import * # noqa: F401, F403
from .post_pr... |
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""Convert a PIL Image to ... | from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Im... |
from __future__ import annotations
import torch.nn.functional as F
from torch import Tensor, nn
class Normalize(nn.Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self) -> None:
super().__init__()
def forward(self, features: dict[str, Tensor]) -> dict[str, Tensor]:
... | from __future__ import annotations
import torch.nn.functional as F
from torch import Tensor, nn
class Normalize(nn.Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self) -> None:
super(Normalize, self).__init__()
def forward(self, features: dict[str, Tensor]) -> dict[... |
import json
import logging
import os
from typing import Dict, List
import torch
from torch import Tensor, nn
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be adde... | import torch
from torch import Tensor
from torch import nn
from typing import List, Dict
import os
import json
import logging
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weigh... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
f... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
f... |
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DMod... | from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DMod... |
"""Read PDF files using PyMuPDF library."""
from pathlib import Path
from typing import Dict, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PyMuPDFReader(BaseReader):
"""Read PDF files using PyMuPDF library."""
def load_data(
... | """Read PDF files using PyMuPDF library."""
from pathlib import Path
from typing import Dict, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PyMuPDFReader(BaseReader):
"""Read PDF files using PyMuPDF library."""
def load_data(
... |
import contextlib
import json
import re
from typing import Any, List
with contextlib.suppress(ImportError):
import yaml
from llama_index.core.output_parsers.base import OutputParserException
def _marshal_llm_to_json(output: str) -> str:
"""
Extract a substring containing valid JSON or array from a strin... | import contextlib
import json
import re
from typing import Any, List
with contextlib.suppress(ImportError):
import yaml
from llama_index.core.output_parsers.base import OutputParserException
def _marshal_llm_to_json(output: str) -> str:
"""
Extract a substring containing valid JSON or array from a strin... |
"""Test LLM program."""
import json
import pytest
from typing import Sequence
from unittest.mock import MagicMock
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms import LLMMetadata
from llama_index.core.output_parsers.pydantic import PydanticOutputParser
from llama_index.core.program... | """Test LLM program."""
import json
from typing import Sequence
from unittest.mock import MagicMock
from llama_index.core.base.llms.types import (
CompletionResponse,
)
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.multi_modal_llms import MultiModalLLMMetadata
from llama_index.core.... |