Input/output code pairs (input: string, 33 to 5k chars; output: string, 32 to 5k chars). Each pair below is delimited by "--- input ---" and "--- output ---" markers; a trailing "..." marks text truncated in the original cells.

--- input ---
from ..utils import is_torch_available
if is_torch_available():
from .group_offloading import apply_group_offloading
from .hooks import HookRegistry, ModelHook
from .layerwise_casting import apply_layerwise_casting, apply_layerwise_casting_hook
from .pyramid_attention_broadcast import PyramidAttention...
--- output ---
from ..utils import is_torch_available
if is_torch_available():
from .hooks import HookRegistry, ModelHook
from .layerwise_casting import apply_layerwise_casting, apply_layerwise_casting_hook
from .pyramid_attention_broadcast import PyramidAttentionBroadcastConfig, apply_pyramid_attention_broadcast
--- input ---
from typing import Optional
import torch
from docarray import BaseDoc, DocList
from docarray.typing import TorchTensor
def test_torch_train():
class Mmdoc(BaseDoc):
text: str
tensor: Optional[TorchTensor[3, 224, 224]]
N = 10
    batch = DocList[Mmdoc](Mmdoc(text=f'hello{i}') for i in range...
--- output ---
from typing import Optional
import torch
from docarray import BaseDoc, DocList
from docarray.typing import TorchTensor
def test_torch_train():
class Mmdoc(BaseDoc):
text: str
tensor: Optional[TorchTensor[3, 224, 224]]
N = 10
    batch = DocList[Mmdoc](Mmdoc(text=f'hello{i}') for i in range...
--- input ---
"""DeepInfra API base URL."""
API_BASE = "https://api.deepinfra.com"
"""DeepInfra Inference API endpoint."""
INFERENCE_ENDPOINT = "v1/openai/completions"
"""Chat API endpoint for DeepInfra."""
CHAT_API_ENDPOINT = "v1/openai/chat/completions"
"""Environment variable name of DeepInfra API token."""
ENV_VARIABLE = "DEEPI... | """DeepInfra API base URL."""
API_BASE = "https://api.deepinfra.com"
"""DeepInfra Inference API endpoint."""
INFERENCE_ENDPOINT = "v1/openai/completions"
"""Chat API endpoint for DeepInfra."""
CHAT_API_ENDPOINT = "v1/openai/chat/completions"
"""Environment variable name of DeepInfra API token."""
ENV_VARIABLE = "DEEPIN... |
from .tensor import Tensor
Embedding = Tensor
--- output ---
import numpy as np
from .tensor import Tensor
Embedding = Tensor
--- input ---
import inspect
from keras.src.api_export import keras_export
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize
from keras.src.quantizers.quantizers import compute_float8_amax_history
from keras....
--- output ---
import inspect
from keras.src.api_export import keras_export
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize
from keras.src.quantizers.quantizers import compute_float8_amax_history
from keras....
--- input ---
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import NucliaTextTransformer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling opti...
--- output ---
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import NucliaTextTransformer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling opti...
--- input ---
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import shutil
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope="session", autouse=True)
def download_cache():
subprocess.run(
'scripts/download_full.sh',
        cwd=Path(_...
--- output ---
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import shutil
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope="session", autouse=True)
def download_cache():
subprocess.run(
'scripts/download_full.sh',
        cwd=Path(_...
--- input ---
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_mode...
--- output ---
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_mode...
--- input ---
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments, losses
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
from sentence_transformers.tr...
--- output ---
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.l...
--- input ---
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil...
--- output ---
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='ndarray_embedding')
class NdArrayEmbedding(NdArray, EmbeddingMixin):
    alternative_type = NdArra...
--- input ---
from typing import Any, Union
from ..utils import add_end_docstrings, is_vision_available
from .base import GenericTensor, Pipeline, build_pipeline_init_args
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
@add_end_docstrings(
    build_pipeline_init_args(has_image_pro...
--- output ---
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_vision_available
from .base import GenericTensor, Pipeline, build_pipeline_init_args
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
@add_end_docstrings(
    build_pipeline_init_args(h...
--- input ---
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_p...
--- output ---
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_p...
--- input ---
from torch import nn, Tensor
from typing import Iterable, Dict
import torch.nn.functional as F
from enum import Enum
from ..SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
    EUCLIDEAN...
--- output ---
from torch import nn, Tensor
from typing import Iterable, Dict
import torch.nn.functional as F
from enum import Enum
from ..SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""
The metric for the triplet loss
"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
...
--- input ---
from langchain_core.language_models import (
BaseLanguageModel,
LanguageModelInput,
LanguageModelOutput,
get_tokenizer,
)
from langchain_core.language_models.base import _get_token_ids_default_method
__all__ = [
"BaseLanguageModel",
"LanguageModelInput",
"LanguageModelOutput",
"_get_tok... | from langchain_core.language_models import (
BaseLanguageModel,
LanguageModelInput,
LanguageModelOutput,
get_tokenizer,
)
from langchain_core.language_models.base import _get_token_ids_default_method
__all__ = [
"get_tokenizer",
"BaseLanguageModel",
"_get_token_ids_default_method",
"Lan... |
_base_ = './mask-rcnn_r50-contrib_fpn_gn-all_2x_coco.py'
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
...
--- output ---
_base_ = './mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py'
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
...
--- input ---
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...dpr_text import DPRTextEncoder
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
        [Document(text='just some random text here...
--- output ---
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...dpr_text import DPRTextEncoder
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
        [Document(text='just some random text here...
--- input ---
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
...
--- output ---
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
...
--- input ---
import pytest
from xgboost.testing.federated import run_federated_learning
@pytest.mark.parametrize("with_ssl", [True, False])
def test_federated_learning(with_ssl: bool) -> None:
run_federated_learning(with_ssl, False, __file__)
--- output ---
#!/usr/bin/python
import multiprocessing
import sys
import time
import xgboost as xgb
import xgboost.federated
SERVER_KEY = 'server-key.pem'
SERVER_CERT = 'server-cert.pem'
CLIENT_KEY = 'client-key.pem'
CLIENT_CERT = 'client-cert.pem'
def run_server(port: int, world_size: int, with_ssl: bool) -> None:
    if with_s...
--- input ---
_base_ = '../retinanet/retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='GARetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
...
--- output ---
_base_ = '../retinanet/retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='GARetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
...
--- input ---
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, True),
position='after_conv3')
]))
--- output ---
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, True),
position='after_conv3')
]))
--- input ---
from llama_index.core import Document, MockEmbedding, global_tokenizer
from llama_index.core.llms import MockLLM
from llama_index.packs.raptor.base import RaptorRetriever
import pytest
@pytest.mark.skipif(
condition=(global_tokenizer is None), reason="No global tokenizer set"
)
def test_raptor() -> None:
    retr...
--- output ---
from llama_index.core import Document, MockEmbedding, global_tokenizer
from llama_index.core.llms import MockLLM
from llama_index.packs.raptor.base import RaptorRetriever
import pytest
@pytest.mark.skipif(
condition = (global_tokenizer is None), reason="No global tokenizer set"
)
def test_raptor() -> None:
    ret...
--- input ---
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class AutoAssign(SingleStageDetector):
"""Implementation of `AutoAssign: Differentiable Label ... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class AutoAssign(SingleStageDetector):
"""Implementation of `AutoAssign: Differentiable Label A... |
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_openai import ChatOpenAI
class TestOpenAIResponses(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
...
--- output ---
"""Standard LangChain interface tests"""
from typing import Tuple, Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_openai import ChatOpenAI
class TestOpenAIResponses(ChatModelUnitTests):
@property
    def chat_model_class(se...
--- input ---
# training schedule for 1x
train_cfg = dict(by_epoch=True, max_epochs=12)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
...
--- output ---
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
--- input ---
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
class CrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
"""
        Computes the Cros...
--- output ---
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
# TODO: Consider the naming of this class
class CrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
...
--- input ---
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_method,
import_modules_from_strings, is_list_of,
...
--- output ---
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_method,
import_modules_from_strings, is_list_of,
...
--- input ---
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import MongoDBAtlasDocumentIndex
from docarray.typing import NdArray
from . import NestedDoc, SimpleDoc, SimpleSchema, assert_when_ready
def test_find_simple_schema(simple_index_with_docs, n_dim): # noqa: F...
--- output ---
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import MongoDBAtlasDocumentIndex
from docarray.typing import NdArray
from . import NestedDoc, SimpleDoc, SimpleSchema, assert_when_ready
N_DIM = 10
def test_find_simple_schema(simple_index_with_docs): # no...
--- input ---
from enum import Enum
from typing import Any, Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
    COSINE = lambda x, y: 1 - F.cosine_sim...
--- output ---
from enum import Enum
from typing import Any, Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
    COSINE = lambda x, y: 1 - F.cosine_sim...
--- input ---
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import (
AmazonTextractPDFLoader,
MathpixPDFLoader,
OnlinePDFLoader,
PagedPDFSplitter,
PDFMinerLoader,
        PDFMinerPDFasHTMLLoade...
--- output ---
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import (
AmazonTextractPDFLoader,
MathpixPDFLoader,
OnlinePDFLoader,
PagedPDFSplitter,
PDFMinerLoader,
        PDFMinerPDFasHTMLLoade...
--- input ---
import PIL.Image
import pytest
import torch
import torchvision.transforms.v2._utils
from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_mask, make_image
from torchvision import tv_tensors
from torchvision.transforms.v2._utils import has_all, has_any
from torchvision.transforms.v2.functional im...
--- output ---
import PIL.Image
import pytest
import torch
import torchvision.transforms.v2._utils
from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_mask, make_image
from torchvision import datapoints
from torchvision.transforms.v2._utils import has_all, has_any
from torchvision.transforms.v2.functional im...
--- input ---
"""Test ChatDeepSeek chat model."""
from __future__ import annotations
from typing import Optional
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from langchain_core.tools import BaseTool
from langchain_tests.integration_tes...
--- output ---
"""Test ChatDeepSeek chat model."""
from typing import Optional
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
...
--- input ---
from typing import TYPE_CHECKING
from backend.integrations.oauth.todoist import TodoistOAuthHandler
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .linear import LinearOAuthHandler
from .notion import NotionOAuthHandler
from .twitter import TwitterOAuthHandler
if TYPE_CHECKING:
...
--- output ---
from typing import TYPE_CHECKING
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .linear import LinearOAuthHandler
from .notion import NotionOAuthHandler
from .twitter import TwitterOAuthHandler
if TYPE_CHECKING:
from ..providers import ProviderName
    from .base import BaseOAu...
--- input ---
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEmbeddingSimilarityEvaluator,
SparseEncoder,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initializ...
--- output ---
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEmbeddingSimilarityEvaluator,
SparseEncoder,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
        MLM...
--- input ---
_base_ = './fovea_r50_fpn_4xb4-1x_coco.py'
model = dict(
bbox_head=dict(
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
    dict(type='LoadAnnotation...
--- output ---
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
bbox_head=dict(
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
    dict(type='LoadAnnotations...
--- input ---
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
# learning policy
lr_config = dict(step=[16, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
--- output ---
_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
--- input ---
from PIL import Image
from sentence_transformers import SentenceTransformer, models, util
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPModel, CLIPProcessor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip... | from PIL import Image
from sentence_transformers import SentenceTransformer, models, util
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPModel, CLIPProcessor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip... |
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
    from docarray.doc...
--- output ---
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
    from docarray.doc...
--- input ---
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import GoogleSerperResults, GoogleSerperRun
"""Google Serper API Toolkit."""
"""Tool for the Serer.dev Google Search API."""
# Create a way to dynamically look up deprecated imports....
--- output ---
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import GoogleSerperResults, GoogleSerperRun
"""Google Serper API Toolkit."""
"""Tool for the Serer.dev Google Search API."""
# Create a way to dynamically look up deprecated imports....
--- input ---
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = ...
--- output ---
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = ...
--- input ---
"""Standard LangChain interface tests"""
import base64
from pathlib import Path
from typing import Literal, cast
import httpx
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage
from langchain_tests.integration_tests import ChatModelIntegr...
--- output ---
"""Standard LangChain interface tests"""
import base64
from pathlib import Path
from typing import Literal, cast
import httpx
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage
from langchain_tests.integration_tests import ChatModelIntegrationTests
fr...
--- input ---
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
    from docarray.doc...
--- output ---
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
    from docarray.doc...
--- input ---
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jin...
--- output ---
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jin...
--- input ---
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualiz...
--- output ---
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualiz...
--- input ---
from __future__ import annotations
from collections.abc import Iterable
from typing import TYPE_CHECKING
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(Sentenc...
--- output ---
from __future__ import annotations
from collections.abc import Iterable
from typing import TYPE_CHECKING
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(Sentenc...
--- input ---
from typing import Any
import pytest
from langchain_tests.conftest import CustomPersister, CustomSerializer
from langchain_tests.conftest import _base_vcr_config as _base_vcr_config
from vcr import VCR # type: ignore[import-untyped]
_EXTRA_HEADERS = [
("openai-organization", "PLACEHOLDER"),
("user-agent", "P... | from typing import Any
import pytest
from langchain_tests.conftest import YamlGzipSerializer
from langchain_tests.conftest import _base_vcr_config as _base_vcr_config
from vcr import VCR # type: ignore[import-untyped]
_EXTRA_HEADERS = [
("openai-organization", "PLACEHOLDER"),
("user-agent", "PLACEHOLDER"),
...
--- input ---
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataSample
from mmengine.dist import (broadcast_object_list, collect_results,
is_main_process)
...
--- output ---
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataSample
from mmengine.dist import (broadcast_object_list, collect_results,
is_main_process)
...
--- input ---
"""A unit test meant to catch accidental introduction of non-optional dependencies."""
from collections.abc import Mapping
from pathlib import Path
from typing import Any
import pytest
import toml
from packaging.requirements import Requirement
HERE = Path(__file__).parent
PYPROJECT_TOML = HERE / "../../pyproject.to... | """A unit test meant to catch accidental introduction of non-optional dependencies."""
from pathlib import Path
from typing import Any, Dict, Mapping
import pytest
import toml
from packaging.requirements import Requirement
HERE = Path(__file__).parent
PYPROJECT_TOML = HERE / "../../pyproject.toml"
@pytest.fixture...
--- input ---
from abc import ABC
from docarray.array.storage.sqlite.backend import BackendMixin, SqliteConfig
from docarray.array.storage.sqlite.getsetdel import GetSetDelMixin
from docarray.array.storage.sqlite.seqlike import SequenceLikeMixin
from docarray.array.storage.memory.find import (
FindMixin,
) # temporary delegate...
--- output ---
from abc import ABC
from .backend import BackendMixin, SqliteConfig
from .getsetdel import GetSetDelMixin
from .seqlike import SequenceLikeMixin
from ..memory.find import FindMixin # temporary delegate to in-memory find API
__all__ = ['StorageMixins', 'SqliteConfig']
class StorageMixins(FindMixin, BackendMixin, Ge...
--- input ---
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import UnstructuredOrgModeLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optio...
--- output ---
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import UnstructuredOrgModeLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optio...
--- input ---
_base_ = 'ssd300_voc0712.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
...
--- output ---
_base_ = 'ssd300_voc0712.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
...
--- input ---
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.base_document.abstract_document import AbstractDocument
from docarray.base_document.base_node import BaseNode
from docarray.typing.proto_register import _PROTO_TYPE_NAME_TO_CLASS
if TYPE_CHECKING:
    from docarray.proto import DocumentProto, No...
--- output ---
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.base_document.abstract_document import AbstractDocument
from docarray.base_document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
...
--- input ---
_base_ = 'faster-rcnn_r50_fpn_crop640-50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
neck=dict(
type='FPG',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
inter_channels=256,
num_outs=5,
stack_times=9,
paths=['bu'] * 9,
...
--- output ---
_base_ = 'faster_rcnn_r50_fpn_crop640_50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
neck=dict(
type='FPG',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
inter_channels=256,
num_outs=5,
stack_times=9,
paths=['bu'] * 9,
...
--- input ---
"""**sys_info** prints information about the system and langchain packages for debugging purposes.""" # noqa: E501
from collections.abc import Sequence
def _get_sub_deps(packages: Sequence[str]) -> list[str]:
"""Get any specified sub-dependencies."""
from importlib import metadata
sub_deps = set()
...
--- output ---
"""**sys_info** prints information about the system and langchain packages for debugging purposes.""" # noqa: E501
from collections.abc import Sequence
def _get_sub_deps(packages: Sequence[str]) -> list[str]:
"""Get any specified sub-dependencies."""
from importlib import metadata
sub_deps = set()
...
--- input ---
import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
...
--- output ---
import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
...
--- input ---
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.distribution.distribution_lib import DataParallel as DataParallel
from keras.src.distribution.distribution_lib import DeviceMesh as DeviceMesh
from keras.src.distribution.distribution...
--- output ---
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.distribution.distribution_lib import DataParallel
from keras.src.distribution.distribution_lib import DeviceMesh
from keras.src.distribution.distribution_lib import LayoutMap
from ker...
--- input ---
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
class WeightedLayerPooling(Module):
"""Token embeddings are weighted mean of their diff... | from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class WeightedLayerPooling(nn.Module):
"""Token embeddings are weighted mean of... |
from typing import TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class ContentPropertyMixin:
"""Provide helper functions for :class:`Document` to allow universal content property access."""
@property
def content_hash(self) -> int:
"""Get the document hash ... | from typing import TYPE_CHECKING
if TYPE_CHECKING:
from docarray.typing import T
class ContentPropertyMixin:
"""Provide helper functions for :class:`Document` to allow universal content property access."""
@property
def content_hash(self) -> int:
"""Get the document hash according to its con... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc0'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.1.0'
mmengi...
--- output ---
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc0'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.0.0'
mmengi...
--- input ---
from . import dataset, dist_utils, metrics
__all__ = ["dataset", "dist_utils", "metrics"]
--- output ---
from . import (
dataset,
dist_utils,
metrics,
)
__all__ = ["dataset", "dist_utils", "metrics"]
--- input ---
__version__ = '0.13.19'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
--- output ---
__version__ = '0.13.19'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
--- input ---
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet import MobileNet as MobileNet
from keras.src.applications.mobilenet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.mobilen...
--- output ---
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet import MobileNet
from keras.src.applications.mobilenet import decode_predictions
from keras.src.applications.mobilenet import preprocess_input
--- input ---
_base_ = '../ssd/ssd300_coco.py'
model = dict(
bbox_head=dict(type='PISASSDHead'),
train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
--- output ---
_base_ = '../ssd/ssd300_coco.py'
model = dict(
bbox_head=dict(type='PISASSDHead'),
train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=35, norm_type=2)))
--- input ---
from docarray import BaseDocument
from docarray.typing import PointCloud3DUrl
def test_set_point_cloud_url():
class MyDocument(BaseDocument):
point_cloud_url: PointCloud3DUrl
d = MyDocument(point_cloud_url="https://jina.ai/mesh.obj")
assert isinstance(d.point_cloud_url, PointCloud3DUrl)
    asse...
--- output ---
from docarray import Document
from docarray.typing import PointCloud3DUrl
def test_set_point_cloud_url():
class MyDocument(Document):
point_cloud_url: PointCloud3DUrl
d = MyDocument(point_cloud_url="https://jina.ai/mesh.obj")
assert isinstance(d.point_cloud_url, PointCloud3DUrl)
    assert d.poi...
--- input ---
"""Standard LangChain interface tests"""
from typing import Type
import pytest # type: ignore[import-not-found]
from langchain_core.language_models import BaseChatModel
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
    C...
--- output ---
"""Standard LangChain interface tests"""
from typing import Optional, Type
import pytest # type: ignore[import-not-found]
from langchain_core.language_models import BaseChatModel
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_tests.integration_tests import ( # type: ignore[import-not-fo...
--- input ---
"""Generate migrations for partner packages."""
import importlib
from langchain_core.documents import BaseDocumentCompressor, BaseDocumentTransformer
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.retrievers import BaseRetriever
from l...
--- output ---
"""Generate migrations for partner packages."""
import importlib
from langchain_core.documents import BaseDocumentCompressor, BaseDocumentTransformer
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.retrievers import BaseRetriever
from l...
--- input ---
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import ImageBytes
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
    import tensorflow ...
--- output ---
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import ImageBytes
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
    import tensorflow ...
--- input ---
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` an...
--- output ---
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` an...
--- input ---
from typing import BinaryIO, Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: need to comply TorchScript syntax -- need annotation and no f-string nor global
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
    i = s.find_best_au...
--- output ---
from typing import Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: need to comply TorchScript syntax -- need annotation and no f-string nor global
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
    i = s.find_best_audio_stream...
--- input ---
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
                                cross_entropy, m...
--- output ---
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
                                cross_entropy, m...
--- input ---
import argparse
import os
from typing import List
from jina.parsers.helper import CastHostAction
def api_to_dict(show_all_args: bool = False):
"""Convert Jina API to a dict
:param show_all_args: if set, then hidden args are also exported
:return: dict
"""
if show_all_args:
        from jina.parse...
--- output ---
import argparse
import os
from typing import List, Union
from jina.parsers.helper import CastHostAction
def api_to_dict(show_all_args: bool = False):
"""Convert Jina API to a dict
:param show_all_args: if set, then hidden args are also exported
:return: dict
"""
if show_all_args:
        from jin...
--- input ---
import enum
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
fro...
--- output ---
import enum
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Data...
--- input ---
class MissingConfigError(Exception):
"""The attempted operation requires configuration which is not available"""
class NeedConfirmation(Exception):
"""The user must explicitly confirm that they want to proceed"""
class InsufficientBalanceError(ValueError):
user_id: str
message: str
    balance: floa...
--- output ---
class MissingConfigError(Exception):
"""The attempted operation requires configuration which is not available"""
class NeedConfirmation(Exception):
"""The user must explicitly confirm that they want to proceed"""
--- input ---
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric im...
--- output ---
# Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results im...
--- input ---
"""
Prompts for implementing Chain of Abstraction.
While official prompts are not given (and the paper finetunes models for the task),
we can take inspiration and use few-shot prompting to generate a prompt for implementing
chain of abstraction in an LLM agent.
"""
REASONING_PROMPT_TEMPALTE = """Generate an abstract ... | """
Prompts for implementing Chain of Abstraction.
While official prompts are not given (and the paper finetunes models for the task),
we can take inspiration and use few-shot prompting to generate a prompt for implementing
chain of abstraction in an LLM agent.
"""
REASONING_PROMPT_TEMPALTE = """Generate an abstract ... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from torch import nn
from mmengine.hooks import OptimizerHook
class TestOptimizerHook:
def test_after_train_iter(self):
class Model(nn.Module):
def __init__(self):
                super().__init__(...
--- output ---
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from torch import nn
from mmengine.hooks import OptimizerHook
class TestOptimizerHook:
def test_after_train_iter(self):
class Model(nn.Module):
def __init__(self):
                super().__init__(...
--- input ---
"""You Retriever."""
import logging
import os
import warnings
from typing import Any, Dict, List, Literal, Optional
import requests
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.schema import NodeWithScore, QueryBundle...
--- output ---
"""You Retriever."""
import logging
import os
import warnings
from typing import Any, Dict, List, Literal, Optional
import requests
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.schema import NodeWithScore, QueryBundle...
--- input ---
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless...
--- output ---
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless...
--- input ---
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import Cutmix, Mixup, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
fro...
--- output ---
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import Cutmix, Mixup, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
fro...
--- input ---
import tempfile
import os
import time
from typing import Dict
import pytest
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(
os.path.join(cur_dir, 'unit', 'array', 'docker-compose.yml')
)
milvus_compose_yml = os.path.abspath(
    os.path.join(cur_dir, 'unit', 'array', 'milvus-do...
--- output ---
import tempfile
import os
import time
from typing import Dict
import pytest
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(
os.path.join(cur_dir, 'unit', 'array', 'docker-compose.yml')
)
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
    tmpfile = f'docarray_test_{next(te...
--- input ---
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by app...
--- output ---
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by app...
--- input ---
"""Quip reader."""
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from typing import Any, Dict, List, Optional
import requests # type: ignore
import time
from pydantic import Field
BASE_URL = "https://platform.quip.com"
class QuipReader(BasePydanticReader)...
--- output ---
"""Quip reader."""
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from typing import Any, Dict, List, Optional
import requests # type: ignore
import time
from pydantic import Field
BASE_URL = "https://platform.quip.com"
class QuipReader(BasePydanticReader):...
--- input ---
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/d...
--- output ---
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file...
--- input ---
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` an...
--- output ---
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` an...
--- input ---
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize as deserialize
from keras.src.activations import get as get
from keras.src.activations import serialize as serialize
from keras.src.activations.activati...
--- output ---
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.acti...
--- input ---
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoNdArray,
    VideoTorchTenso...
--- output ---
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoNdArray,
    VideoTorchTenso...
--- input ---
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
    SparseM...
--- output ---
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
    SparseM...
--- input ---
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
    Con...
--- output ---
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
    Con...
--- input ---
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementati... | import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementati... |
import threading
from typing import Optional
__all__ = ["LinearBlockSparsePattern"]
def _is_valid_linear_block_sparse_pattern(
row_block_size: int, col_block_size: int
) -> bool:
return (row_block_size == 1 and col_block_size == 4) or (
row_block_size == 8 and col_block_size == 1
)
# This is a...
--- output ---
import threading
from typing import Optional
__all__ = ["LinearBlockSparsePattern"]
def _is_valid_linear_block_sparse_pattern(
row_block_size: int, col_block_size: int
) -> bool:
return (row_block_size == 1 and col_block_size == 4) or (
row_block_size == 8 and col_block_size == 1
)
# This is a...
--- input ---
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_C...
--- output ---
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_C...
--- input ---
import logging
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-... | from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
        SpladePooling...
--- input ---
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc... | # THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc... |
_base_ = './retinanet_r50_caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)]),
    dict(type='Ra...
--- output ---
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg...
--- input ---
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil...
--- output ---
import numpy as np
from docarray.computation.numpy_backend import NumpyCompBackend
def test_topk_numpy():
top_k = NumpyCompBackend.Retrieval.top_k
a = np.array([1, 4, 2, 7, 4, 9, 2])
vals, indices = top_k(a, 3)
assert vals.shape == (1, 3)
assert indices.shape == (1, 3)
    assert (vals.squeeze()...
--- input ---
import abc
import io
import pathlib
import pickle
from collections.abc import Iterator
from typing import Any, BinaryIO, cast, Optional, Union
import numpy as np
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from...
--- output ---
import abc
import io
import pathlib
import pickle
from typing import Any, BinaryIO, cast, Dict, Iterator, List, Optional, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvi...
--- input ---
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.csp_darknet import CSPDarknet
from .utils import check_norm_state, is_norm
def test_csp_darknet_backbone():
with pytest.raises(ValueError):
        # frozen_sta...
--- output ---
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.csp_darknet import CSPDarknet
from .utils import check_norm_state, is_norm
def test_csp_darknet_backbone():
with pytest.raises(ValueError):
# frozen_stages must in range(-1, len(arch_setting) + 1)
...
--- input ---
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
import backend.server.v2.library.db
import backend.server.v2.library.model
import backend.server.v2.library.routes
app = fastapi.FastAPI()
app.include_router(backend.server.v2...
--- output ---
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
import fastapi
import fastapi.testclient
import pytest_mock
import backend.server.v2.library.db
import backend.server.v2.library.model
import backend.server.v2.library.routes
app = fastapi.FastAPI()
app.include_router(backend.server.v2.library.route...
--- input ---
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
with read_base():
from ...config.py_config.test_base_variables import *
--- output ---
# Copyright (c) OpenMMLab. All rights reserved.
if '_base_':
from ...config.py_config.test_base_variables import *
--- input ---
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Dict
def get_metric_value(indicator: str, metrics: Dict) -> Any:
"""Get the metric value specified by an indicator, which can be either a
metric name or a full name with evaluator prefix.
Args:
        indicator (str): The metric ind...
--- output ---
from typing import Any, Dict
def get_metric_value(indicator: str, metrics: Dict) -> Any:
"""Get the metric value specified by an indicator, which can be either a
metric name or a full name with evaluator prefix.
Args:
indicator (str): The metric indicator, which can be the metric name
...
--- input ---
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio...
--- output ---
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.utils._internal.mis...
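The rows above are raw before/after code samples, so a short, hedged sketch may help a reader work with them. Assuming the pairs are exported locally as JSON Lines with `input` and `output` string fields (the file name `pairs.jsonl` and both field names are assumptions based on the two columns shown here, not part of the dataset itself), each edit can be rendered as a unified diff using only the standard library:

```python
# A minimal sketch, assuming a local JSONL dump of the input/output pairs.
import difflib
import json

# Hypothetical local export of the rows shown above, one JSON object per line.
with open("pairs.jsonl", encoding="utf-8") as f:
    pairs = [json.loads(line) for line in f]

for pair in pairs[:3]:  # preview the first few pairs
    # difflib.unified_diff renders the input -> output edit line by line.
    diff = difflib.unified_diff(
        pair["input"].splitlines(),
        pair["output"].splitlines(),
        fromfile="input",
        tofile="output",
        lineterm="",
    )
    print("\n".join(diff))
    print("-" * 40)
```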