| input (string, lengths 33–5k) | output (string, lengths 32–5k) |
|---|---|
import collections
import csv
import numpy as np
from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
from keras.src.utils import file_utils
@keras_export("keras.callbacks.CSVLogger")
class CSVLogger(Callback):
"""Callback that streams epoch results to a CSV file.
... | import collections
import csv
import numpy as np
from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
from keras.src.utils import file_utils
@keras_export("keras.callbacks.CSVLogger")
class CSVLogger(Callback):
"""Callback that streams epoch results to a CSV file.
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import RocksetChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import RocksetChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... |
"""Test ZhipuAI Text Embedding."""
from langchain_community.embeddings.zhipuai import ZhipuAIEmbeddings
def test_zhipuai_embedding_documents() -> None:
"""Test ZhipuAI Text Embedding for documents."""
documents = ["This is a test query1.", "This is a test query2."]
embedding = ZhipuAIEmbeddings() # type... | """Test ZhipuAI Text Embedding."""
from langchain_community.embeddings.zhipuai import ZhipuAIEmbeddings
def test_zhipuai_embedding_documents() -> None:
"""Test ZhipuAI Text Embedding for documents."""
documents = ["This is a test query1.", "This is a test query2."]
embedding = ZhipuAIEmbeddings() # type... |
from __future__ import annotations
from typing import Optional
import torch
import torch.ao.nn.intrinsic as nni
import torch.ao.nn.qat as nnqat
import torch.nn.functional as F
from torch.ao.nn.intrinsic.modules.fused import _FusedModule
__all__ = ["LinearReLU"]
class LinearReLU(nnqat.Linear, _FusedModule):
r"... | # mypy: allow-untyped-defs
import torch
import torch.ao.nn.intrinsic as nni
import torch.ao.nn.qat as nnqat
import torch.nn.functional as F
class LinearReLU(nnqat.Linear, nni._FusedModule):
r"""
A LinearReLU module fused from Linear and ReLU modules, attached with
FakeQuantize modules for weight, used in
... |
from typing import Any, Dict, List, Optional, Union
from docarray.utils.query_language.lookup import (
LookupLeaf,
LookupNode,
LookupTreeElem,
Q,
)
LOGICAL_OPERATORS: Dict[str, Union[str, bool]] = {
'$and': 'and',
'$or': 'or',
'$not': True,
}
COMPARISON_OPERATORS = {
'$lt': 'lt',
... | from typing import Dict, Any, Optional, Union, List
from docarray.utils.query_language.lookup import (
Q,
LookupNode,
LookupLeaf,
LookupTreeElem,
)
LOGICAL_OPERATORS: Dict[str, Union[str, bool]] = {
'$and': 'and',
'$or': 'or',
'$not': True,
}
COMPARISON_OPERATORS = {
'$lt': 'lt',
... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule
from mmdet.models.builder import HEADS
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class HTCMaskHead(FCNMaskHead):
def __init__(self, with_conv_res=True, *args, **kwargs):
super(HTCMaskHead, self).__init_... | from mmcv.cnn import ConvModule
from mmdet.models.builder import HEADS
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class HTCMaskHead(FCNMaskHead):
def __init__(self, with_conv_res=True, *args, **kwargs):
super(HTCMaskHead, self).__init__(*args, **kwargs)
self.with_conv_res = ... |
import logging
from collections import defaultdict
from typing import Annotated, Any, Dict, List, Optional, Sequence
from autogpt_libs.utils.cache import thread_cached
from fastapi import APIRouter, Body, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions impo... | import logging
from collections import defaultdict
from typing import Annotated, Any, Dict, List, Optional, Sequence
from autogpt_libs.utils.cache import thread_cached
from fastapi import APIRouter, Body, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions impo... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='NASFCOS',
prepr... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='NASFCOS',
prepr... |
"""Test HuggingFace embeddings."""
from langchain_tests.integration_tests import EmbeddingsIntegrationTests
from langchain_huggingface.embeddings import (
HuggingFaceEmbeddings,
HuggingFaceEndpointEmbeddings,
)
class TestHuggingFaceEmbeddings(EmbeddingsIntegrationTests):
@property
def embeddings_cla... | """Test HuggingFace embeddings."""
from typing import Type
from langchain_tests.integration_tests import EmbeddingsIntegrationTests
from langchain_huggingface.embeddings import (
HuggingFaceEmbeddings,
HuggingFaceEndpointEmbeddings,
)
class TestHuggingFaceEmbeddings(EmbeddingsIntegrationTests):
@proper... |
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting imp... | # Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting imp... |
from typing import Union, Iterable, Dict, List
import warnings
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, ... | from typing import Union, Iterable, Dict
import warnings
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other)... |
_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
model = dict(
backbone=dict(
embed_dims=192,
num_heads=[6, 12, 24, 48],
init_cfg=dict(t... | _base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
model = dict(
backbone=dict(
embed_dims=192,
num_heads=[6, 12, 24, 48],
init_cfg=dict(t... |
from llama_index_instrumentation.event_handlers.base import BaseEventHandler
from llama_index_instrumentation.event_handlers.null import NullEventHandler
__all__ = ["BaseEventHandler", "NullEventHandler"]
| from llama_index.core.instrumentation.event_handlers.base import BaseEventHandler
from llama_index.core.instrumentation.event_handlers.null import NullEventHandler
__all__ = ["BaseEventHandler", "NullEventHandler"]
|
_base_ = './rtmdet_l_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.33,
widen_factor=0.5,
init_cfg=dict(
type='Pretrained', prefix='bac... | _base_ = './rtmdet_l_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.33,
widen_factor=0.5,
init_cfg=dict(
type='Pretrained', prefix='bac... |
"""Standard LangChain interface tests."""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_perplexity import ChatPerplexity
class TestPerplexityStandard(ChatModelIntegrationTests):
@property
def chat... | """Standard LangChain interface tests."""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_perplexity import ChatPerplexity
class TestPerplexityStandard(ChatModelIntegrationTests):
... |
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
import mmcv
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector)
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_ar... | # Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Imag... |
"""
Demo for using xgboost with sklearn
===================================
"""
import multiprocessing
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import GridSearchCV
import xgboost as xgb
if __name__ == "__main__":
print("Parallel Parameter optimization")
X, y = fetch_... | """
Demo for using xgboost with sklearn
===================================
"""
import multiprocessing
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import GridSearchCV
import xgboost as xgb
if __name__ == "__main__":
print("Parallel Parameter optimization")
X, y = fetch_... |
"""
Tool implementations for the Riza (https://riza.io) code interpreter API.
Documentation: https://docs.riza.io
API keys: https://dashboard.riza.io
"""
from typing import Any, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool, Tool... | """
Tool implementations for the Riza (https://riza.io) code interpreter API.
Documentation: https://docs.riza.io
API keys: https://dashboard.riza.io
"""
from typing import Any, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool, Tool... |
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseEncoderTrainer,
SparseMarginMSELoss,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
... | from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseEncoderTrainer,
SparseMarginMSELoss,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
... |
from parameterized import parameterized
from torchaudio import sox_effects
from torchaudio_unittest.common_utils import (
get_sinusoid,
get_wav_data,
save_wav,
skipIfNoSox,
TempDirMixin,
TorchaudioTestCase,
)
from .common import load_params
@skipIfNoSox
class SmokeTest(TempDirMixin, Torchaudi... | from parameterized import parameterized
from torchaudio import sox_effects
from torchaudio_unittest.common_utils import (
get_sinusoid,
get_wav_data,
save_wav,
skipIfNoSox,
TempDirMixin,
TorchaudioTestCase,
)
from .common import load_params
@skipIfNoSox
class SmokeTest(TempDirMixin, Torchaudi... |
import io
import json
import struct
from dataclasses import dataclass
from typing import Any, Optional
import torch
_metadata_fn: str = "model.safetensors.index.json"
FILE_NAME = "model-{cpt_idx}-of-{num_files}"
SHARDED_FILE_NAME = "shard-{shard_idx}-model-{cpt_idx}-of-{num_files}"
SUFFIX = ".safetensors"
# metada... | import io
import json
import struct
from dataclasses import dataclass
from typing import Any, Optional
import torch
_metadata_fn: str = "model.safetensors.index.json"
FILE_NAME = "model-{cpt_idx}-of-{num_files}"
SHARDED_FILE_NAME = "shard-{shard_idx}-model-{cpt_idx}-of-{num_files}"
SUFFIX = ".safetensors"
# metada... |
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UpSampling1D")
class UpSampling1D(Layer):
"""Upsampling layer for 1D inputs.
Repeats each temporal step `size` times a... | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UpSampling1D")
class UpSampling1D(Layer):
"""Upsampling layer for 1D inputs.
Repeats each temporal step `size` times a... |
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
img_scale = (640, 640) # height, width
# model settings
model = dict(
type='YOLOX',
input_size=img_scale,
random_size_range=(15, 25),
random_size_interval=10,
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen... | _base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
img_scale = (640, 640)
# model settings
model = dict(
type='YOLOX',
input_size=img_scale,
random_size_range=(15, 25),
random_size_interval=10,
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
... |
import warnings
from abc import abstractmethod
from typing import Iterable, Iterator, MutableSequence
from docarray import Document, DocumentArray
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def _update_subindices_append_extend(self, value):
if getat... | from abc import abstractmethod
from typing import Iterator, Iterable, MutableSequence
from docarray import Document, DocumentArray
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def _update_subindices_append_extend(self, value):
if getattr(self, '_subin... |
import itertools
import os.path
import pytest
import requests as req
from docarray import Document, DocumentArray
from jina import Client, Executor, Flow, requests
from jina.helper import random_port
PROTOCOLS = ['grpc', 'http', 'websocket']
cur_dir = os.path.dirname(__file__)
class MyExecutor(Executor):
@requ... | import itertools
import os.path
import pytest
import requests as req
from docarray import Document, DocumentArray
from jina import Client, Executor, Flow, requests
from jina.helper import random_port
PROTOCOLS = ['grpc', 'http', 'websocket']
cur_dir = os.path.dirname(__file__)
class MyExecutor(Executor):
@requ... |
_base_ = [
'../_base_/models/faster_rcnn_r50_caffe_c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
... | _base_ = [
'../_base_/models/faster_rcnn_r50_caffe_c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
... |
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseTripletEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE... | from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseTripletEvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(... |
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from rich.console import Console
from docarray.base_document.abstract_document import AbstractDocument
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjs... | import os
from typing import Type
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from docarray.base_document.abstract_document import AbstractDocument
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode
from docarray... |
import os
import pickle
from pathlib import Path
from typing import Optional, Tuple
from jina import DocumentArray, Executor, requests
from jina.excepts import PretrainedModelFileDoesNotExist
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-idf sparse embeddings
"""
def __init__(
se... | import os
import pickle
from pathlib import Path
from typing import Optional, Tuple
from jina import DocumentArray, Executor, requests
from jina.excepts import PretrainedModelFileDoesNotExist
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-idf sparse embeddings
"""
def __init__(
se... |
__version__ = '0.13.26'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
| __version__ = '0.13.25'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import HnswDocumentIndex
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class NestedDoc(BaseDoc):
... | import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDocument
from docarray.index import HnswDocumentIndex
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
class SimpleDoc(BaseDocument):
tens: NdArray[10] = Field(dim=1000)
class NestedDoc(B... |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(... | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(... |
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class FusedSemanticHead(BaseModu... | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class FusedSemanticHead(BaseModu... |
# Copyright (c) OpenMMLab. All rights reserved.
from .dropblock import DropBlock
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
__all__ = ['DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder']
| # Copyright (c) OpenMMLab. All rights reserved.
from .dropblock import DropBlock
__all__ = ['DropBlock']
|
from docarray.array.array.array import DocumentArray
from docarray.array.stacked.array_stacked import DocumentArrayStacked
__all__ = ['DocumentArray', 'DocumentArrayStacked']
| from docarray.array.array import DocumentArray
from docarray.array.array_stacked import DocumentArrayStacked
__all__ = ['DocumentArray', 'DocumentArrayStacked']
|
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing.plotting import run_categorical
try:
import matplotlib
matplotlib.use("Agg")
from graphviz import Source
from matplotlib.axes import Axes
except ImportError:
pass
pytestmark = pytest.mar... | import json
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
try:
import matplotlib
matplotlib.use('Agg')
from graphviz import Source
from matplotlib.axes import Axes
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotli... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .masktrack_rcnn_tracker import MaskTrackRCNNTracker
from .ocsort_tracker import OCSORTTracker
from .quasi_dense_tracker import QuasiDenseTracker
from .sort_tracker import SORTTracker
from .st... | # Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .masktrack_rcnn_tracker import MaskTrackRCNNTracker
from .quasi_dense_tracker import QuasiDenseTracker
from .sort_tracker import SORTTracker
__all__ = [
'BaseTracker', 'ByteTracker', 'Qu... |
# coding: utf-8
"""Comparison of `binary` and `xentropy` objectives.
BLUF: The `xentropy` objective does logistic regression and generalizes
to the case where labels are probabilistic (i.e. numbers between 0 and 1).
Details: Both `binary` and `xentropy` minimize the log loss and use
`boost_from_average = TRUE` by def... | # coding: utf-8
"""Comparison of `binary` and `xentropy` objectives.
BLUF: The `xentropy` objective does logistic regression and generalizes
to the case where labels are probabilistic (i.e. numbers between 0 and 1).
Details: Both `binary` and `xentropy` minimize the log loss and use
`boost_from_average = TRUE` by def... |
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type... | _base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type... |
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDocument
from docarray.base_document import DocumentResponse
from docarray.documents import Image, Text
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmd... | import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDocument
from docarray.documents import Image, Text
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(BaseDocument):
img: Image
text: T... |
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
... | _base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
... |
from typing import TYPE_CHECKING, List
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array import DocumentArrayStacked
from docarray.array.abstract_array import AnyDocumentArray
class DocumentArraySummary:
def __init__(self, da: 'AnyDocumentArray'):
... | from typing import TYPE_CHECKING, List
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array import DocumentArrayStacked
from docarray.array.abstract_array import AnyDocumentArray
class DocumentArraySummary:
def __init__(self, da: 'AnyDocumentArray'):
... |
"""Module for helper functions for clients."""
from typing import Optional, Tuple
from jina._docarray import Document, DocumentArray, docarray_v2
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
if docarray_v2:
from docarray import DocList, BaseDoc
def _new_data_request_from_... | """Module for helper functions for clients."""
from typing import Optional, Tuple
from jina._docarray import Document, DocumentArray, docarray_v2
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
if docarray_v2:
from docarray import DocList
def _new_data_request_from_batch(
... |
"""Run smoke tests"""
import os
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, decode_webp, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
prin... | """Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvisi... |
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import Datapoint
from ._image import Image
from ._mask import Mask
from ._video import Video
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn... | from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT, Datapoint
from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image
fro... |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_d... | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_d... |
import os
from typing import Dict
from hubble.executor.helper import is_valid_docker_uri, parse_hub_uri
from hubble.executor.hubio import HubIO
from jina.constants import (
__default_composite_gateway__,
__default_executor__,
__default_grpc_gateway__,
__default_http_gateway__,
__default_websocket_... | import os
from typing import Dict
from hubble.executor.helper import is_valid_docker_uri, parse_hub_uri
from hubble.executor.hubio import HubIO
from jina.constants import (
__default_composite_gateway__,
__default_executor__,
__default_grpc_gateway__,
__default_http_gateway__,
__default_websocket_... |
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import PointCloud3D
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf... | import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import PointCloud3D
from docarray.utils._internal.misc import is_tf_available
from docarray.utils._internal.pydantic import is_pydantic_v2
from tests import TOYDATA_DIR
tf_available = i... |
"""Test IPEX LLM"""
import os
from typing import Any
import pytest
from langchain_core.outputs import LLMResult
from langchain_community.llms import IpexLLM
model_ids_to_test = os.getenv("TEST_IPEXLLM_MODEL_IDS") or ""
skip_if_no_model_ids = pytest.mark.skipif(
not model_ids_to_test, reason="TEST_IPEXLLM_MODEL_... | """Test IPEX LLM"""
import os
from typing import Any
import pytest
from langchain_core.outputs import LLMResult
from langchain_community.llms import IpexLLM
model_ids_to_test = os.getenv("TEST_IPEXLLM_MODEL_IDS") or ""
skip_if_no_model_ids = pytest.mark.skipif(
not model_ids_to_test, reason="TEST_IPEXLLM_MODEL_... |
from __future__ import annotations
from collections.abc import Sequence
from copy import deepcopy
from typing import Any, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import Document
from langchain_core.util... | from __future__ import annotations
from copy import deepcopy
from typing import Any, Dict, List, Optional, Sequence, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import Document
from langchain_core.utils import get_fr... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.densenet import DenseNet121 as DenseNet121
from keras.src.applications.densenet import DenseNet169 as DenseNet169
from keras.src.applications.densenet import DenseNet201 ... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.densenet import DenseNet121
from keras.src.applications.densenet import DenseNet169
from keras.src.applications.densenet import DenseNet201
from keras.src.applications.de... |
from keras.src.backend.tensorflow import core
from keras.src.backend.tensorflow import distribution_lib
from keras.src.backend.tensorflow import image
from keras.src.backend.tensorflow import linalg
from keras.src.backend.tensorflow import math
from keras.src.backend.tensorflow import nn
from keras.src.backend.tensorfl... | from keras.src.backend.tensorflow import core
from keras.src.backend.tensorflow import distribution_lib
from keras.src.backend.tensorflow import image
from keras.src.backend.tensorflow import linalg
from keras.src.backend.tensorflow import math
from keras.src.backend.tensorflow import nn
from keras.src.backend.tensorfl... |
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... | import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... |
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePol... | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePol... |
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num... | from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
convert_image_dtype,
to_dtype,
to_dtype_image_tensor,
to_dtype_video,
get_dimensions_image_tensor,
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.github.toolkit import (
BranchName,
CommentOnIssue,
CreateFile,
CreatePR,
CreateReviewRequest,
DeleteFile,
DirectoryPat... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.github.toolkit import (
BranchName,
CommentOnIssue,
CreateFile,
CreatePR,
CreateReviewRequest,
DeleteFile,
DirectoryPat... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import MomentoChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import MomentoChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a co... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a co... |
from __future__ import annotations
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
from sentence_transformers.util import is_training_available
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051... | from __future__ import annotations
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g., learning r... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):... |
import pathlib
from typing import Any, BinaryIO, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling,... | import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_shardi... |
from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.utils.reduce import reduce, reduce_all
class InnerDoc(BaseDocument):
integer: int
inner_list: List
class MMDoc(BaseDocument):
text: str = ''... | from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.utils.reduce import reduce, reduce_all
class InnerDoc(BaseDocument):
integer: int
inner_list: List
class MMDoc(BaseDocument):
text: str = ''
... |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl... | # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl... |
import numpy as np
import pytest
import torch
from docarray import Document, DocumentArray
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def batch():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 2... | from typing import Optional
import numpy as np
import pytest
import torch
from docarray import Document, DocumentArray
from docarray.typing import NdArray, TorchTensor
def test_stack():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch... |
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .atss_vlfusion_head import ATSSVLFusionHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .c... | # Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .atss_vlfusion_head import ATSSVLFusionHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .c... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import FirestoreChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handli... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import FirestoreChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handli... |
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.servers.grpc import GRPCServer
__all__ = ['GRPCGateway']
class GRPCGateway(GRPCServer, BaseGateway):
"""
:class:`GRPCGateway` is a GRPCServer that can be loaded from YAML as any other Gateway
"""
pass
| from jina.serve.runtimes.gateway.grpc.gateway import GRPCGateway
__all__ = ['GRPCGateway']
|
import warnings
from abc import ABC
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.chat_history import (
BaseChatMessageHistory,
InMemoryChatMessageHistory,
)
from langchain_core.memory import BaseMemory
from langchain_core.messages import AIMessage, HumanMessag... | import warnings
from abc import ABC
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.chat_history import (
BaseChatMessageHistory,
InMemoryChatMessageHistory,
)
from langchain_core.memory import BaseMemory
from langchain_core.messages import AIMessage, HumanMessag... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.registry import init_default_scope
from mmengine.utils import ProgressBar
from mmdet.models.utils import mask2ndarray
from mmdet.registry import DATASETS, VISUALIZERS
from... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.utils import ProgressBar
from mmdet.models.utils import mask2ndarray
from mmdet.registry import DATASETS, VISUALIZERS
from mmdet.structures.bbox import BaseBoxes
from mmde... |
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
... | from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float = None) -> None:
"""
... |
_INITIALIZED = False
_LAZILY_IMPORTED = [
"StreamReader",
"StreamReaderSourceStream",
"StreamReaderSourceAudioStream",
"StreamReaderSourceVideoStream",
"StreamReaderOutputStream",
]
def _init_extension():
import torch
import torchaudio
try:
torchaudio._extension._load_lib("lib... | _INITIALIZED = False
_LAZILY_IMPORTED = [
"StreamReader",
"StreamReaderSourceStream",
"StreamReaderSourceAudioStream",
"StreamReaderSourceVideoStream",
"StreamReaderOutputStream",
]
def _init_extension():
import torch
import torchaudio
try:
torchaudio._extension._load_lib("lib... |
from enum import Enum
from typing import Any, Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_sim... | from enum import Enum
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similari... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
tea... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
tea... |
import pytest
from langchain_core.utils.iter import batch_iterate
@pytest.mark.parametrize(
("input_size", "input_iterable", "expected_output"),
[
(2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]),
(3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]),
(1, [100, 200, 300], [[100], [200], [300... | import pytest
from langchain_core.utils.iter import batch_iterate
@pytest.mark.parametrize(
("input_size", "input_iterable", "expected_output"),
[
(2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]),
(3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]),
(1, [100, 200, 300], [[100], [200], [300... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.structures import ReIDDataSample
from mmdet.utils import register_all_modules
class TestLinearReIDHead(TestCase):
@classmethod
def setUpClass(cls) -> None:
registe... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.structures import ReIDDataSample
from mmdet.utils import register_all_modules
class TestLinearReIDHead(TestCase):
@classmethod
def setUpClass(cls) -> None:
registe... |
from __future__ import annotations
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import Any, Callable
import torch
from sentence_transformers.data_collator import SentenceTransformerDataCollator
@dataclass
class CrossEncoderDataCollator(SentenceTransformerDataCollator)... | from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Callable
import torch
from sentence_transformers.data_collator import SentenceTransformerDataCollator
@dataclass
class CrossEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a CrossEncoder mo... |
import numpy as np
import pytest
import torch
from docarray import BaseDoc, DocList
from docarray.array import DocVec
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def batch():
class Image(BaseDoc):
tensor: TorchTensor[3, 224, 224]
batch = DocList[Image]([Image(tensor=torch.zero... | import numpy as np
import pytest
import torch
from docarray import BaseDoc, DocArray
from docarray.array import DocArrayStacked
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def batch():
class Image(BaseDoc):
tensor: TorchTensor[3, 224, 224]
batch = DocArray[Image]([Image(tensor... |
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
import pytest
from jina import Flow
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_dry_run(protocol):
f = Flow(protocols=protocol).add()
with f:
dry_run = f.is_flow_ready()
dry_run_negative = f.is_flow_ready()
assert dry_run
assert not dry_run_negative
@py... | import pytest
from jina import Flow
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_dry_run(protocol):
f = Flow(protocol=protocol).add()
with f:
dry_run = f.is_flow_ready()
dry_run_negative = f.is_flow_ready()
assert dry_run
assert not dry_run_negative
@pyt... |
"""
This script contains an example how to perform semantic search with Seismic.
For more information, please refer to the documentation:
https://github.com/TusKANNy/seismic/blob/main/docs/Guidelines.md
All you need is installing the `pyseismic-lsr` package:
```
pip install pyseismic-lsr
```
"""
import time
from dat... | """
This script contains an example how to perform semantic search with Seismic.
For more information, please refer to the documentation:
https://github.com/TusKANNy/seismic/blob/main/docs/Guidelines.md
All you need is installing the `pyseismic-lsr` package:
```
pip install pyseismic-lsr
```
"""
import time
from dat... |
"""Notion tool spec."""
from typing import Any, Dict, List, Optional, Type
import requests
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.tool_spec.base import SPEC_FUNCTION_TYPE, BaseToolSpec
from llama_index.readers.notion import NotionPageReader
SEARCH_URL = "https://api.notion... | """Notion tool spec."""
from typing import Any, Dict, List, Optional, Type
import requests
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.tool_spec.base import SPEC_FUNCTION_TYPE, BaseToolSpec
from llama_index.readers.notion import NotionPageReader
SEARCH_URL = "https://api.notion... |
# CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHa... | # CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHa... |
import os
import urllib.parse
import urllib.request
from contextlib import nullcontext
def _uri_to_blob(uri: str, timeout=None) -> bytes:
"""Convert uri to blob
Internally it reads uri into blob.
:param uri: the uri of Document
:param timeout: timeout for urlopen. Only relevant if uri is not local
... | import os
import urllib.parse
import urllib.request
from contextlib import nullcontext
def _uri_to_blob(uri: str, timeout=None) -> bytes:
"""Convert uri to blob
Internally it reads uri into blob.
:param uri: the uri of Document
:param timeout: timeout for urlopen. Only relevant if uri is not local
... |
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import MSEEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__nam... | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import MSEEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__nam... |
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.autoembeddings import ChonkieAutoEmbedding
def test_class_init() -> None:
emb = ChonkieAutoEmbedding(model_name="all-MiniLM-L6-v2")
assert isinstance(emb, BaseEmbedding)
| from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.autoembeddings import ChonkieAutoEmbedding
def test_class_init() -> None:
emb = ChonkieAutoEmbedding(model_name="all-MiniLM-L6-v2")
assert isinstance(emb, BaseEmbedding)
|
from torchaudio.utils import ffmpeg_utils
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoFFmpeg
@skipIfNoFFmpeg
class TestFFmpegUtils(PytorchTestCase):
"""Smoke test for ffmpeg_utils module"""
def tearDown(self):
ffmpeg_utils.set_log_level(8)
super().tearDown()
def... | from torchaudio.utils import ffmpeg_utils
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoFFmpeg
@skipIfNoFFmpeg
class TestFFmpegUtils(PytorchTestCase):
"""Smoke test for ffmpeg_utils module"""
def tearDown(self):
ffmpeg_utils.set_log_level(8)
super().tearDown()
def... |
# mypy: allow-untyped-defs
import functools
import hashlib
import inspect
import json
import logging
import os
import time
from typing import Any, Optional
import torch._inductor.config as config
from torch._inductor.codecache import cutlass_key
from torch._inductor.codegen.cuda import cutlass_utils, serialization
fro... | # mypy: allow-untyped-defs
import functools
import hashlib
import json
import logging
import os
import time
from typing import Any, Optional
import torch._inductor.config as config
from torch._inductor.codecache import cutlass_key
from torch._inductor.codegen.cuda.cuda_env import get_cuda_arch, get_cuda_version
from t... |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
"""
This examples loads a pre-trained model and evaluates it on the STSbenchmark dataset
Usage:
python evaluation_stsbenchmark.py
OR
python evaluation_stsbenchmark.py model_name
"""
import logging
import os
import sys
import torch
from datasets import load_dataset
from sentence_transformers import SentenceTransform... | """
This examples loads a pre-trained model and evaluates it on the STSbenchmark dataset
Usage:
python evaluation_stsbenchmark.py
OR
python evaluation_stsbenchmark.py model_name
"""
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from dat... |
import os
from typing import Union
from .filesystem import FileSystemReader, FileSystemWriter
from .storage import StorageReader, StorageWriter
def _storage_setup(
storage: Union[StorageReader, StorageWriter, None],
checkpoint_id: Union[str, os.PathLike, None],
reader: bool = False,
) -> Union[None, Stor... | import os
from typing import Union
from .filesystem import FileSystemReader, FileSystemWriter
from .storage import StorageReader, StorageWriter
def _storage_setup(
storage: Union[StorageReader, StorageWriter, None],
checkpoint_id: Union[str, os.PathLike, None],
reader: bool = False,
) -> Union[None, Stor... |
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,
YOLOAnchorGenerator)
from .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS,
build_anchor_generator, build_prior_generator)
from .point_generator... | from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,
YOLOAnchorGenerator)
from .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS,
build_anchor_generator, build_prior_generator)
from .point_generator import MlvlPointGenerator, PointGenerator
from ... |
import os
from pathlib import Path
from subprocess import check_call
repo_root = Path(__file__).absolute().parent.parent
third_party_path = repo_root / "third_party"
def _read_file(path: Path) -> str:
with path.open(encoding="utf-8") as f:
return f.read().strip()
def _checkout_by_tag(repo: str, tag: s... | import os
from pathlib import Path
from subprocess import check_call
repo_root = Path(__file__).absolute().parent.parent
third_party_path = repo_root / "third_party"
def _read_file(path: Path) -> str:
with path.open(encoding="utf-8") as f:
return f.read().strip()
def _checkout_by_tag(repo: str, tag: s... |
"""
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demonstrated in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in ... | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- M... |
from __future__ import annotations
from abc import abstractmethod
from typing import Any
import torch
from tokenizers import Tokenizer
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from sentence_transformers.models.Module import Module
class InputModule(Module):
"""
Subclass of :... | from __future__ import annotations
from abc import abstractmethod
from typing import Any
import torch
from tokenizers import Tokenizer
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from sentence_transformers.models.Module import Module
class InputModule(Module):
"""
Subclass of :... |
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It ca... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# Syn... |
import pytest
from xgboost import testing as tm
from xgboost.testing.interaction_constraints import (
run_interaction_constraints,
training_accuracy,
)
class TestInteractionConstraints:
def test_exact_interaction_constraints(self) -> None:
run_interaction_constraints(tree_method="exact", device="... | import numpy as np
import pytest
import xgboost
from xgboost import testing as tm
dpath = 'demo/data/'
rng = np.random.RandomState(1994)
class TestInteractionConstraints:
def run_interaction_constraints(
self, tree_method, feature_names=None, interaction_constraints='[[0, 1]]'
):
x1 = np.ran... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from k... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from... |
from __future__ import annotations
from typing import Any, Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, model_validator
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_co... | from __future__ import annotations
from typing import Any, Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, model_validator
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_co... |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... |