input: string (lengths 33–5k)
output: string (lengths 32–5k)
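The two columns above form a paired-string schema: each row holds an `input` snippet and an `output` snippet, both roughly a few dozen to 5k characters long. A minimal sketch of loading and spot-checking such a pair dataset with the Hugging Face `datasets` library; the repo id below is a hypothetical placeholder, not the actual dataset path:

from datasets import load_dataset

# Hypothetical repo id; substitute the real dataset path.
ds = load_dataset("org/code-edit-pairs", split="train")

# Each row is an {"input": str, "output": str} pair.
row = ds[0]
print(len(row["input"]), len(row["output"]))

# Spot-check the advertised length ranges (input 33-5k, output 32-5k characters).
sample = ds.select(range(min(100, len(ds))))
in_lens = [len(r["input"]) for r in sample]
out_lens = [len(r["output"]) for r in sample]
print(min(in_lens), max(in_lens), min(out_lens), max(out_lens))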
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class BaseImageProcessorFast(metaclass=DummyObject): _backends = ["torchvision"] def __init__(self, *args, **kwargs): requires_backends(self, ["torchvision"]) class BaseVid...
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class BaseImageProcessorFast(metaclass=DummyObject): _backends = ["torchvision"] def __init__(self, *args, **kwargs): requires_backends(self, ["torchvision"])
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings preprocess_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True, pad_size_divisor=32) model = dict( preprocess_cfg=prepr...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings preprocess_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True, pad_size_divisor=32) model = dict( preprocess_cfg=prepr...
"""Hybrid Fusion Retriever Pack.""" import os from typing import Any, Dict, List from llama_index.core import Settings from llama_index.core.indices.vector_store import VectorStoreIndex from llama_index.core.llama_pack.base import BaseLlamaPack from llama_index.core.query_engine import RetrieverQueryEngine from llama...
"""Hybrid Fusion Retriever Pack.""" import os from typing import Any, Dict, List from llama_index.core import Settings from llama_index.core.indices.vector_store import VectorStoreIndex from llama_index.core.llama_pack.base import BaseLlamaPack from llama_index.core.query_engine import RetrieverQueryEngine from llama...
import unittest import torch import torchaudio.functional as F from parameterized import parameterized from torchaudio_unittest.common_utils import ( PytorchTestCase, skipIfNoSox, TorchaudioTestCase, ) from .functional_impl import Functional, FunctionalCPUOnly class TestFunctionalFloat32(Functional, Fun...
import unittest import torch import torchaudio.functional as F from parameterized import parameterized from torchaudio_unittest.common_utils import PytorchTestCase, TorchaudioTestCase, skipIfNoSox from .functional_impl import Functional, FunctionalCPUOnly class TestFunctionalFloat32(Functional, FunctionalCPUOnly, P...
import numpy as np import torch from docarray import BaseDocument, DocumentArray, Image, Text from docarray.typing import ( AnyEmbedding, AnyTensor, AnyUrl, ImageUrl, Mesh3DUrl, NdArray, PointCloud3DUrl, TextUrl, TorchEmbedding, TorchTensor, ) from docarray.typing.tensor import ...
import numpy as np import torch from docarray import BaseDocument, DocumentArray, Image, Text from docarray.typing import ( AnyTensor, AnyUrl, Embedding, ImageUrl, Mesh3DUrl, NdArray, PointCloud3DUrl, TextUrl, TorchEmbedding, TorchTensor, ) from docarray.typing.tensor import NdA...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.applications.convnext import ConvNeXtBase as ConvNeXtBase from keras.src.applications.convnext import ConvNeXtLarge as ConvNeXtLarge from keras.src.applications.convnext import ConvNe...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.applications.convnext import ConvNeXtBase from keras.src.applications.convnext import ConvNeXtLarge from keras.src.applications.convnext import ConvNeXtSmall from keras.src.applicatio...
""" Computes embeddings """ import numpy as np from sentence_transformers import SentenceTransformer from sentence_transformers.util import get_device_name def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None: """ Test that encode(output_value='token_embeddin...
""" Computes embeddings """ import numpy as np from sentence_transformers import SentenceTransformer def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None: """ Test that encode(output_value='token_embeddings') works :return: """ model = paraphrase_...
""" Feature agglomeration. Base classes and functions for performing feature agglomeration. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from scipy.sparse import issparse from ..base import TransformerMixin from ..utils import metadata_routing from ..utils.de...
""" Feature agglomeration. Base classes and functions for performing feature agglomeration. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from scipy.sparse import issparse from ..base import TransformerMixin from ..utils import metadata_routing from ..utils.de...
import pytest @pytest.mark.compile def test_placeholder() -> None: """Used for compiling integration tests without running any real tests."""
import pytest @pytest.mark.compile def test_placeholder() -> None: """Used for compiling integration tests without running any real tests.""" pass
#!/usr/bin/env python3 # Owner(s): ["oncall: distributed"] import contextlib import copyreg import os import sys import torch import torch.distributed as dist if not dist.is_available(): print("Distributed not available, skipping tests", file=sys.stderr) sys.exit(0) import torch.distributed.rpc as rpc impo...
#!/usr/bin/env python3 # Owner(s): ["oncall: distributed"] import contextlib import copyreg import os import sys import torch import torch.distributed as dist if not dist.is_available(): print("Distributed not available, skipping tests", file=sys.stderr) sys.exit(0) import torch.distributed.rpc as rpc impo...
from pathlib import Path from typing import Any, Callable, Optional, Tuple import PIL.Image from .folder import make_dataset from .utils import download_and_extract_archive, verify_str_arg from .vision import VisionDataset class RenderedSST2(VisionDataset): """`The Rendered SST2 Dataset <https://github.com/open...
from pathlib import Path from typing import Any, Callable, Optional, Tuple import PIL.Image from .folder import make_dataset from .utils import download_and_extract_archive, verify_str_arg from .vision import VisionDataset class RenderedSST2(VisionDataset): """`The Rendered SST2 Dataset <https://github.com/open...
"""Test Anthropic API wrapper.""" from typing import List from langchain_core.callbacks import ( CallbackManager, ) from langchain_core.messages import AIMessage, BaseMessage, HumanMessage from langchain_core.outputs import ChatGeneration, LLMResult from langchain_community.chat_models.litellm import ChatLiteLLM...
"""Test Anthropic API wrapper.""" from typing import List from langchain_core.callbacks import ( CallbackManager, ) from langchain_core.messages import AIMessage, BaseMessage, HumanMessage from langchain_core.outputs import ChatGeneration, LLMResult from langchain_community.chat_models.litellm import ChatLiteLLM...
from dataclasses import dataclass, field from typing import Any, Dict, Type import pytest from pydantic import Field from docarray import BaseDoc from docarray.index.abstract import BaseDocIndex from docarray.typing import NdArray pytestmark = pytest.mark.index class SimpleDoc(BaseDoc): tens: NdArray[10] = Fie...
from dataclasses import dataclass, field from typing import Any, Dict, Type import pytest from pydantic import Field from docarray import BaseDocument from docarray.index.abstract import BaseDocumentIndex from docarray.typing import NdArray pytestmark = pytest.mark.index class SimpleDoc(BaseDocument): tens: Nd...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import torch from .sampling_result import SamplingResult class BaseSampler(metaclass=ABCMeta): """Base class of samplers.""" def __init__(self, num, pos_fraction, neg_p...
from abc import ABCMeta, abstractmethod import torch from .sampling_result import SamplingResult class BaseSampler(metaclass=ABCMeta): """Base class of samplers.""" def __init__(self, num, pos_fraction, neg_pos_ub=-1, add_gt_as_proposals=T...
"""Google Trends API Toolkit.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools.google_trends.tool import GoogleTrendsQueryRun # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising depr...
"""Google Trends API Toolkit.""" from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools.google_trends.tool import GoogleTrendsQueryRun # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising depr...
import logging import time from abc import ABC, abstractmethod from typing import ClassVar from backend.data.model import OAuth2Credentials logger = logging.getLogger(__name__) class BaseOAuthHandler(ABC): # --8<-- [start:BaseOAuthHandler1] PROVIDER_NAME: ClassVar[str] DEFAULT_SCOPES: ClassVar[list[str]...
import logging import time from abc import ABC, abstractmethod from typing import ClassVar from autogpt_libs.supabase_integration_credentials_store import OAuth2Credentials logger = logging.getLogger(__name__) class BaseOAuthHandler(ABC): # --8<-- [start:BaseOAuthHandler1] PROVIDER_NAME: ClassVar[str] D...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' image_size = (1024, 1024) # Example of using a different file client # Method 1: simply set the data root and let the file I/O module # automatically infer from prefix (does not support LMDB and Memcache yet) # dat...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' image_size = (1024, 1024) file_client_args = dict(backend='disk') # comment out the code below to use different file client # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # ...
from __future__ import annotations __all__ = ["Array", "DType", "Device"] _all_ignore = ["cp"] from typing import TYPE_CHECKING import cupy as cp from cupy import ndarray as Array from cupy.cuda.device import Device if TYPE_CHECKING: # NumPy 1.x on Python 3.10 fails to parse np.dtype[] DType = cp.dtype[ ...
from __future__ import annotations __all__ = [ "ndarray", "Device", "Dtype", ] import sys from typing import ( Union, TYPE_CHECKING, ) from cupy import ( ndarray, dtype, int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64, ) ...
from typing import Union from fastapi import FastAPI, Query app = FastAPI() @app.get("/items/") async def read_items(q: Union[str, None] = Query(min_length=3)): results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]} if q: results.update({"q": q}) return results
from typing import Union from fastapi import FastAPI, Query app = FastAPI() @app.get("/items/") async def read_items(q: Union[str, None] = Query(default=..., min_length=3)): results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]} if q: results.update({"q": q}) return results
from torch.utils.data import Dataset from typing import List from ..readers.InputExample import InputExample import numpy as np from transformers.utils.import_utils import is_nltk_available, NLTK_IMPORT_ERROR class DenoisingAutoEncoderDataset(Dataset): """ The DenoisingAutoEncoderDataset returns InputExamples...
from torch.utils.data import Dataset from typing import List from ..readers.InputExample import InputExample import numpy as np from transformers.utils.import_utils import is_nltk_available, NLTK_IMPORT_ERROR class DenoisingAutoEncoderDataset(Dataset): """ The DenoisingAutoEncoderDataset returns InputExamples...
from typing import TYPE_CHECKING, Any, Dict, List, Type if TYPE_CHECKING: from docarray import BaseDocument def _is_access_path_valid(doc_type: Type['BaseDocument'], access_path: str) -> bool: """ Check if a given access path ("__"-separated) is a valid path for a given Document class. """ from d...
from typing import TYPE_CHECKING, Any, Dict, Type if TYPE_CHECKING: from docarray import BaseDocument def is_access_path_valid(doc: Type['BaseDocument'], access_path: str) -> bool: """ Check if a given access path ("__"-separated) is a valid path for a given Document class. """ from docarray impo...
# TODO: enable ruff qa on this file when we figure out why it thinks weaviate_client is # redefined at each test that fixture # ruff: noqa import pytest from pydantic import Field from docarray import BaseDoc from docarray.index.backends.weaviate import WeaviateDocumentIndex from tests.index.weaviate.fixture_wea...
# TODO: enable ruff qa on this file when we figure out why it thinks weaviate_client is # redefined at each test that fixture # ruff: noqa import pytest from pydantic import Field from docarray import BaseDoc from docarray.index.backends.weaviate import WeaviateDocumentIndex from tests.index.weaviate.fixture_wea...
import csv import os from pathlib import Path from typing import Tuple, Union import torchaudio from torch import Tensor from torch.hub import download_url_to_file from torch.utils.data import Dataset from torchaudio.datasets.utils import extract_archive _RELEASE_CONFIGS = { "release1": { "folder_in_arch...
import csv import os from pathlib import Path from typing import Tuple, Union import torchaudio from torch import Tensor from torch.hub import download_url_to_file from torch.utils.data import Dataset from torchaudio.datasets.utils import extract_archive _RELEASE_CONFIGS = { "release1": { "folder_in_arch...
from ._hdemucs import HDemucs, hdemucs_high, hdemucs_low, hdemucs_medium from .conformer import Conformer from .conv_tasnet import conv_tasnet_base, ConvTasNet from .deepspeech import DeepSpeech from .emformer import Emformer from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT from .rnnt_decoder import Hypo...
from ._hdemucs import HDemucs, hdemucs_high, hdemucs_low, hdemucs_medium from .conformer import Conformer from .conv_tasnet import ConvTasNet from .deepspeech import DeepSpeech from .emformer import Emformer from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT from .rnnt_decoder import Hypothesis, RNNTBeamSe...
import warnings from typing import Any, List, Union import torch from torchvision import datapoints from torchvision.transforms import functional as _F @torch.jit.unused def to_tensor(inpt: Any) -> torch.Tensor: warnings.warn( "The function `to_tensor(...)` is deprecated and will be removed in a future ...
import warnings from typing import Any, List, Union import torch from torchvision import datapoints from torchvision.transforms import functional as _F @torch.jit.unused def to_tensor(inpt: Any) -> torch.Tensor: warnings.warn( "The function `to_tensor(...)` is deprecated and will be removed in a future ...
import os import pypdf import pytest import tempfile from fpdf import FPDF from llama_index.readers.file import PDFReader from pathlib import Path from typing import Dict @pytest.fixture() def multi_page_pdf() -> FPDF: pdf = FPDF() pdf.add_page() pdf.set_font("Helvetica", size=12) pdf.cell(200, 10, te...
import os import pypdf import pytest import tempfile from fpdf import FPDF from llama_index.readers.file import PDFReader from pathlib import Path from typing import Dict @pytest.fixture() def multi_page_pdf() -> FPDF: pdf = FPDF() pdf.add_page() pdf.set_font("Helvetica", size=12) pdf.cell(200, 10, te...
from urllib.parse import urlparse, urlunparse import pytest from llama_index.postprocessor.nvidia_rerank import NVIDIARerank as Interface from llama_index.postprocessor.nvidia_rerank.utils import BASE_URL import respx @pytest.fixture() def mock_v1_local_models2(respx_mock: respx.MockRouter, base_url: str) -> None: ...
from urllib.parse import urlparse, urlunparse import pytest from requests_mock import Mocker from llama_index.postprocessor.nvidia_rerank import NVIDIARerank as Interface @pytest.fixture() def mock_v1_local_models2(requests_mock: Mocker, base_url: str) -> None: parsed = urlparse(base_url) normalized_path = p...
from enum import Enum from typing import TYPE_CHECKING, Union, overload import numpy as np if TYPE_CHECKING: import torch class Pooling(str, Enum): """Enum of possible pooling choices with pooling behaviors.""" CLS = "cls" MEAN = "mean" LAST = "last" # last token pooling def __call__(self...
from enum import Enum from typing import TYPE_CHECKING, Union, overload import numpy as np if TYPE_CHECKING: import torch class Pooling(str, Enum): """Enum of possible pooling choices with pooling behaviors.""" CLS = "cls" MEAN = "mean" LAST = "last" # last token pooling def __call__(self...
_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py'] # optimizer model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) optim_wrapper = dict( optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py' ] # optimizer model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) optim_wrapper = dict( optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
import base64 import email from typing import Dict, Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from pydantic import BaseModel, Field from langchain_community.tools.gmail.base import GmailBaseTool from langchain_community.tools.gmail.utils import clean_email_body class SearchArgsSc...
import base64 import email from typing import Dict, Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from pydantic import BaseModel, Field from langchain_community.tools.gmail.base import GmailBaseTool from langchain_community.tools.gmail.utils import clean_email_body class SearchArgsSc...
import numpy as np import pytest from docarray import BaseDocument, DocumentArray from docarray.documents import ImageDoc, TextDoc from docarray.typing import NdArray @pytest.mark.proto def test_simple_proto(): class CustomDoc(BaseDocument): text: str tensor: NdArray da = DocumentArray( ...
import numpy as np import pytest from docarray import BaseDocument, DocumentArray from docarray.array.stacked.array_stacked import DocumentArrayStacked from docarray.documents import ImageDoc, TextDoc from docarray.typing import NdArray @pytest.mark.proto def test_simple_proto(): class CustomDoc(BaseDocument): ...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv from .version import __version__, short_version def digit_version(version_str): digit_version = [] for x in version_str.split('.'): if x.isdigit(): digit_version.append(int(x)) elif x.find('rc') != -1: patch_v...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv from .version import __version__, short_version def digit_version(version_str): digit_version = [] for x in version_str.split('.'): if x.isdigit(): digit_version.append(int(x)) elif x.find('rc') != -1: patch_v...
# Copyright (c) OpenMMLab. All rights reserved. import pytest import torch from mmcv.cnn import is_norm from torch.nn.modules import GroupNorm from mmdet.models.utils import InvertedResidual, SELayer def test_inverted_residual(): with pytest.raises(AssertionError): # stride must be in [1, 2] Inv...
import pytest import torch from mmcv.cnn import is_norm from torch.nn.modules import GroupNorm from mmdet.models.utils import InvertedResidual, SELayer def test_inverted_residual(): with pytest.raises(AssertionError): # stride must be in [1, 2] InvertedResidual(16, 16, 32, stride=3) with py...
# training schedule for 1x train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1) val_cfg = dict(type='ValLoop') test_cfg = dict(type='TestLoop') # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='Mu...
# training schedule for 1x train_cfg = dict(by_epoch=True, max_epochs=12) val_cfg = dict(interval=1) test_cfg = dict() # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=12, ...
# coding: utf-8 from pathlib import Path import pandas as pd import lightgbm as lgb if lgb.compat.MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt else: raise ImportError('You need to install matplotlib and restart your session for plot_example.py.') print('Loading data...') # load or create your datas...
# coding: utf-8 from pathlib import Path import pandas as pd import lightgbm as lgb if lgb.compat.MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt else: raise ImportError('You need to install matplotlib and restart your session for plot_example.py.') print('Loading data...') # load or create your datas...
_base_ = './htc_without_semantic_r50_fpn_1x_coco.py' model = dict( data_preprocessor=dict(pad_seg=True), roi_head=dict( semantic_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), out_channels=256, ...
_base_ = './htc_without_semantic_r50_fpn_1x_coco.py' model = dict( roi_head=dict( semantic_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), out_channels=256, featmap_strides=[8]), semanti...
_base_ = [ '../_base_/models/mask-rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] train_pipeline = [ dict( type='LoadImageFromFile', file_client_args={{_base_.file_client_args}}), dict( type='Loa...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] train_pipeline = [ dict( type='LoadImageFromFile', file_client_args={{_base_.file_client_args}}), dict( type='Loa...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess from typing import List import numpy as np import pytest from jina import Document, DocumentArray, Flow from paddle_image import ImagePaddlehubEncoder @pytest.mark.parametrize( 'arr_in', [...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess from typing import List import numpy as np import pytest from jina import Document, DocumentArray, Flow from ...paddle_image import ImagePaddlehubEncoder @pytest.mark.parametrize( 'arr_in', ...
# Copyright (c) OpenMMLab. All rights reserved. # flake8: noqa from .config import * from .data import * from .dataset import * from .device import * from .fileio import * from .hooks import * from .logging import * from .registry import * from .runner import * from .utils import * from .version import __version__, ver...
# Copyright (c) OpenMMLab. All rights reserved. # flake8: noqa from .config import * from .data import * from .dataset import * from .device import * from .fileio import * from .hooks import * from .logging import * from .registry import * from .runner import * from .utils import * from .visualization import *
from typing import overload, TYPE_CHECKING, Union, Callable, Optional, Tuple if TYPE_CHECKING: from docarray import DocumentArray from docarray.typing import AnyDNN, T, ArrayType import numpy as np class SingletonSugarMixin: """Provide sugary syntax for :class:`Document` by inheriting methods from :...
from typing import overload, TYPE_CHECKING, Union, Callable, Optional, Tuple if TYPE_CHECKING: from ... import DocumentArray from ...typing import AnyDNN, T, ArrayType import numpy as np class SingletonSugarMixin: """Provide sugary syntax for :class:`Document` by inheriting methods from :class:`Docu...
import json import os import pytest from hubble.executor import HubExecutor from hubble.executor.hubio import HubIO from jina import __version__ from jina.orchestrate.deployments.config.helper import ( get_base_executor_version, get_image_name, to_compatible_name, ) @pytest.mark.parametrize('is_master',...
import json import os import pytest from hubble.executor import HubExecutor from hubble.executor.hubio import HubIO from jina import __version__ from jina.orchestrate.deployments.config.helper import ( get_base_executor_version, get_image_name, to_compatible_name, ) @pytest.mark.parametrize('is_master',...
# Copyright (c) OpenMMLab. All rights reserved. from ._fast_stop_training_hook import FastStopTrainingHook # noqa: F401,F403 from ._utils import (demo_mm_inputs, demo_mm_proposals, demo_mm_sampling_results, get_detector_cfg, get_roi_head_cfg) __all__ = [ 'demo_mm_inputs',...
# Copyright (c) OpenMMLab. All rights reserved. from ._utils import (demo_mm_inputs, demo_mm_proposals, demo_mm_sampling_results, get_detector_cfg, get_roi_head_cfg) __all__ = [ 'demo_mm_inputs', 'get_detector_cfg', 'get_roi_head_cfg', 'demo_mm_proposals', 'demo_mm_sam...
_base_ = [ '../_base_/models/mask-rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')] visualizer = dict(vis_backends=vis_backends) # MMEngine supports the ...
_base_ = [ '../_base_/models/mask-rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] vis_backends = [dict(type='LocalVisBackend'), dict(type='WandBVisBackend')] visualizer = dict(vis_backends=vis_backends) # MMEngine supports the ...
import inspect import re from typing import Dict, List, Tuple from huggingface_hub.utils import insecure_hashlib from .arrow import arrow from .audiofolder import audiofolder from .cache import cache from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parq...
import inspect import re from typing import Dict, List, Tuple from huggingface_hub.utils import insecure_hashlib from .arrow import arrow from .audiofolder import audiofolder from .cache import cache # noqa F401 from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pand...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
from langchain_core.document_loaders import BaseBlobParser, BaseLoader __all__ = ["BaseBlobParser", "BaseLoader"]
from langchain_core.document_loaders import BaseBlobParser, BaseLoader __all__ = ["BaseLoader", "BaseBlobParser"]
"""Utilities to route metadata within scikit-learn estimators.""" # This module is not a separate sub-folder since that would result in a circular # import issue. # # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from ._metadata_requests import ( # noqa: F401 UNCHANGED, UNUSED,...
"""Utilities to route metadata within scikit-learn estimators.""" # This module is not a separate sub-folder since that would result in a circular # import issue. # # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from ._metadata_requests import WARN, UNUSED, UNCHANGED # noqa from ._met...
from functools import partial from typing import Any, Optional import torch import torch.nn as nn from ..transforms._presets import ImageClassification from ..utils import _log_api_usage_once from ._api import register_model, Weights, WeightsEnum from ._meta import _IMAGENET_CATEGORIES from ._utils import _ovewrite_n...
from functools import partial from typing import Any, Optional import torch import torch.nn as nn from ..transforms._presets import ImageClassification from ..utils import _log_api_usage_once from ._api import register_model, Weights, WeightsEnum from ._meta import _IMAGENET_CATEGORIES from ._utils import _ovewrite_n...
import warnings from typing import Any, Dict, Union import numpy as np import PIL.Image import torch from torchvision.transforms import functional as _F from torchvision.transforms.v2 import Transform class ToTensor(Transform): """[BETA] Convert a PIL Image or ndarray to tensor and scale the values accordingly....
import warnings from typing import Any, Dict, Union import numpy as np import PIL.Image import torch from torchvision.transforms import functional as _F from torchvision.transforms.v2 import Transform class ToTensor(Transform): """[BETA] Convert a PIL Image or ndarray to tensor and scale the values accordingly....
# Copyright (c) OpenMMLab. All rights reserved. import argparse import warnings from mmcv import Config, DictAction from mmdet.utils import replace_cfg_vals, update_data_root def parse_args(): parser = argparse.ArgumentParser(description='Print the whole config') parser.add_argument('config', help='config f...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import warnings from mmcv import Config, DictAction from mmdet.utils import update_data_root def parse_args(): parser = argparse.ArgumentParser(description='Print the whole config') parser.add_argument('config', help='config file path') par...
import argparse import functools import traceback from typing import Callable, List, Optional, Tuple from torch.utils.jit.log_extract import ( extract_ir, load_graph_and_inputs, run_baseline_no_fusion, run_nnc, run_nvfuser, ) """ Usage: 1. Run your script and pipe into a log file PYTORCH_JIT_LO...
import argparse import functools import traceback from typing import Callable, List, Optional, Tuple from torch.utils.jit.log_extract import ( extract_ir, load_graph_and_inputs, run_baseline_no_fusion, run_nnc, run_nvfuser, ) """ Usage: 1. Run your script and pipe into a log file PYTORCH_JIT_LO...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class ATSS(SingleStageDetector): """Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class ATSS(SingleStageDetector): """Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_.""" def __init__(self, backbone, ...
from __future__ import annotations from typing import Any, Optional, Union import torch from ._tv_tensor import TVTensor class Video(TVTensor): """:class:`torch.Tensor` subclass for videos. Args: data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`. dt...
from __future__ import annotations from typing import Any, Optional, Union import torch from ._tv_tensor import TVTensor class Video(TVTensor): """[BETA] :class:`torch.Tensor` subclass for videos. Args: data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`. ...
""" ================================================= Novelty detection with Local Outlier Factor (LOF) ================================================= The Local Outlier Factor (LOF) algorithm is an unsupervised anomaly detection method which computes the local density deviation of a given data point with respect to...
""" ================================================= Novelty detection with Local Outlier Factor (LOF) ================================================= The Local Outlier Factor (LOF) algorithm is an unsupervised anomaly detection method which computes the local density deviation of a given data point with respect to...
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple import torch.nn as nn from mmcv.cnn import ConvModule from mmengine.model import BaseModule from torch import Tensor from mmdet.core.utils.typing import MultiConfig, OptConfigType from mmdet.models.utils import ResLayer, SimplifiedBasicBlo...
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, auto_fp16, force_fp32 from mmdet.models.utils import ResLayer, SimplifiedBasicBlock from mmdet.registry import MODELS @MODELS.register_module() class GlobalContextHead(BaseModule)...
import json import os from typing import List import torch from torch import nn class LSTM(nn.Module): """Bidirectional LSTM running over word embeddings.""" def __init__( self, word_embedding_dimension: int, hidden_dim: int, num_layers: int = 1, dropout: float = 0, ...
import torch from torch import nn from typing import List import os import json class LSTM(nn.Module): """ Bidirectional LSTM running over word embeddings. """ def __init__(self, word_embedding_dimension: int, hidden_dim: int, num_layers: int = 1, dropout: float = 0, bidirectional: bool = True): ...
_base_ = './rtmdet_s_8xb32-300e_coco.py' checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa model = dict( backbone=dict( deepen_factor=0.167, widen_factor=0.375, init_cfg=dict( type='Pretrained', pre...
_base_ = './rtmdet_s_8xb32-300e_coco.py' checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa model = dict( backbone=dict( deepen_factor=0.167, widen_factor=0.375, init_cfg=dict( type='Pretrained', pre...
import json import multiprocessing import os import time import pytest from docarray import DocumentArray from jina import Executor, requests from jina.helper import random_port from jina.parsers import set_gateway_parser, set_pod_parser from jina.serve.runtimes.gateway import GatewayRuntime from jina.serve.runtimes....
import json import multiprocessing import os import time import pytest from docarray import DocumentArray from jina import Executor, requests from jina.helper import random_port from jina.parsers import set_gateway_parser, set_pod_parser from jina.serve.runtimes.gateway import GatewayRuntime from jina.serve.runtimes....
import gc import unittest import numpy as np import pytest import torch from diffusers import FluxPipeline, FluxPriorReduxPipeline from diffusers.utils import load_image from diffusers.utils.testing_utils import ( Expectations, backend_empty_cache, numpy_cosine_similarity_distance, require_big_acceler...
import gc import unittest import numpy as np import pytest import torch from diffusers import FluxPipeline, FluxPriorReduxPipeline from diffusers.utils import load_image from diffusers.utils.testing_utils import ( backend_empty_cache, numpy_cosine_similarity_distance, require_big_accelerator, slow, ...
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch from mmengine.utils.dl_utils import torch_meshgrid def test_torch_meshgrid(): # torch_meshgrid should not throw warning with warnings.catch_warnings(): warnings.simplefilter('error') x = torch.tensor([1, 2, 3]) ...
# Copyright (c) OpenMMLab. All rights reserved. import pytest import torch from mmengine.utils.dl_utils import torch_meshgrid def test_torch_meshgrid(): # torch_meshgrid should not throw warning with pytest.warns(None) as record: x = torch.tensor([1, 2, 3]) y = torch.tensor([4, 5, 6]) ...
"""Utilities for the CI.""" import os from datetime import datetime, timedelta from functools import wraps from typing import Any, Callable, Dict, TypedDict, TypeVar, Union class DirectoryExcursion: def __init__(self, path: Union[os.PathLike, str]) -> None: self.path = path self.curdir = os.path....
"""Utilities for the CI.""" import os from datetime import datetime, timedelta from functools import wraps from typing import Any, Callable, Dict, TypedDict, TypeVar, Union class DirectoryExcursion: def __init__(self, path: Union[os.PathLike, str]) -> None: self.path = path self.curdir = os.path.n...
from typing import ( TYPE_CHECKING, Sequence, ) import numpy as np from docarray.helper import typename if TYPE_CHECKING: from docarray.typing import ( DocumentArrayIndexType, ) class DelItemMixin: """Provide a helper function to enable advanced indexing in `__delitem__`""" def __delit...
from typing import ( TYPE_CHECKING, Sequence, ) import numpy as np from docarray.helper import typename if TYPE_CHECKING: from docarray.typing import ( DocumentArrayIndexType, ) class DelItemMixin: """Provide a helper function to enable advanced indexing in `__delitem__`""" def __delit...
"""Bedrock Retriever.""" from typing import List, Optional, Dict, Any from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.callbacks.base import CallbackManager from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode from llama_index.core.utilities.aws_utils import get_...
"""Bedrock Retriever.""" from typing import List, Optional, Dict, Any from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.callbacks.base import CallbackManager from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode from llama_index.core.utilities.aws_utils import get_...
_base_ = './retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py' model = dict( backbone=dict( depth=18, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), neck=dict(in_channels=[64, 128, 256, 512]))
_base_ = './retinanet_r50_fpn_lsj_200e_8x8_fp16_coco.py' model = dict( backbone=dict( depth=18, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), neck=dict(in_channels=[64, 128, 256, 512]))
"""Pydantic output parser.""" import json from typing import Any, Generic, List, Optional, Type from llama_index.core.output_parsers.base import ChainableOutputParser from llama_index.core.output_parsers.utils import extract_json_str from llama_index.core.types import Model PYDANTIC_FORMAT_TMPL = """ Here's a JSON s...
"""Pydantic output parser.""" import json from typing import Any, Generic, List, Optional, Type from llama_index.core.output_parsers.base import ChainableOutputParser from llama_index.core.output_parsers.utils import extract_json_str from llama_index.core.types import Model PYDANTIC_FORMAT_TMPL = """ Here's a JSON s...
class WorkflowValidationError(Exception): pass class WorkflowTimeoutError(Exception): pass class WorkflowRuntimeError(Exception): pass class WorkflowDone(Exception): pass class WorkflowCancelledByUser(Exception): pass class WorkflowStepDoesNotExistError(Exception): pass class Workflo...
class WorkflowValidationError(Exception): pass class WorkflowTimeoutError(Exception): pass class WorkflowRuntimeError(Exception): pass class WorkflowDone(Exception): pass class WorkflowCancelledByUser(Exception): pass class WorkflowStepDoesNotExistError(Exception): pass
from collections import defaultdict from time import time import numpy as np from numpy import random as nr from sklearn.cluster import KMeans, MiniBatchKMeans def compute_bench(samples_range, features_range): it = 0 results = defaultdict(lambda: []) chunk = 100 max_it = len(samples_range) * len(fe...
from collections import defaultdict from time import time import numpy as np from numpy import random as nr from sklearn.cluster import KMeans, MiniBatchKMeans def compute_bench(samples_range, features_range): it = 0 results = defaultdict(lambda: []) chunk = 100 max_it = len(samples_range) * len(fe...
""" =================================== How to write your own v2 transforms =================================== .. note:: Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_ or :ref:`go to the end <sphx_glr_downloa...
""" =================================== How to write your own v2 transforms =================================== .. note:: Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_ or :ref:`go to the end <sphx_glr_downlo...
# Copyright (c) OpenMMLab. All rights reserved. import logging from typing import Any, List, Optional, Sequence, Tuple import torch from torch.nn.parameter import Parameter from torch.nn.utils import clip_grad from mmengine.data import BaseDataSample from mmengine.registry import HOOKS from .hook import Hook DATA_BA...
# Copyright (c) OpenMMLab. All rights reserved. import logging from typing import Any, List, Optional, Sequence, Tuple import torch from torch.nn.parameter import Parameter from torch.nn.utils import clip_grad from mmengine.data import BaseDataSample from mmengine.registry import HOOKS from .hook import Hook DATA_BA...
DEEPSEEK_MODEL_TO_CONTEXT_WINDOW = { "deepseek-chat": 64000, "deepseek-reasoner": 64000, } FUNCTION_CALLING_MODELS = {"deepseek-chat"} def get_context_window(model: str) -> int: return DEEPSEEK_MODEL_TO_CONTEXT_WINDOW.get(model, 64000)
DEEPSEEK_MODEL_TO_CONTEXT_WINDOW = { "deepseek-chat": 64000, "deepseek-reasoner": 64000, } def get_context_window(model: str) -> int: return DEEPSEEK_MODEL_TO_CONTEXT_WINDOW.get(model, 64000)
from typing import Any, Optional from typing_inspect import get_args, is_union_type from docarray.typing.tensor.abstract_tensor import AbstractTensor def is_type_tensor(type_: Any) -> bool: """Return True if the type is a Tensor type or an Optional Tensor type.""" return isinstance(type_, type) and issubclass(t...
from typing import Any from typing_inspect import get_args, is_union_type from docarray.typing.tensor.abstract_tensor import AbstractTensor def is_type_tensor(type_: Any) -> bool: """Return True if the type is a Tensor type or an Optional Tensor type.""" return isinstance(type_, type) and issubclass(type_, Abst...
from typing import List, Iterable import collections import string import os import json import logging from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS from transformers.utils.import_utils import is_nltk_available, NLTK_IMPORT_ERROR logger = logging.getLogger(__name__) class PhraseTokenizer(WordTokeniz...
from typing import List, Iterable import collections import string import os import json import logging from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS from transformers.utils.import_utils import is_nltk_available, NLTK_IMPORT_ERROR logger = logging.getLogger(__name__) class PhraseTokenizer(WordTokeniz...
from typing import Union from langchain_core._api import deprecated from langchain_core.language_models import BaseLanguageModel from langchain_core.output_parsers.openai_tools import PydanticToolsParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import Runnable from langchain_...
from typing import Union from langchain_core._api import deprecated from langchain_core.language_models import BaseLanguageModel from langchain_core.output_parsers.openai_tools import PydanticToolsParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import Runnable from langchain_...
""" Quantile Regression =================== .. versionadded:: 2.0.0 The script is inspired by this awesome example in sklearn: https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html .. note:: The feature is only supported using the Python, R, and C packages. In addition,...
""" Quantile Regression =================== .. versionadded:: 2.0.0 The script is inspired by this awesome example in sklearn: https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html .. note:: The feature is only supported using the Python package. In addition, quantile ...
from keras.src import tree from keras.src.api_export import keras_export from keras.src.backend import KerasTensor from keras.src.layers.layer import Layer @keras_export("keras.layers.Identity") class Identity(Layer): """Identity layer. This layer should be used as a placeholder when no operation is to be ...
from keras.src import tree from keras.src.api_export import keras_export from keras.src.backend import KerasTensor from keras.src.layers.layer import Layer @keras_export("keras.layers.Identity") class Identity(Layer): """Identity layer. This layer should be used as a placeholder when no operation is to be ...
from __future__ import annotations from pathlib import Path import torch from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.models import IDF from tests.sparse_encoder.utils import sparse_allclose def test_idf_padding_ignored(inference_free_splade_bert_tiny_model: SparseEncod...
from __future__ import annotations import torch from sentence_transformers import SparseEncoder from tests.sparse_encoder.utils import sparse_allclose def test_idf_padding_ignored(inference_free_splade_bert_tiny_model: SparseEncoder): model = inference_free_splade_bert_tiny_model input_texts = ["This is a ...
""" PostgresML index. An index that is built on top of PostgresML. """ import logging from typing import Any, List, Optional, Dict from llama_index.core.async_utils import run_async_tasks from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.callbacks.base import CallbackManager from ll...
"""PostgresML index. An index that is built on top of PostgresML. """ import logging from typing import Any, List, Optional, Dict from llama_index.core.async_utils import run_async_tasks from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.callbacks.base import CallbackManager from lla...
import pytest from typing import Dict, List from llama_index.core.llms import ChatMessage, MessageRole, TextBlock, AudioBlock from llama_index.voice_agents.elevenlabs.utils import ( callback_agent_message, callback_agent_message_correction, callback_latency_measurement, callback_user_message, ...
import pytest from typing import Dict, List from llama_index.core.llms import ChatMessage, MessageRole, TextBlock, AudioBlock from llama_index.voice_agents.elevenlabs.utils import ( callback_agent_message, callback_agent_message_correction, callback_latency_measurement, callback_user_message, ...
from typing import Dict, Type from llama_index.core.node_parser.file.html import HTMLNodeParser from llama_index.core.node_parser.file.json import JSONNodeParser from llama_index.core.node_parser.file.markdown import MarkdownNodeParser from llama_index.core.node_parser.file.simple_file import SimpleFileNodeParser from...
from typing import Dict, Type from llama_index.core.node_parser.file.html import HTMLNodeParser from llama_index.core.node_parser.file.json import JSONNodeParser from llama_index.core.node_parser.file.markdown import MarkdownNodeParser from llama_index.core.node_parser.file.simple_file import SimpleFileNodeParser from...
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import shutil import time from unittest import TestCase from unittest.mock import Mock import torch from mmengine.structures import InstanceData from mmdet.engine.hooks import DetVisualizationHook from mmdet.structures import DetDataSample from mmd...
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import shutil import time from unittest import TestCase from unittest.mock import Mock import torch from mmengine.structures import InstanceData from mmdet.engine.hooks import DetVisualizationHook from mmdet.structures import DetDataSample from mmd...
from datetime import datetime import pytest from autogpt_libs.supabase_integration_credentials_store.store import openai_credentials from prisma.models import UserBlockCredit from backend.blocks.llm import AITextGeneratorBlock from backend.data.credit import UserCredit from backend.data.user import DEFAULT_USER_ID fr...
from datetime import datetime import pytest from prisma.models import UserBlockCredit from backend.blocks.llm import AITextGeneratorBlock from backend.data.credit import UserCredit from backend.data.user import DEFAULT_USER_ID from backend.util.test import SpinTestServer REFILL_VALUE = 1000 user_credit = UserCredit(...
from typing import Any import pytest from langchain_tests.conftest import CustomPersister, CustomSerializer from langchain_tests.conftest import _base_vcr_config as _base_vcr_config from vcr import VCR # type: ignore[import-untyped] def remove_request_headers(request: Any) -> Any: for k in request.headers: ...
from typing import Any import pytest from langchain_tests.conftest import CustomPersister, CustomSerializer from langchain_tests.conftest import _base_vcr_config as _base_vcr_config from vcr import VCR # type: ignore[import-untyped] def remove_request_headers(request: Any) -> Any: for k in request.headers: ...
from typing import TYPE_CHECKING from .compass import CompassWebhookManager from .github import GithubWebhooksManager from .slant3d import Slant3DWebhooksManager if TYPE_CHECKING: from ..providers import ProviderName from ._base import BaseWebhooksManager # --8<-- [start:WEBHOOK_MANAGERS_BY_NAME] WEBHOOK_MAN...
from typing import TYPE_CHECKING from .github import GithubWebhooksManager from .slant3d import Slant3DWebhooksManager if TYPE_CHECKING: from ..providers import ProviderName from .base import BaseWebhooksManager # --8<-- [start:WEBHOOK_MANAGERS_BY_NAME] WEBHOOK_MANAGERS_BY_NAME: dict["ProviderName", type["Ba...
""" This script downloads the WikiMatrix corpus (https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix) and create parallel sentences tsv files that can be used to extend existent sentence embedding models to new languages. The WikiMatrix mined parallel sentences from Wikipedia in various languages. ...
""" This script downloads the WikiMatrix corpus (https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix) and create parallel sentences tsv files that can be used to extend existent sentence embedding models to new languages. The WikiMatrix mined parallel sentences from Wikipedia in various languages. ...
""" Wrapper script to run a command inside a Docker container """ import argparse import grp import itertools import os import pathlib import pwd import subprocess import sys import textwrap OPS_DIR = pathlib.Path(__file__).expanduser().resolve().parent PROJECT_ROOT_DIR = OPS_DIR.parent LINEWIDTH = 88 TEXT_WRAPPER = ...
""" Wrapper script to run a command inside a Docker container """ import argparse import grp import itertools import os import pathlib import pwd import subprocess import sys import textwrap OPS_DIR = pathlib.Path(__file__).expanduser().resolve().parent PROJECT_ROOT_DIR = OPS_DIR.parent LINEWIDTH = 88 TEXT_WRAPPER = ...
from __future__ import annotations from typing import Any, Dict, Iterator, List from urllib.parse import urlparse from langchain_core.embeddings import Embeddings from pydantic import BaseModel, PrivateAttr def _chunk(texts: List[str], size: int) -> Iterator[List[str]]: for i in range(0, len(texts), size): ...
from __future__ import annotations from typing import Any, Dict, Iterator, List from urllib.parse import urlparse from langchain_core.embeddings import Embeddings from pydantic import BaseModel, PrivateAttr def _chunk(texts: List[str], size: int) -> Iterator[List[str]]: for i in range(0, len(texts), size): ...
import json from typing import Dict import pytest from jina.orchestrate.deployments.config.k8slib.kubernetes_tools import get_yaml @pytest.mark.parametrize( ['template', 'params'], [ ('namespace', {'name': 'test-ns'}), ('service', {'name': 'test-svc'}), ('deployment-executor', {'name...
import json from typing import Dict import pytest from jina.orchestrate.deployments.config.k8slib.kubernetes_tools import get_yaml @pytest.mark.parametrize( ['template', 'params'], [ ('namespace', {'name': 'test-ns'}), ('service', {'name': 'test-svc'}), ('deployment', {'name': 'test-...
from sentence_transformers import SentenceTransformer from . import SentenceEvaluator from typing import Dict, Iterable class SequentialEvaluator(SentenceEvaluator): """ This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated, the data is passed sequentially to all sub-e...
from sentence_transformers import SentenceTransformer from . import SentenceEvaluator from typing import Dict, Iterable class SequentialEvaluator(SentenceEvaluator): """ This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated, the data is passed sequentially to all sub-e...
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar('T', bound='VideoNdArray')...
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar('T', bound='VideoNdArray')...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.utilities.opaqueprompts import desanitize, sanitize # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling opt...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.utilities.opaqueprompts import desanitize, sanitize # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling opt...
# Copyright (c) OpenMMLab. All rights reserved. import ast import os.path as osp import re import warnings from typing import Tuple from mmengine.fileio import load from mmengine.utils import check_file_exist PKG2PROJECT = { 'mmcls': 'mmcls', 'mmdet': 'mmdet', 'mmdet3d': 'mmdet3d', 'mmseg': 'mmsegment...
# Copyright (c) OpenMMLab. All rights reserved. import ast import os.path as osp import re import warnings from typing import Tuple from mmengine.fileio import load from mmengine.utils import check_file_exist PKG2PROJECT = { 'mmcls': 'mmcls', 'mmdet': 'mmdet', 'mmdet3d': 'mmdet3d', 'mmseg': 'mmsegment...
from llama_index.core import Document import asyncio import pytest from llama_index.graph_rag.cognee import CogneeGraphRAG @pytest.mark.asyncio() async def test_add_data(monkeypatch): # Instantiate cognee GraphRAG cogneeGraphRAG = CogneeGraphRAG( llm_api_key="", llm_provider="openai", ...
from llama_index.core import Document import asyncio import pytest from llama_index.graph_rag.cognee import CogneeGraphRAG @pytest.mark.asyncio() async def test_add_data(monkeypatch): # Instantiate cognee GraphRAG cogneeGraphRAG = CogneeGraphRAG( llm_api_key="", llm_provider="openai", ...
""" This script trains sentence transformers with a triplet loss function. As corpus, we use the wikipedia sections dataset that was describd by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks. """ import traceback from sentence_transformers import SentenceTransformer from sentence_transf...
""" This script trains sentence transformers with a triplet loss function. As corpus, we use the wikipedia sections dataset that was describd by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks. """ from sentence_transformers import SentenceTransformer, InputExample, LoggingHandler, losses...
# DO NOT EDIT. Generated by api_gen.sh from keras.api import DTypePolicy from keras.api import FloatDTypePolicy from keras.api import Function from keras.api import Initializer from keras.api import Input from keras.api import InputSpec from keras.api import KerasTensor from keras.api import Layer from keras.api import...
# DO NOT EDIT. Generated by api_gen.sh from keras.api import DTypePolicy from keras.api import FloatDTypePolicy from keras.api import Function from keras.api import Initializer from keras.api import Input from keras.api import InputSpec from keras.api import KerasTensor from keras.api import Layer from keras.api import...
from typing import NamedTuple, TypeVar import numpy as np from pydantic import parse_obj_as from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.ndarray import NdArray from docarray.typing.url.url_3d.url_3d import Url3D T = TypeVar('T', bound='Mesh3DUrl') class Mesh3DLoadResult(Na...
from typing import NamedTuple, TypeVar import numpy as np from pydantic import parse_obj_as from docarray.typing import NdArray from docarray.typing.proto_register import _register_proto from docarray.typing.url.url_3d.url_3d import Url3D T = TypeVar('T', bound='Mesh3DUrl') class Mesh3DLoadResult(NamedTuple): ...
from typing import List, Sequence from llama_index.core.agent.workflow.base_agent import BaseWorkflowAgent from llama_index.core.agent.workflow.workflow_events import ( AgentInput, AgentOutput, AgentStream, ToolCallResult, ) from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.l...
from typing import List, Sequence from llama_index.core.agent.workflow.base_agent import BaseWorkflowAgent from llama_index.core.agent.workflow.workflow_events import ( AgentInput, AgentOutput, AgentStream, ToolCallResult, ) from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.l...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.applications.resnet_v2 import ResNet50V2 as ResNet50V2 from keras.src.applications.resnet_v2 import ResNet101V2 as ResNet101V2 from keras.src.applications.resnet_v2 import ResNet152V2...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.applications.resnet_v2 import ResNet50V2 from keras.src.applications.resnet_v2 import ResNet101V2 from keras.src.applications.resnet_v2 import ResNet152V2 from keras.src.applications....
import numpy as np import pytest from docarray.utils._internal.misc import is_tf_available tf_available = is_tf_available() if tf_available: import tensorflow as tf from docarray.computation.tensorflow_backend import TensorFlowCompBackend from docarray.typing import TensorFlowTensor @pytest.mark.tensor...
import numpy as np import pytest from docarray.utils.misc import is_tf_available tf_available = is_tf_available() if tf_available: import tensorflow as tf from docarray.computation.tensorflow_backend import TensorFlowCompBackend from docarray.typing import TensorFlowTensor @pytest.mark.tensorflow @pyte...