Columns: input (string, lengths 33 to 5k), output (string, lengths 32 to 5k)
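Each row below is a pair: the input snippet on one line, followed by its output snippet on the next; long cells are truncated with an ellipsis. As a minimal sketch of how a two-column string dataset with this schema could be loaded and inspected using the Hugging Face datasets library (the dataset id "user/code-pairs" is a hypothetical placeholder, not the real repository name):

from datasets import load_dataset
import difflib

# Hypothetical dataset id; substitute the actual repository name.
ds = load_dataset("user/code-pairs", split="train")

# Each row holds two string fields matching the schema above.
row = ds[0]
print(len(row["input"]), len(row["output"]))

# Show what changed between the paired snippets.
for line in difflib.unified_diff(
    row["input"].splitlines(), row["output"].splitlines(), lineterm=""
):
    print(line)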
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( frozen_stages=0, norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False, ...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( frozen_stages=0, norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False, ...
_base_ = 'ssd300_voc0712.py' input_size = 512 model = dict( neck=dict( out_channels=(512, 1024, 512, 256, 256, 256, 256), level_strides=(2, 2, 2, 2, 1), level_paddings=(1, 1, 1, 1, 1), last_kernel_size=4), bbox_head=dict( in_channels=(512, 1024, 512, 256, 256, 256, 256),...
_base_ = 'ssd300_voc0712.py' input_size = 512 model = dict( neck=dict( out_channels=(512, 1024, 512, 256, 256, 256, 256), level_strides=(2, 2, 2, 2, 1), level_paddings=(1, 1, 1, 1, 1), last_kernel_size=4), bbox_head=dict( in_channels=(512, 1024, 512, 256, 256, 256, 256), ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.chat_loaders.utils import ( map_ai_messages, map_ai_messages_in_session, merge_chat_runs, merge_chat_runs_in_session, ) # Create a way to dynamically loo...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.chat_loaders.utils import ( map_ai_messages, map_ai_messages_in_session, merge_chat_runs, merge_chat_runs_in_session, ) # Create a way to dynamically loo...
from typing import Dict, List, Tuple import pytest from opentelemetry.metrics import Meter from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import ( HistogramDataPoint, InMemoryMetricReader, Metric, ) from jina.serve.networking.instrumentation import _NetworkingHis...
from typing import Dict, List, Tuple import pytest from opentelemetry.metrics import Meter from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import ( HistogramDataPoint, InMemoryMetricReader, Metric, ) from jina.serve.networking import _NetworkingHistograms @pytes...
""" Computes embeddings """ import numpy as np from sentence_transformers import SentenceTransformer def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None: """ Test that encode(output_value='token_embeddings') works :return: """ model = paraphrase_...
""" Computes embeddings """ import numpy as np from sentence_transformers import SentenceTransformer def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None: """ Test that encode(output_value='token_embeddings') works :return: """ model = paraphrase...
"""Download llama-pack as template.""" import logging import os import subprocess import sys from importlib import util from pathlib import Path from typing import Any, Optional, Union import requests from llama_index.core.download.utils import ( ChangeDirectory, get_file_content, initialize_directory, ...
"""Download llama-pack as template.""" import logging import os import subprocess import sys from importlib import util from pathlib import Path from typing import Any, Optional, Union import requests from llama_index.core.download.utils import ( ChangeDirectory, get_file_content, initialize_directory, ...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( depth=101, init_c...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( depth=101, init_c...
import logging from typing import Any from backend.data.block import ( Block, BlockCategory, BlockInput, BlockOutput, BlockSchema, BlockType, get_block, ) from backend.data.execution import ExecutionStatus from backend.data.model import SchemaField from backend.util import json logger = lo...
import logging from typing import Any from backend.data.block import ( Block, BlockCategory, BlockInput, BlockOutput, BlockSchema, BlockType, get_block, ) from backend.data.execution import ExecutionStatus from backend.data.model import SchemaField from backend.util import json logger = lo...
import pathlib from typing import Any, Callable, Optional, Union from .folder import default_loader from .utils import verify_str_arg from .vision import VisionDataset class StanfordCars(VisionDataset): """Stanford Cars Dataset The Cars dataset contains 16,185 images of 196 classes of cars. The data is ...
import pathlib from typing import Any, Callable, Optional, Tuple, Union from .folder import default_loader from .utils import verify_str_arg from .vision import VisionDataset class StanfordCars(VisionDataset): """Stanford Cars Dataset The Cars dataset contains 16,185 images of 196 classes of cars. The dat...
from typing import Any, Optional from typing_extensions import override from langchain_core.caches import RETURN_VAL_TYPE, BaseCache from langchain_core.globals import set_llm_cache from langchain_core.language_models import FakeListLLM class InMemoryCache(BaseCache): """In-memory cache used for testing purpose...
from typing import Any, Optional from langchain_core.caches import RETURN_VAL_TYPE, BaseCache from langchain_core.globals import set_llm_cache from langchain_core.language_models import FakeListLLM class InMemoryCache(BaseCache): """In-memory cache used for testing purposes.""" def __init__(self) -> None: ...
import numpy as np import pytest from pydantic import parse_obj_as from docarray.base_document.document import BaseDocument from docarray.documents import Mesh3D from tests import TOYDATA_DIR LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj') REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'...
import numpy as np import pytest from pydantic import parse_obj_as from docarray import BaseDocument from docarray.documents import Mesh3D from tests import TOYDATA_DIR LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj') REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj' @pytest.mark.slow @p...
from __future__ import annotations from typing import Union, Sequence, Literal import torch import torch.fft from torch.fft import * # noqa: F403 from ._typing import Array # Several torch fft functions do not map axes to dim def fftn( x: Array, /, *, s: Sequence[int] = None, axes: Sequence[int...
from __future__ import annotations from typing import TYPE_CHECKING if TYPE_CHECKING: import torch array = torch.Tensor from typing import Union, Sequence, Literal from torch.fft import * # noqa: F403 import torch.fft # Several torch fft functions do not map axes to dim def fftn( x: array, /, ...
import functools import os import os.path import pathlib from typing import Any, BinaryIO, Collection, Dict, List, Optional, Tuple, Union from torchdata.datapipes.iter import FileLister, FileOpener, Filter, IterDataPipe, Mapper from torchvision.prototype.datapoints import Label from torchvision.prototype.datasets.util...
import functools import os import os.path import pathlib from typing import Any, BinaryIO, Collection, Dict, List, Optional, Tuple, Union from torchdata.datapipes.iter import FileLister, FileOpener, Filter, IterDataPipe, Mapper from torchvision.prototype.datasets.utils import EncodedData, EncodedImage from torchvision...
#!/usr/bin/env python import functools as func import glob import os.path as osp import re import numpy as np url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/main/' files = sorted(glob.glob('../configs/*/README.md')) stats = [] titles = [] num_ckpts = 0 for f in files: url = osp.dirname(f.replace(...
#!/usr/bin/env python import functools as func import glob import os.path as osp import re import numpy as np url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/3.x/' files = sorted(glob.glob('../configs/*/README.md')) stats = [] titles = [] num_ckpts = 0 for f in files: url = osp.dirname(f.replace('...
_base_ = './sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py' num_proposals = 300 model = dict( rpn_head=dict(num_proposals=num_proposals), test_cfg=dict( _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals))) # augmentation strategy originates from DETR. train_pipeline = [ dict( type='Lo...
_base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py' num_proposals = 300 model = dict( rpn_head=dict(num_proposals=num_proposals), test_cfg=dict( _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals))) # augmentation strategy originates from DETR. train_pipeline = [ dict( typ...
from __future__ import annotations import re from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from sentence_transformers.SentenceTransformer import SentenceTransformer class SentenceEvaluator: """ Base class for all evaluators Extend this class and implement __call__ for custom evaluators. ...
from __future__ import annotations import re from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from sentence_transformers.SentenceTransformer import SentenceTransformer class SentenceEvaluator: """ Base class for all evaluators Extend this class and implement __call__ for custom evaluators. ...
import pytest from backend.util.request import validate_url @pytest.mark.parametrize( "url, trusted_origins, expected_value, should_raise", [ # Rejected IP ranges ("localhost", [], None, True), ("192.168.1.1", [], None, True), ("127.0.0.1", [], None, True), ("0.0.0.0",...
import pytest from backend.util.request import validate_url def test_validate_url(): # Rejected IP ranges with pytest.raises(ValueError): validate_url("localhost", []) with pytest.raises(ValueError): validate_url("192.168.1.1", []) with pytest.raises(ValueError): validate_ur...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Tuple, Dict, List import numpy as np from jina import Executor, requests, DocumentArray, Document from jina_commons import get_logger from jina_commons.indexers.dump import import_vectors class ...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Tuple, Dict, List import numpy as np from jina import Executor, requests, DocumentArray, Document from jina_commons import get_logger from jina_commons.indexers.dump import import_vectors class ...
from __future__ import annotations import logging from dataclasses import dataclass from sentence_transformers.data_collator import SentenceTransformerDataCollator logger = logging.getLogger(__name__) @dataclass class SparseEncoderDataCollator(SentenceTransformerDataCollator): """Collator for a SparseEncoder m...
from __future__ import annotations import logging from dataclasses import dataclass from sentence_transformers.data_collator import SentenceTransformerDataCollator logger = logging.getLogger(__name__) @dataclass class SparseEncoderDataCollator(SentenceTransformerDataCollator): """Collator for a SparseEncoder m...
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module() class MaskScoringRCNN(TwoStageDetector): """Mask Scoring RCNN. https://arxiv.org/abs/1903.00241 """ def __init__(self, backbone, ...
from ..builder import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module() class MaskScoringRCNN(TwoStageDetector): """Mask Scoring RCNN. https://arxiv.org/abs/1903.00241 """ def __init__(self, backbone, rpn_head, roi_head,...
""" LexRank implementation Source: https://github.com/crabcamp/lexrank/tree/dev """ import logging import numpy as np from scipy.sparse.csgraph import connected_components from scipy.special import softmax logger = logging.getLogger(__name__) def degree_centrality_scores( similarity_matrix, threshold=None,...
""" LexRank implementation Source: https://github.com/crabcamp/lexrank/tree/dev """ import numpy as np from scipy.sparse.csgraph import connected_components from scipy.special import softmax import logging logger = logging.getLogger(__name__) def degree_centrality_scores( similarity_matrix, threshold=None, ...
# dataset settings dataset_type = 'DeepFashionDataset' data_root = 'data/DeepFashion/In-shop/' # Example to use different file client # Method 1: simply set the data root and let the file I/O module # automatically infer from prefix (not support LMDB and Memcache yet) # data_root = 's3://openmmlab/datasets/detection/...
# dataset settings dataset_type = 'DeepFashionDataset' data_root = 'data/DeepFashion/In-shop/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dic...
# Copyright (c) OpenMMLab. All rights reserved. import asyncio import contextlib import logging import os import time from typing import List import torch logger = logging.getLogger(__name__) DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False)) @contextlib.asynccontextmanager async def comple...
import asyncio import contextlib import logging import os import time from typing import List import torch logger = logging.getLogger(__name__) DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False)) @contextlib.asynccontextmanager async def completed(trace_name='', name='', ...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( depth=101, init_c...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( depth=101, init_c...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
from __future__ import annotations from typing import Any, Dict, Optional from docarray import BaseDoc, DocList from docarray.typing import AnyEmbedding, AnyTensor class LegacyDocument(BaseDoc): """ This Document is the LegacyDocument. It follows the same schema as in DocArray <=0.21. It can be useful t...
from docarray.array.document import DocumentArray from docarray.array.storage.annlite import StorageMixins, AnnliteConfig __all__ = ['AnnliteConfig', 'DocumentArrayAnnlite'] class DocumentArrayAnnlite(StorageMixins, DocumentArray): """ DocumentArray that stores Documents in `ANNLite <https://github.com/jina-...
from .document import DocumentArray from .storage.annlite import StorageMixins, AnnliteConfig __all__ = ['AnnliteConfig', 'DocumentArrayAnnlite'] class DocumentArrayAnnlite(StorageMixins, DocumentArray): """ DocumentArray that stores Documents in `ANNLite <https://github.com/jina-ai/annlite>`_. .. note:...
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True))) # MMEngine support the following two ways, users can choose # according to convenience # optim_wrapper = di...
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True))) fp16 = dict(loss_scale=512.)
from typing import Optional import numpy as np import pytest import torch from pydantic.tools import parse_obj_as, schema_json_of from docarray import BaseDocument from docarray.base_document.io.json import orjson_dumps from docarray.typing import AudioTorchTensor, AudioUrl from docarray.utils.misc import is_tf_avail...
from typing import Optional import numpy as np import pytest import torch from pydantic.tools import parse_obj_as, schema_json_of from docarray import BaseDocument from docarray.base_document.io.json import orjson_dumps from docarray.typing import AudioTorchTensor, AudioUrl from tests import TOYDATA_DIR AUDIO_FILES ...
# Copyright (c) OpenMMLab. All rights reserved. import asyncio from argparse import ArgumentParser import mmcv from mmdet.apis import (async_inference_detector, inference_detector, init_detector) from mmdet.registry import VISUALIZERS def parse_args(): parser = ArgumentParser() parse...
# Copyright (c) OpenMMLab. All rights reserved. import asyncio from argparse import ArgumentParser import mmcv from mmdet.apis import (async_inference_detector, inference_detector, init_detector) from mmdet.registry import VISUALIZERS from mmdet.utils import register_all_modules def parse_ar...
from langchain_core.prompts.chat import ( ChatPromptTemplate, ) from langchain_core.prompts.prompt import PromptTemplate from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model DEFAULT_REFINE_PROMPT_TMPL = ( "The original question is as follows: {question}\n" "We have provide...
# flake8: noqa from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model from langchain_core.prompts.chat import ( AIMessagePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) from langchain_core.prompts.prompt import PromptTemplate ...
"""Query Understanding agent pack.""" from typing import Any, Dict, List, Optional from llama_index.core.agent import AgentRunner from llama_index.core.callbacks import CallbackManager from llama_index.core.llama_pack.base import BaseLlamaPack from llama_index.core.llms.llm import LLM from llama_index.core.tools.type...
"""Query Understanding agent pack.""" from typing import Any, Dict, List, Optional from llama_index.core.agent import AgentRunner from llama_index.core.callbacks import CallbackManager from llama_index.core.llama_pack.base import BaseLlamaPack from llama_index.core.llms.llm import LLM from llama_index.core.tools.type...
import pytest from langchain.evaluation import ExactMatchStringEvaluator @pytest.fixture def exact_match_string_evaluator() -> ExactMatchStringEvaluator: """Create an ExactMatchStringEvaluator with default configuration.""" return ExactMatchStringEvaluator() @pytest.fixture def exact_match_string_evaluator...
import pytest from langchain.evaluation import ExactMatchStringEvaluator @pytest.fixture def exact_match_string_evaluator() -> ExactMatchStringEvaluator: """Create an ExactMatchStringEvaluator with default configuration.""" return ExactMatchStringEvaluator() @pytest.fixture def exact_match_string_evaluator...
import grpc import pytest from jina import Flow from jina.clients import Client from jina.serve.helper import get_server_side_grpc_options from jina.serve.runtimes.gateway.grpc import GRPCGateway from tests import random_docs @pytest.fixture(scope='function') def flow_with_grpc(): class AuthInterceptor(grpc.aio....
import grpc import pytest from jina import Flow from jina.clients import Client from jina.serve.helper import get_server_side_grpc_options from jina.serve.runtimes.gateway.grpc import GRPCGateway from tests import random_docs @pytest.fixture(scope='function') def flow_with_grpc(monkeypatch): class AuthIntercepto...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.document_loaders import ( GoogleApiClient, GoogleApiYoutubeLoader, YoutubeLoader, ) # Create a way to dynamically look up deprecated imports. # Used to consolida...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.document_loaders import ( GoogleApiClient, GoogleApiYoutubeLoader, YoutubeLoader, ) # Create a way to dynamically look up deprecated imports. # Used to consolida...
from typing import Optional import numpy as np import torch from docarray import DocumentArray from docarray.base_document import BaseDocument from docarray.typing import NdArray, TorchTensor def test_proto_simple(): class CustomDoc(BaseDocument): text: str doc = CustomDoc(text='hello') Custom...
from typing import Optional import numpy as np import torch from docarray import DocumentArray from docarray.document import BaseDocument from docarray.typing import NdArray, TorchTensor def test_proto_simple(): class CustomDoc(BaseDocument): text: str doc = CustomDoc(text='hello') CustomDoc.f...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.document_loaders.pyspark_dataframe import ( PySparkDataFrameLoader, ) # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecat...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.document_loaders.pyspark_dataframe import ( PySparkDataFrameLoader, ) # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecat...
"""Checks the bundled license is installed with the wheel.""" import platform import site from itertools import chain from pathlib import Path site_packages = site.getsitepackages() site_packages_path = (Path(p) for p in site_packages) try: distinfo_path = next( chain( s for site...
"""Checks the bundled license is installed with the wheel.""" import platform import site from itertools import chain from pathlib import Path site_packages = site.getsitepackages() site_packages_path = (Path(p) for p in site_packages) try: distinfo_path = next( chain( s for site...
from typing import Any, Literal, Optional, Union from exa_py import Exa # type: ignore[untyped-import] from exa_py.api import ( HighlightsContentsOptions, # type: ignore[untyped-import] TextContentsOptions, # type: ignore[untyped-import] ) from langchain_core.callbacks import CallbackManagerForRetrieverRun ...
from typing import Any, Dict, List, Literal, Optional, Union from exa_py import Exa # type: ignore[untyped-import] from exa_py.api import ( HighlightsContentsOptions, # type: ignore[untyped-import] TextContentsOptions, # type: ignore[untyped-import] ) from langchain_core.callbacks import CallbackManagerForR...
_base_ = './decoupled_solo_r50_fpn_3x_coco.py' # model settings model = dict( mask_head=dict( type='DecoupledSOLOLightHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, strides=[8, 8, 16, 32, 32], scale_ranges=((1, 64), (32, 128), (64...
_base_ = './decoupled_solo_r50_fpn_3x_coco.py' # model settings model = dict( mask_head=dict( type='DecoupledSOLOLightHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, strides=[8, 8, 16, 32, 32], scale_ranges=((1, 64), (32, 128), (64...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from mmengine.data import InstanceData from parameterized import parameterized from mmdet.models.roi_heads import StandardRoIHead # noqa from mmdet.registry import MODELS from mmdet.testing import demo_mm_input...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from mmengine.data import InstanceData from parameterized import parameterized from mmdet.models.roi_heads import StandardRoIHead # noqa from mmdet.registry import MODELS from mmdet.testing import demo_mm_input...
#!/usr/bin/env python3 """Generate feature statistics for training set. Example: python global_stats.py --model-type librispeech --dataset-path /home/librispeech """ import json import logging import pathlib from argparse import ArgumentParser, RawTextHelpFormatter import torch import torchaudio from common import (...
#!/usr/bin/env python3 """Generate feature statistics for training set. Example: python global_stats.py --model-type librispeech --dataset-path /home/librispeech """ import json import logging import pathlib from argparse import ArgumentParser, RawTextHelpFormatter import torch import torchaudio from common import (...
# mypy: allow-untyped-defs r"""Autograd anomaly mode.""" import warnings import torch __all__ = ["detect_anomaly", "set_detect_anomaly"] class detect_anomaly: r"""Context-manager that enable anomaly detection for the autograd engine. This does two things: - Running the forward pass with detection en...
# mypy: allow-untyped-defs r"""Autograd anomaly mode.""" import warnings import torch __all__ = ["detect_anomaly", "set_detect_anomaly"] class detect_anomaly: r"""Context-manager that enable anomaly detection for the autograd engine. This does two things: - Running the forward pass with detection ena...
import numpy as np import pytest from tensorflow import data as tf_data from keras.src import backend from keras.src import layers from keras.src import testing class RescalingTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_rescaling_basics(self): self.run_layer_test( ...
import numpy as np import pytest from tensorflow import data as tf_data from keras.src import backend from keras.src import layers from keras.src import testing class RescalingTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_rescaling_basics(self): self.run_layer_test( ...
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
from __future__ import annotations import json from json import JSONDecodeError from typing import Annotated, Any, Optional, TypeVar, Union import jsonpatch # type: ignore[import] import pydantic from pydantic import SkipValidation from langchain_core.exceptions import OutputParserException from langchain_core.outp...
from __future__ import annotations import json from json import JSONDecodeError from typing import Annotated, Any, Optional, TypeVar, Union import jsonpatch # type: ignore[import] import pydantic from pydantic import SkipValidation from langchain_core.exceptions import OutputParserException from langchain_core.outp...
from jina import Client from docarray import DocList from docarray.documents import TextDoc if __name__ == '__main__': c = Client(host='grpc://0.0.0.0:54321') da = c.post( '/', DocList[TextDoc]([TextDoc(), TextDoc()]), return_type=DocList[TextDoc] ) print(da.text)
from jina import Client from docarray import DocList from docarray.documents import TextDoc if __name__ == '__main__': c = Client(host='grpc://0.0.0.0:54321') da = c.post('/', DocList[TextDoc]([TextDoc(), TextDoc()]), return_type=DocList[TextDoc]) print(da.text)
import json import logging from enum import Enum from typing import Any from requests.exceptions import HTTPError, RequestException from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request import requests logger = logging.getLo...
import json import logging from enum import Enum from typing import Any from requests.exceptions import HTTPError, RequestException from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request import requests logger = logging.getLo...
# Copyright (c) OpenMMLab. All rights reserved. import math import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, auto_fp16 from mmdet.models.builder import NECKS @NECKS.register_module() class CTResNetNeck(BaseModule): """The neck used in `CenterNet <https://arxiv.org/abs/19...
import math import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, auto_fp16 from mmdet.models.builder import NECKS @NECKS.register_module() class CTResNetNeck(BaseModule): """The neck used in `CenterNet <https://arxiv.org/abs/1904.07850>`_ for object classification and bo...
"""This module contains the core type definitions and protocols used throughout Dynamo. The types defined here fall into several categories: - Guard related types (GuardFn, GuardFail, GuardedCode): Used for tracking and managing guards that protect compiled code - Frame and cache types (FrameState, CacheEntry): Used f...
"""This module contains the core type definitions and protocols used throughout Dynamo. The types defined here fall into several categories: - Guard related types (GuardFn, GuardFail, GuardedCode): Used for tracking and managing guards that protect compiled code - Frame and cache types (FrameState, CacheEntry): Used f...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import subprocess import numpy as np import pytest from jina import Document, DocumentArray, Flow cur_dir = os.path.dirname(os.path.abspath(__file__)) def test_video_torch_encoder(): model_state...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import numpy as np from jina import Document, Flow, DocumentArray from ...custom_image_torch_encoder import CustomImageTorchEncoder cur_dir = os.path.dirname(os.path.abspath(__file__)) def test_vi...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model, not mutilingual but hope to see some on the hub soon m...
import logging from datasets import load_dataset from sentence_transformers.sparse_encoder import ( MLMTransformer, SparseEncoder, SparseTranslationEvaluator, SpladePooling, ) logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO) # Initialize the SP...
# Copyright (c) OpenMMLab. All rights reserved. from .builder import build_match_cost from .match_cost import BBoxL1Cost, ClassificationCost, FocalLossCost, IoUCost __all__ = [ 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost', 'FocalLossCost' ]
from .builder import build_match_cost from .match_cost import BBoxL1Cost, ClassificationCost, FocalLossCost, IoUCost __all__ = [ 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost', 'FocalLossCost' ]
_base_ = './mask-rcnn_hrnetv2p-w18-1x_coco.py' # learning policy max_epochs = 24 train_cfg = dict(max_epochs=max_epochs) param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=max_epochs, b...
_base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py' # learning policy max_epochs = 24 train_cfg = dict(max_epochs=max_epochs) param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=max_epochs, b...
import functools import importlib import os import re from pathlib import Path from typing import TYPE_CHECKING, TypeVar if TYPE_CHECKING: from backend.data.block import Block T = TypeVar("T") @functools.cache def load_all_blocks() -> dict[str, type["Block"]]: from backend.data.block import Block # Dyn...
import functools import importlib import os import re from pathlib import Path from typing import TYPE_CHECKING, TypeVar if TYPE_CHECKING: from backend.data.block import Block T = TypeVar("T") @functools.cache def load_all_blocks() -> dict[str, type["Block"]]: from backend.data.block import Block # Dyn...
import asyncio import os import random import string import tempfile import time import pytest from jina import helper @pytest.fixture(scope='function') def random_workspace_name(): """Generate a random workspace name with digits and letters.""" rand = ''.join(random.choices(string.ascii_uppercase + string....
import asyncio import os import random import string import tempfile import time import pytest from jina import helper @pytest.fixture(scope='function') def random_workspace_name(): """Generate a random workspace name with digits and letters.""" rand = ''.join(random.choices(string.ascii_uppercase + string....
from typing import Iterable, Type from docarray.array.abstract_array import AbstractDocumentArray from docarray.array.mixins import GetAttributeArrayMixin, ProtoArrayMixin from docarray.document import AnyDocument, BaseDocument, BaseNode class DocumentArray( list, ProtoArrayMixin, GetAttributeArrayMixin,...
from typing import Iterable, Type from docarray.array.abstract_array import AbstractDocumentArray from docarray.array.mixins import GetAttributeArrayMixin, ProtoArrayMixin from docarray.document import AnyDocument, BaseDocument, BaseNode from docarray.document.abstract_document import AbstractDocument class Document...
from __future__ import annotations import os import pytest from sentence_transformers import SentenceTransformer from sentence_transformers.models import Pooling, Transformer from sentence_transformers.util import is_datasets_available from tests.utils import SafeTemporaryDirectory if is_datasets_available(): f...
from __future__ import annotations import os import pytest from sentence_transformers import CrossEncoder, SentenceTransformer from sentence_transformers.models import Pooling, Transformer from sentence_transformers.util import is_datasets_available from tests.utils import SafeTemporaryDirectory if is_datasets_avai...
from typing import TYPE_CHECKING from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available def text_encoder_lora_state_dict(text_encoder): deprecate( "text_encoder_load_state_dict in `models`", ...
from typing import TYPE_CHECKING from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available def text_encoder_lora_state_dict(text_encoder): deprecate( "text_encoder_load_state_dict in `models`", ...
_base_ = 'mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py' # noqa # Enable automatic-mixed-precision training with AmpOptimWrapper. optim_wrapper = dict(type='AmpOptimWrapper')
_base_ = 'mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' # Enable automatic-mixed-precision training with AmpOptimWrapper. optim_wrapper = dict(type='AmpOptimWrapper')
# pants requires this import to recognize the dep import pytest_asyncio # noqa: F401 import pytest import os from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface from llama_index.embeddings.nvidia.base import DEFAULT_MODEL from typing import Generator # this fixture is used to mask the NVIDIA_AP...
import pytest import os from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface from llama_index.embeddings.nvidia.base import DEFAULT_MODEL from typing import Generator # this fixture is used to mask the NVIDIA_API_KEY environment variable and restore it # after the test. it also returns the value o...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
import collections.abc import dataclasses from typing import Optional, Sequence import pytest import torch from torch.nn.functional import one_hot from torchvision.prototype import datapoints from transforms_v2_legacy_utils import combinations_grid, DEFAULT_EXTRA_DIMS, from_loader, from_loaders, TensorLoader @data...
import collections.abc import dataclasses from typing import Optional, Sequence import pytest import torch from common_utils import combinations_grid, DEFAULT_EXTRA_DIMS, from_loader, from_loaders, TensorLoader from torch.nn.functional import one_hot from torchvision.prototype import datapoints @dataclasses.datacl...
import types from typing_extensions import TYPE_CHECKING from docarray.typing.tensor.image.image_ndarray import ImageNdArray from docarray.typing.tensor.image.image_tensor import ImageTensor from docarray.utils._internal.misc import ( _get_path_from_docarray_root_level, import_library, ) if TYPE_CHECKING: ...
import types from typing_extensions import TYPE_CHECKING from docarray.typing.tensor.image.image_ndarray import ImageNdArray from docarray.typing.tensor.image.image_tensor import ImageTensor from docarray.utils._internal.misc import ( _get_path_from_docarray_root_level, import_library, ) if TYPE_CHECKING: ...
from __future__ import annotations try: from typing import Self except ImportError: from typing_extensions import Self import torch import transformers from PIL import Image from sentence_transformers.models.Asym import InputModule class CLIPModel(InputModule): save_in_root: bool = True def __init...
from __future__ import annotations import torch import transformers from PIL import Image from torch import nn class CLIPModel(nn.Module): save_in_root: bool = True def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None: super().__init__() if proce...
from __future__ import annotations from collections.abc import Iterable from enum import Enum from typing import Any import torch.nn.functional as F from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer from sentence_transformers.util import pairwise_cos_sim, pairwise...
from __future__ import annotations from collections.abc import Iterable from enum import Enum from typing import Any import torch.nn.functional as F from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer from sentence_transformers.util import pairwise_cos_sim, pairwise...
"""LLMResult class.""" from __future__ import annotations from copy import deepcopy from typing import Literal, Optional, Union from pydantic import BaseModel from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk from langchain_core.outputs.generation import Generation, GenerationCh...
"""LLMResult class.""" from __future__ import annotations from copy import deepcopy from typing import Literal, Optional, Union from pydantic import BaseModel from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk from langchain_core.outputs.generation import Generation, GenerationCh...
# Copyright (c) OpenMMLab. All rights reserved. from .visualizer import Visualizer from .writer import (BaseWriter, ComposedWriter, LocalWriter, TensorboardWriter, WandbWriter) __all__ = [ 'Visualizer', 'BaseWriter', 'LocalWriter', 'WandbWriter', 'TensorboardWriter', 'ComposedWriter' ]
# Copyright (c) OpenMMLab. All rights reserved. from .visualizer import Visualizer __all__ = ['Visualizer']
""" This examples measures the inference speed of a certain model Usage: python evaluation_inference_speed.py OR python evaluation_inference_speed.py model_name """ import sys import time import torch from datasets import load_dataset from sentence_transformers import SentenceTransformer # Limit torch to 4 threads...
""" This examples measures the inference speed of a certain model Usage: python evaluation_inference_speed.py OR python evaluation_inference_speed.py model_name """ from sentence_transformers import SentenceTransformer import sys import time import torch from datasets import load_dataset # Limit torch to 4 threads t...
from typing import Iterable, Dict from ..base.getsetdel import BaseGetSetDelMixin from ..base.helper import Offset2ID from .... import Document class GetSetDelMixin(BaseGetSetDelMixin): """Provide concrete implementation for ``__getitem__``, ``__setitem__``, and ``__delitem__`` for ``DocumentArrayElastic``""...
from typing import Iterable, Dict from ..base.getsetdel import BaseGetSetDelMixin from ..base.helper import Offset2ID from .... import Document class GetSetDelMixin(BaseGetSetDelMixin): """Provide concrete implementation for ``__getitem__``, ``__setitem__``, and ``__delitem__`` for ``DocumentArrayElastic``""...
from docarray.array.any_array import AnyDocArray from docarray.array.doc_list.doc_list import DocList from docarray.array.doc_vec.doc_vec import DocVec __all__ = ['DocList', 'DocVec', 'AnyDocArray']
from docarray.array.array.array import DocArray from docarray.array.stacked.array_stacked import DocArrayStacked __all__ = ['DocArray', 'DocArrayStacked']
import logging import time from datetime import datetime from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.triggers.cron import CronTrigger from autogpt_libs.utils.cache import thread_cached from backend.data.block import BlockInput from backend.data.schedule import ( ExecutionSch...
import logging import time from datetime import datetime from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.triggers.cron import CronTrigger from autogpt_libs.utils.cache import thread_cached_property from backend.data.block import BlockInput from backend.data.schedule import ( Exe...
# Copyright (c) OpenMMLab. All rights reserved. import os import platform import warnings import cv2 import torch.multiprocessing as mp from mmengine import DefaultScope def setup_multi_processes(cfg): """Setup multi-processing environment variables.""" # set multi-process start method as `fork` to speed up ...
# Copyright (c) OpenMMLab. All rights reserved. import os import platform import warnings import cv2 import torch.multiprocessing as mp def setup_multi_processes(cfg): """Setup multi-processing environment variables.""" # set multi-process start method as `fork` to speed up the training if platform.syste...
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip from . import functional, utils # usort: skip from ._transform import Transform # usort: skip from ._augment import Cutmix, Mixup, RandomErasing from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide fro...
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip from . import functional, utils # usort: skip from ._transform import Transform # usort: skip from ._augment import Cutmix, Mixup, RandomErasing from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide fro...
import numpy as np from docarray import BaseDocument from docarray.typing import NdArray def test_set_tensor(): class MyDocument(BaseDocument): tensor: NdArray d = MyDocument(tensor=np.zeros((3, 224, 224))) assert isinstance(d.tensor, NdArray) assert isinstance(d.tensor, np.ndarray) ass...
import numpy as np from docarray import Document from docarray.typing import NdArray def test_set_tensor(): class MyDocument(Document): tensor: NdArray d = MyDocument(tensor=np.zeros((3, 224, 224))) assert isinstance(d.tensor, NdArray) assert isinstance(d.tensor, np.ndarray) assert (d.t...
import os from pathlib import Path from typing import List, Optional, Tuple, Union import torch import torchaudio from torch.hub import download_url_to_file from torch.utils.data import Dataset from torchaudio.datasets.utils import extract_archive _URL = "https://zenodo.org/record/3338373/files/musdb18hq.zip" _CHECKS...
import os from pathlib import Path from typing import List, Optional, Tuple, Union import torch import torchaudio from torch.hub import download_url_to_file from torch.utils.data import Dataset from torchaudio.datasets.utils import extract_archive _URL = "https://zenodo.org/record/3338373/files/musdb18hq.zip" _CHECKS...
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import bbox_overlaps, get_box_tensor def cast_tensor_type(x, scale=1., dtype=None): if dtype == 'fp16': # scale is for preventing overflows x = (x / scale).half() retu...
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import bbox_overlaps, get_box_tensor def cast_tensor_type(x, scale=1., dtype=None): if dtype == 'fp16': # scale is for preventing overflows x = (x / scale).half() retu...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import torch.nn.functional as F import torchvision import torchvision.transforms as transforms from torch.optim import SGD from mmengine.evaluator import BaseMetric from mmengine.model import BaseModel from mmengine.runner import Runner class MMResNet5...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import torch.nn.functional as F import torchvision import torchvision.transforms as transforms from torch.optim import SGD from torch.utils.data import DataLoader from mmengine.evaluator import BaseMetric from mmengine.model import BaseModel from mmengin...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='PAA', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=di...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='PAA', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=di...
__version__ = '0.16.2' import os from docarray.document import Document from docarray.array import DocumentArray from docarray.dataclasses import dataclass, field if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
__version__ = '0.16.1' import os from docarray.document import Document from docarray.array import DocumentArray from docarray.dataclasses import dataclass, field if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
from __future__ import annotations from typing import Any, Optional, Union import PIL.Image import torch from ._tv_tensor import TVTensor class Image(TVTensor): """:class:`torch.Tensor` subclass for images with shape ``[..., C, H, W]``. .. note:: In the :ref:`transforms <transforms>`, ``Image`` i...
from __future__ import annotations from typing import Any, Optional, Union import PIL.Image import torch from ._tv_tensor import TVTensor class Image(TVTensor): """:class:`torch.Tensor` subclass for images. .. note:: In the :ref:`transforms <transforms>`, ``Image`` instances are largely i...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' image_size = (1024, 1024) file_client_args = dict(backend='disk') # comment out the code below to use different file client # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # ...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' image_size = (1024, 1024) file_client_args = dict(backend='disk') # comment out the code below to use different file client # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # ...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' ] model = dict( type='SingleStageDetector', backbone=dict( type='MobileNetV2', out_indices=(4, 7), norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), init_cfg=dict(type='TruncNormal', layer='C...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' ] model = dict( type='SingleStageDetector', backbone=dict( type='MobileNetV2', out_indices=(4, 7), norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), init_cfg=dict(type='TruncNormal', layer='C...
"""**Text Splitters** are classes for splitting text. **Class hierarchy:** .. code-block:: BaseDocumentTransformer --> TextSplitter --> <name>TextSplitter # Example: CharacterTextSplitter RecursiveCharacterTextSplitter --> <name>TextSplitter Note: **MarkdownHea...
"""**Text Splitters** are classes for splitting text. **Class hierarchy:** .. code-block:: BaseDocumentTransformer --> TextSplitter --> <name>TextSplitter # Example: CharacterTextSplitter RecursiveCharacterTextSplitter --> <name>TextSplitter Note: **MarkdownHea...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.regularizers import deserialize as deserialize from keras.src.regularizers import get as get from keras.src.regularizers import serialize as serialize from keras.src.regularizers.regu...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.regularizers import deserialize from keras.src.regularizers import get from keras.src.regularizers import serialize from keras.src.regularizers.regularizers import L1 from keras.src.r...
from PIL import Image from sentence_transformers import SentenceTransformer, models, util ########### image = Image.open("two_dogs_in_snow.jpg") from transformers import CLIPModel, CLIPProcessor model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip...
from PIL import Image from sentence_transformers import SentenceTransformer, models, util ########### image = Image.open("two_dogs_in_snow.jpg") from transformers import CLIPModel, CLIPProcessor model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.datasets.california_housing import load_data as load_data
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.datasets.california_housing import load_data
from __future__ import annotations from collections.abc import Iterable from torch import Tensor from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseTripletLoss(TripletLoss): def __init_...
from __future__ import annotations from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseTripletLoss(TripletLoss): def __init__( self, model: SparseEncoder, distance_metric=TripletDi...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet.registry import MODELS from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg from mmdet.utils import register_all_modules class TestGr...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet.registry import MODELS from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg from mmdet.utils import register_all_modules class TestGr...
from docarray.typing.bytes import ImageBytes from docarray.typing.id import ID from docarray.typing.tensor import ImageNdArray, ImageTensor from docarray.typing.tensor.audio import AudioNdArray from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding from docarray.typing.tensor.ndarray impo...
from docarray.typing.bytes import ImageBytes from docarray.typing.id import ID from docarray.typing.tensor import ImageNdArray, ImageTensor from docarray.typing.tensor.audio import AudioNdArray from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding from docarray.typing.tensor.ndarray impo...
from langchain_core.output_parsers.json import ( SimpleJsonOutputParser, ) from langchain_core.utils.json import ( parse_and_check_json_markdown, parse_json_markdown, parse_partial_json, ) __all__ = [ "SimpleJsonOutputParser", "parse_and_check_json_markdown", "parse_json_markdown", "par...
from langchain_core.output_parsers.json import ( SimpleJsonOutputParser, ) from langchain_core.utils.json import ( parse_and_check_json_markdown, parse_json_markdown, parse_partial_json, ) __all__ = [ "SimpleJsonOutputParser", "parse_partial_json", "parse_json_markdown", "parse_and_chec...
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _additional_imports = {} _import_structure = {"pipeline_output": ["ChromaPi...
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _additional_imports = {} _import_structure = {"pipeline_output": ["ChromaPi...
""" Mbox parser. Contains simple parser for mbox files. """ import logging from pathlib import Path from typing import Any, Dict, List, Optional from fsspec import AbstractFileSystem from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document logger = logging.getLogger(__name_...
"""Mbox parser. Contains simple parser for mbox files. """ import logging from pathlib import Path from typing import Any, Dict, List, Optional from fsspec import AbstractFileSystem from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document logger = logging.getLogger(__name__...
import torch from torch import nn from typing import List import os import json class CNN(nn.Module): """CNN-layer with multiple kernel-sizes over the word embeddings""" def __init__( self, in_word_embedding_dimension: int, out_channels: int = 256, kernel_sizes: List[int] = [1...
import torch from torch import nn from typing import List import os import json class CNN(nn.Module): """CNN-layer with multiple kernel-sizes over the word embeddings""" def __init__( self, in_word_embedding_dimension: int, out_channels: int = 256, kernel_sizes: List[int] = [1...
from typing import Any, Optional from typing_extensions import get_origin from typing_inspect import get_args, is_typevar, is_union_type from docarray.typing.id import ID from docarray.typing.tensor.abstract_tensor import AbstractTensor def is_type_tensor(type_: Any) -> bool: """Return True if type is a type Te...
from typing import Any, Optional from typing_extensions import get_origin from typing_inspect import get_args, is_typevar, is_union_type from docarray.typing.tensor.abstract_tensor import AbstractTensor def is_type_tensor(type_: Any) -> bool: """Return True if type is a type Tensor or an Optional Tensor type.""...
import pytest import torch from pydantic.tools import parse_obj_as, schema_json_of from docarray.base_document.io.json import orjson_dumps from docarray.typing import TorchEmbedding, TorchTensor def test_proto_tensor(): tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224)) tensor._to_node_protobuf()...
import pytest import torch from pydantic.tools import parse_obj_as, schema_json_of from docarray.base_document.io.json import orjson_dumps from docarray.typing import TorchEmbedding, TorchTensor def test_proto_tensor(): tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224)) tensor._to_node_protobuf()...
import asyncio import numpy as np from typing import Any, List, Literal, Optional from llama_index.core.base.embeddings.base import BaseEmbedding from llama_index.core.bridge.pydantic import Field, PrivateAttr, ConfigDict from fastembed import TextEmbedding class FastEmbedEmbedding(BaseEmbedding): """ Qdrant...
from typing import Any, List, Literal, Optional import numpy as np from llama_index.core.base.embeddings.base import BaseEmbedding from llama_index.core.bridge.pydantic import Field, PrivateAttr from fastembed import TextEmbedding class FastEmbedEmbedding(BaseEmbedding): """ Qdrant FastEmbedding models. ...
import io import logging from enum import Enum import replicate import replicate.exceptions import requests from replicate.helpers import FileOutput from backend.data.graph import Graph from backend.util.settings import Settings logger = logging.getLogger(__name__) class ImageSize(str, Enum): LANDSCAPE = "1024...
import io import logging from enum import Enum import replicate import replicate.exceptions import requests from replicate.helpers import FileOutput from backend.data.graph import Graph from backend.util.settings import Settings logger = logging.getLogger(__name__) class ImageSize(str, Enum): LANDSCAPE = "1024...