input: string, lengths 33 to 5k
output: string, lengths 32 to 5k
The rows below alternate between these two columns: each input cell is immediately followed by its paired output cell, and long cells are truncated with a trailing ellipsis.
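Every record pairs two Python source strings under this schema. As a minimal sketch of iterating a dataset with these two string columns via the Hugging Face datasets library (the repository id below is a hypothetical placeholder, not this dataset's actual name):

from datasets import load_dataset

# Hypothetical repository id: substitute the dataset's real name.
ds = load_dataset("org/code-edit-pairs", split="train")

# Each row is a dict with two string fields matching the schema above:
# "input" (33 to 5k characters) and "output" (32 to 5k characters).
for row in ds.select(range(3)):
    print(len(row["input"]), len(row["output"]))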
import argparse from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from tarfile import TarFile from zipfile import ZipFile import torch def parse_args(): parser = argparse.ArgumentParser( description='Download datasets for training') parser.add_argument(...
import argparse from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from tarfile import TarFile from zipfile import ZipFile import torch def parse_args(): parser = argparse.ArgumentParser( description='Download datasets for training') parser.add_argument(...
"""langchain-core version information and utilities.""" VERSION = "0.3.57"
"""langchain-core version information and utilities.""" VERSION = "0.3.56"
from typing import Any from llama_index.core.bridge.pydantic import model_serializer from llama_index.core.tools import ToolSelection, ToolOutput from llama_index.core.llms import ChatMessage from llama_index.core.workflow import Event, StartEvent class AgentInput(Event): """LLM input.""" input: list[ChatMe...
from typing import Any from llama_index.core.tools import ToolSelection, ToolOutput from llama_index.core.llms import ChatMessage from llama_index.core.workflow import Event class AgentInput(Event): """LLM input.""" input: list[ChatMessage] current_agent_name: str class AgentSetup(Event): """Agent...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import pytest import torch from mmdet.core.bbox import distance2bbox from mmdet.core.mask.structures import BitmapMasks, PolygonMasks from mmdet.core.utils import center_of_mass, mask2ndarray def dummy_raw_polygon_masks(size): """ Args: ...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import pytest import torch from mmdet.core.bbox import distance2bbox from mmdet.core.mask.structures import BitmapMasks, PolygonMasks from mmdet.core.utils import center_of_mass, mask2ndarray def dummy_raw_polygon_masks(size): """ Args: ...
_base_ = './cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py' # noqa # please install mmcls>=1.0 # import mmcls.models to trigger register_module in mmcls custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) checkpoint_file = 'https://download.openmmlab.com/mmclassifi...
_base_ = './cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py' # noqa # please install mmcls>=1.0 # import mmcls.models to trigger register_module in mmcls custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) checkpoint_file = 'https://download.openmmlab.com/mmclassifi...
"""Standard LangChain interface tests""" import os from typing import Type import pytest from langchain_core.language_models import BaseChatModel from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_openai import AzureChatOpenAI OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API...
"""Standard LangChain interface tests""" import os from typing import Type import pytest from langchain_core.language_models import BaseChatModel from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_openai import AzureChatOpenAI OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API...
"""Torch backend APIs. # Note on device placement Torch has a different device placement style compared to TF and JAX. In short, variables/tensors are not created on GPU by default, and the GPU cannot directly communicate with the CPU. To bring Torch behavior in line with TF and JAX automated device placement, we are...
"""Torch backend APIs. # Note on device placement Torch has a different device placement style compared to TF and JAX. In short, variables/tensors are not created on GPU by default, and the GPU cannot directly communicate with the CPU. To bring Torch behavior in line with TF and JAX automated device placement, we are...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Any, Dict, List, Optional, Union import torch from mmengine.optim.optimizer._deepspeed import DeepSpeedOptimWrapper from mmengine.registry import MODEL_WRAPPERS try: from deepspeed.runtime.engine import DeepSpeedEngine except ImportError: Dee...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Any, Dict, List, Optional, Union import torch from deepspeed.runtime.engine import DeepSpeedEngine from mmengine.optim.optimizer._deepspeed import DeepSpeedOptimWrapper from mmengine.registry import MODEL_WRAPPERS @MODEL_WRAPPERS.register_module() c...
""" This scripts demonstrates how to train a Sparse Encoder model for Information Retrieval. As dataset, we use sentence-transformers/msmarco-bm25, where we have triplets versions of MSMARCO mined thanks to BM25. As loss function, we use MultipleNegativesRankingLoss in the SpladeLoss. """ import logging import trac...
""" This scripts demonstrates how to train a Sparse Encoder model for Information Retrieval. As dataset, we use sentence-transformers/msmarco-bm25, where we have triplets versions of MSMARCO mined thanks to BM25. As loss function, we use MultipleNegativesRankingLoss in the SpladeLoss. """ import logging import trac...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
import numpy as np import torch from docarray import Document, Image, Text from docarray.typing import ( AnyUrl, Embedding, ImageUrl, Mesh3DUrl, NdArray, PointCloud3DUrl, Tensor, TextUrl, TorchEmbedding, TorchTensor, ) from docarray.typing.tensor import NdArrayEmbedding def te...
import numpy as np import torch from docarray import Document, Image, Text from docarray.typing import ( AnyUrl, Embedding, ImageUrl, NdArray, Tensor, TextUrl, TorchEmbedding, TorchTensor, ) from docarray.typing.tensor import NdArrayEmbedding def test_multi_modal_doc_proto(): clas...
#!/usr/bin/env python3 """Convert the fairseq models available in voxpopuli repo https://github.com/facebookresearch/voxpopuli The available checkpoints should open with fairseq. But the following error cannot be resolved with almost any version of fairseq. https://github.com/facebookresearch/voxpopuli/issues/29 So t...
#!/usr/bin/env python3 """Convert the fairseq models available in voxpopuli repo https://github.com/facebookresearch/voxpopuli The available checkpoints should open with fairseq. But the following error cannot be resolved with almost any version of fairseq. https://github.com/facebookresearch/voxpopuli/issues/29 So t...
from sentence_transformers import models from sentence_transformers.sparse_encoder import SparseEncoder from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling print("# ------------------------------------------example with v2 distill-----------------------------------------") doc_en...
from sentence_transformers import models from sentence_transformers.sparse_encoder import SparseEncoder from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling print("# ------------------------------------------example with v2 distill-----------------------------------------") doc_en...
import jwt # noqa import pytest from llama_index.core import Document from llama_index.core.vector_stores.types import ( BasePydanticVectorStore, MetadataFilter, MetadataFilters, FilterCondition, FilterOperator, ) from llama_index.vector_stores.deeplake import DeepLakeVectorStore def test_class(...
import jwt # noqa from llama_index.core import Document from llama_index.core.vector_stores.types import ( BasePydanticVectorStore, MetadataFilter, MetadataFilters, FilterCondition, FilterOperator, ) from llama_index.vector_stores.deeplake import DeepLakeVectorStore def test_class(): names_o...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.image import affine_transform as affine_transform from keras.src.ops.image import crop_images as crop_images from keras.src.ops.image import elastic_transform as elastic_transform...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.image import affine_transform from keras.src.ops.image import crop_images from keras.src.ops.image import elastic_transform from keras.src.ops.image import extract_patches from ke...
import os import shutil import subprocess import numpy as np import PIL.Image as Image import pytest from jina import Document, Flow cur_dir = os.path.dirname(os.path.abspath(__file__)) def data_generator(num_docs): for i in range(num_docs): doc = Document(uri=os.path.join(cur_dir, '..', 'imgs', 'cat.jp...
import os import shutil import subprocess import numpy as np import PIL.Image as Image import pytest from jina import Document, Flow cur_dir = os.path.dirname(os.path.abspath(__file__)) def data_generator(num_docs): for i in range(num_docs): doc = Document(uri=os.path.join(cur_dir, '..', 'test_data', 't...
# pylint: disable=invalid-name,unused-import """For compatibility and optional dependencies.""" import functools import importlib.util import logging import sys import types from typing import Any, Sequence, cast import numpy as np from ._typing import _T assert sys.version_info[0] == 3, "Python 2 is no longer suppo...
# pylint: disable=invalid-name,unused-import """For compatibility and optional dependencies.""" import importlib.util import logging import sys import types from typing import Any, Sequence, cast import numpy as np from ._typing import _T assert sys.version_info[0] == 3, "Python 2 is no longer supported." def py_s...
_base_ = [ '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) METAINFO = { 'classes': ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'dinin...
_base_ = [ '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) METAINFO = { 'CLASSES': ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'dinin...
# Copyright (c) OpenMMLab. All rights reserved. from functools import partial from typing import Optional import torch TORCH_VERSION = torch.__version__ def is_rocm_pytorch() -> bool: """Check whether the PyTorch is compiled on ROCm.""" is_rocm = False if TORCH_VERSION != 'parrots': try: ...
# Copyright (c) OpenMMLab. All rights reserved. from functools import partial from typing import Optional import torch TORCH_VERSION = torch.__version__ def is_rocm_pytorch() -> bool: is_rocm = False if TORCH_VERSION != 'parrots': try: from torch.utils.cpp_extension import ROCM_HOME ...
_base_ = [ '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' ] # model settings model = dict( type='CornerNet', backbone=dict( type='HourglassNet', downsample_times=5, num_stacks=2, stage_channels=[256, 256, 384, 384, 384, 512], stage_blocks=[2, ...
_base_ = [ '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' ] # model settings model = dict( type='CornerNet', backbone=dict( type='HourglassNet', downsample_times=5, num_stacks=2, stage_channels=[256, 256, 384, 384, 384, 512], stage_blocks=[2, ...
from __future__ import annotations from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import ( SparseBinaryClassificationEvaluator, ) from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import ( SparseEmbeddingSimilarityEvaluator, ) from...
from __future__ import annotations from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import ( SparseBinaryClassificationEvaluator, ) from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import ( SparseEmbeddingSimilarityEvaluator, ) from...
""" This file evaluates CrossEncoder on the TREC 2019 Deep Learning (DL) Track: https://arxiv.org/abs/2003.07820 TREC 2019 DL is based on the corpus of MS Marco. MS Marco provides a sparse annotation, i.e., usually only a single passage is marked as relevant for a given query. Many other highly relevant passages are n...
""" This file evaluates CrossEncoder on the TREC 2019 Deep Learning (DL) Track: https://arxiv.org/abs/2003.07820 TREC 2019 DL is based on the corpus of MS Marco. MS Marco provides a sparse annotation, i.e., usually only a single passage is marked as relevant for a given query. Many other highly relevant passages are n...
# Basic unittests to test functioning of module's top-level __author__ = "Yaroslav Halchenko" __license__ = "BSD" try: from sklearn import * # noqa: F403 _top_import_error = None except Exception as e: _top_import_error = e def test_import_skl(): # Test either above import has failed for some re...
# Basic unittests to test functioning of module's top-level __author__ = "Yaroslav Halchenko" __license__ = "BSD" try: from sklearn import * # noqa _top_import_error = None except Exception as e: _top_import_error = e def test_import_skl(): # Test either above import has failed for some reason ...
"""Module for async requests generator.""" from typing import AsyncIterator, Optional, Dict, TYPE_CHECKING from jina.clients.request.helper import _new_data_request_from_batch, _new_data_request from jina.enums import DataInputType from jina.importer import ImportExtensions from jina.logging.predefined import default...
"""Module for async requests generator.""" from typing import AsyncIterator, Optional, Dict, TYPE_CHECKING from jina.clients.request.helper import _new_data_request_from_batch, _new_data_request from jina.enums import DataInputType from jina.importer import ImportExtensions from jina.logging.predefined import default...
_base_ = [ '../_base_/models/cascade-rcnn_r50_fpn.py', '../common/lsj-200e_coco-detection.py' ] image_size = (1024, 1024) batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] # disable allowed_border to avoid potential errors. model = dict( data_preprocessor=dict(batch_augments=batch_augments...
_base_ = [ '../_base_/models/cascade_rcnn_r50_fpn.py', '../common/lsj_200e_coco_detection.py' ] image_size = (1024, 1024) batch_augments = [dict(type='BatchFixedSizePad', size=image_size)] # disable allowed_border to avoid potential errors. model = dict( data_preprocessor=dict(batch_augments=batch_augments...
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.a...
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.a...
_base_ = './ms-rcnn_r50-caffe_fpn_1x_coco.py' # learning policy max_epochs = 24 train_cfg = dict( type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', ...
_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py' # learning policy max_epochs = 24 train_cfg = dict( type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', ...
import contextlib import logging import typing import fastapi import fastapi.responses import starlette.middleware.cors import uvicorn from autogpt_libs.feature_flag.client import ( initialize_launchdarkly, shutdown_launchdarkly, ) import backend.data.block import backend.data.db import backend.data.graph imp...
import contextlib import logging import typing import fastapi import fastapi.responses import starlette.middleware.cors import uvicorn import backend.data.block import backend.data.db import backend.data.graph import backend.data.user import backend.server.routers.v1 import backend.util.service import backend.util.se...
from typing import TypeVar from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow T = TypeVar('T', bound='AudioTensorFlowTensor') @_register_pr...
from typing import TypeVar from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow T = TypeVar('T', bound='AudioTensorFlowTensor') @_register_pr...
import base64 import json import pickle from abc import ABC, abstractmethod from typing import Any from pydantic import BaseModel from llama_index.core.schema import BaseComponent from .utils import import_module_from_qualified_name, get_qualified_name class BaseSerializer(ABC): @abstractmethod def serialize...
import base64 import json import pickle from abc import ABC, abstractmethod from typing import Any from pydantic import BaseModel from llama_index.core.schema import BaseComponent from .utils import import_module_from_qualified_name, get_qualified_name class BaseSerializer(ABC): @abstractmethod def serialize...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import pytest import torch from mmengine import MessageHub class TestMessageHub: def test_init(self): message_hub = MessageHub('name') assert message_hub.instance_name == 'name' assert len(message_hub.log_buffers) == 0 ...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import pytest import torch from mmengine import MessageHub class TestMessageHub: def test_init(self): message_hub = MessageHub('name') assert message_hub.instance_name == 'name' assert len(message_hub.log_buffers) == 0 ...
# Copyright (c) OpenMMLab. All rights reserved. import os import os.path as osp from pathlib import Path from .misc import is_str def is_filepath(x): return is_str(x) or isinstance(x, Path) def fopen(filepath, *args, **kwargs): if is_str(filepath): return open(filepath, *args, **kwargs) elif is...
# Copyright (c) OpenMMLab. All rights reserved. import os import os.path as osp from pathlib import Path from .misc import is_str def is_filepath(x): return is_str(x) or isinstance(x, Path) def fopen(filepath, *args, **kwargs): if is_str(filepath): return open(filepath, *args, **kwargs) elif is...
from typing import Optional import numpy as np import pytest from pydantic import BaseModel, ValidationError from typing_extensions import TypedDict from docarray import BaseDoc, DocList from docarray.documents import AudioDoc, ImageDoc, TextDoc from docarray.documents.helper import ( create_doc, create_doc_f...
from typing import Optional import numpy as np import pytest from pydantic import BaseModel, ValidationError from typing_extensions import TypedDict from docarray import BaseDoc, DocArray from docarray.documents import AudioDoc, ImageDoc, TextDoc from docarray.documents.helper import ( create_doc, create_doc_...
import importlib import os import re import types from typing import Any, Optional, Literal import numpy as np try: import torch # noqa: F401 except ImportError: torch_imported = False else: torch_imported = True try: import tensorflow as tf # type: ignore # noqa: F401 except (ImportError, TypeErr...
import importlib import os import re import types from typing import Any, Optional import numpy as np try: import torch # noqa: F401 except ImportError: torch_imported = False else: torch_imported = True try: import tensorflow as tf # type: ignore # noqa: F401 except (ImportError, TypeError): ...
import pytest from llama_index.core import MockEmbedding, StorageContext, VectorStoreIndex from llama_index.core.llms import MockLLM from llama_index.core.vector_stores.types import BasePydanticVectorStore from llama_index.vector_stores.redis import RedisVectorStore def test_class(): names_of_base_classes = [b._...
from llama_index.core import MockEmbedding, StorageContext, VectorStoreIndex from llama_index.core.llms import MockLLM from llama_index.core.vector_stores.types import BasePydanticVectorStore from llama_index.vector_stores.redis import RedisVectorStore def test_class(): names_of_base_classes = [b.__name__ for b i...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from mmcv.runner import BaseModule from ..builder import build_shared_head class BaseRoIHead(BaseModule, metaclass=ABCMeta): """Base class for RoIHeads.""" def __init__(self, bbox_roi_extractor=None, ...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from mmcv.runner import BaseModule from ..builder import build_shared_head class BaseRoIHead(BaseModule, metaclass=ABCMeta): """Base class for RoIHeads.""" def __init__(self, bbox_roi_extractor=None, ...
import enum import pathlib from typing import Any, BinaryIO, Optional, Union from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource from torchvision.proto...
import enum import pathlib from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource fro...
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _additional_imports = {} _import_structure = {"pipeline_output": ["FluxPipe...
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _additional_imports = {} _import_structure = {"pipeline_output": ["FluxPipe...
from __future__ import annotations from collections.abc import Iterable from typing import Any import torch from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer from sentence_transformers.util import fullname class CosineSimilarityLoss(nn.Module): def __init__(...
from __future__ import annotations from collections.abc import Iterable from typing import Any import torch from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer from sentence_transformers.util import fullname class CosineSimilarityLoss(nn.Module): def __init__(...
from typing import Any, Optional, Type, TypeVar, Union from docarray.base_doc import BaseDoc from docarray.typing import TextUrl from docarray.typing.tensor.embedding import AnyEmbedding T = TypeVar('T', bound='TextDoc') class TextDoc(BaseDoc): """ Document for handling text. It can contain: - a [...
from typing import Any, Optional, Type, TypeVar, Union from docarray.base_doc import BaseDoc from docarray.typing import TextUrl from docarray.typing.tensor.embedding import AnyEmbedding T = TypeVar('T', bound='TextDoc') class TextDoc(BaseDoc): """ Document for handling text. It can contain: - a [...
from types import SimpleNamespace from unittest.mock import patch import pytest from llama_index.core.base.llms.types import ( CompletionResponse, ChatMessage, ChatResponse, ) from llama_index.llms.dashscope.base import DashScope class FakeDashscopeResponse: def __init__(self, data: dict): s...
from unittest.mock import patch import pytest from llama_index.core.base.llms.types import ( CompletionResponse, ChatMessage, ChatResponse, ) from llama_index.llms.dashscope.base import DashScope @pytest.fixture() def dashscope_llm(): return DashScope(api_key="test") @pytest.fixture() def dashscop...
import warnings from typing import List, Optional, TypeVar from docarray.typing.bytes.video_bytes import VideoBytes, VideoLoadResult from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl from docarray.typing.url.mimetypes import VIDEO_MIMETYPE from docarray.utils._in...
import warnings from typing import List, Optional, TypeVar from docarray.typing.bytes.video_bytes import VideoBytes, VideoLoadResult from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl from docarray.typing.url.mimetypes import VIDEO_MIMETYPE from docarray.utils._in...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
import torch from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda from torchaudio_unittest.models.rnnt.rnnt_test_impl import RNNTTestImpl @skipIfNoCuda class RNNTFloat32GPUTest(RNNTTestImpl, PytorchTestCase): dtype = torch.float32 device = torch.device("cuda") @skipIfNoCuda class RNNTF...
import torch from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase from torchaudio_unittest.models.rnnt.rnnt_test_impl import RNNTTestImpl @skipIfNoCuda class RNNTFloat32GPUTest(RNNTTestImpl, PytorchTestCase): dtype = torch.float32 device = torch.device("cuda") @skipIfNoCuda class RNNTF...
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'] num_things_classes = 80 num_stuff_classes = 0 num_classes = num_things_classes + num_stuff_classes image_size = (1024, 1024) batch_augments = [ dict( type='BatchFixedSizePad', size=image_size, img_pad_value=0, pad_mask=Tru...
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'] num_things_classes = 80 num_stuff_classes = 0 num_classes = num_things_classes + num_stuff_classes image_size = (1024, 1024) batch_augments = [ dict( type='BatchFixedSizePad', size=image_size, img_pad_value=0, pad_mask=True...
from typing import Union from google.oauth2.service_account import Credentials # type: ignore from google.cloud import aiplatform, storage from google.cloud.aiplatform import telemetry from google.cloud.aiplatform.matching_engine import ( MatchingEngineIndex, MatchingEngineIndexEndpoint, ) from llama_index.v...
from typing import Union from google.oauth2.service_account import Credentials # type: ignore from google.cloud import aiplatform, storage from google.cloud.aiplatform import telemetry from google.cloud.aiplatform.matching_engine import ( MatchingEngineIndex, MatchingEngineIndexEndpoint, ) from llama_index.v...
from __future__ import annotations import pytest from torch import Tensor from sentence_transformers import SparseEncoder @pytest.mark.parametrize( "model_name", [ ("sentence-transformers/all-MiniLM-L6-v2"), ], ) def test_load_and_encode(model_name: str) -> None: # Ensure that SparseEncoder ...
from __future__ import annotations import pytest from torch import Tensor from sentence_transformers import SparseEncoder @pytest.mark.parametrize( "model_name", [ ("sentence-transformers/all-MiniLM-L6-v2"), ], ) def test_load_and_encode(model_name: str) -> None: # Ensure that SparseEncoder ...
from __future__ import annotations import pytest from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer from sentence_transformers.model_card import generate_model_card from sentence_transformers.util import is_datasets_available, is_training_available if is_datasets_available(): from ...
from __future__ import annotations import pytest from datasets import Dataset, DatasetDict from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer from sentence_transformers.model_card import generate_model_card @pytest.fixture(scope="session") def dummy_dataset(): """ Dummy datase...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser-ensembledis...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser-ensembledis...
import subprocess import pytest from clip_text import CLIPTextEncoder from jina import Document, DocumentArray, Flow _EMBEDDING_DIM = 512 @pytest.mark.parametrize('request_size', [1, 10, 50, 100]) def test_integration(request_size: int): docs = DocumentArray( [Document(text='just some random text here')...
import subprocess import pytest from clip_text import CLIPTextEncoder from jina import Document, DocumentArray, Flow _EMBEDDING_DIM = 512 @pytest.mark.parametrize('request_size', [1, 10, 50, 100]) def test_integration(request_size: int): docs = DocumentArray( [Document(text='just some random text here')...
import datasets from ..folder_based_builder import folder_based_builder logger = datasets.utils.logging.get_logger(__name__) class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig): """Builder Config for AudioFolder.""" drop_labels: bool = None drop_metadata: bool = None def __post...
import datasets from ..folder_based_builder import folder_based_builder logger = datasets.utils.logging.get_logger(__name__) class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig): """Builder Config for AudioFolder.""" drop_labels: bool = None drop_metadata: bool = None def __post...
from __future__ import annotations from .CSRSparsity import CSRSparsity from .IDF import IDF from .MLMTransformer import MLMTransformer from .SpladePooling import SpladePooling __all__ = ["CSRSparsity", "MLMTransformer", "SpladePooling", "IDF"]
from __future__ import annotations from .CSRSparsity import CSRSparsity from .MLMTransformer import MLMTransformer from .SpladePooling import SpladePooling __all__ = ["CSRSparsity", "MLMTransformer", "SpladePooling"]
"""Standard LangChain interface tests""" import pytest from langchain_core.language_models import BaseChatModel from langchain_core.rate_limiters import InMemoryRateLimiter from langchain_core.tools import BaseTool from langchain_tests.integration_tests import ( ChatModelIntegrationTests, ) from langchain_groq im...
"""Standard LangChain interface tests""" import pytest from langchain_core.language_models import BaseChatModel from langchain_core.rate_limiters import InMemoryRateLimiter from langchain_core.tools import BaseTool from langchain_tests.integration_tests import ( ChatModelIntegrationTests, ) from langchain_groq im...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If ex...
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If ex...
"""Init file of LlamaIndex.""" __version__ = "0.12.37" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_index.core....
"""Init file of LlamaIndex.""" __version__ = "0.12.36" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_index.core....
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from itertools import groupby from typing import Iterable, Dict from jina import Executor, requests, DocumentArray class MinRanker(Executor): """ :class:`MinRanker` aggregates the score of the matched ...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from itertools import groupby from typing import Iterable, Dict from jina import Executor, requests, DocumentArray class MinRanker(Executor): """ :class:`MinRanker` aggregates the score of the matched ...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import shutil import subprocess from pathlib import Path import pytest from jina import Document, DocumentArray @pytest.fixture(scope="session", autouse=True) def download_cache(): subprocess.run( 'scri...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import shutil from pathlib import Path import pytest from jina import Document, DocumentArray @pytest.fixture(scope="session", autouse=True) def download_cache(): os.system('scripts/download_full.sh')...
from pathlib import Path import pytest from torchaudio.datasets import dr_vctk from torchaudio_unittest.common_utils import ( get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase, ) _SUBSETS = ["train", "test"] _CONDITIONS = ["clean", "device-recorded"] _SOURCES = ["DR-VCTK_Office1_ClosedWindow...
from pathlib import Path import pytest from torchaudio.datasets import dr_vctk from torchaudio_unittest.common_utils import ( TempDirMixin, TorchaudioTestCase, get_whitenoise, save_wav, ) _SUBSETS = ["train", "test"] _CONDITIONS = ["clean", "device-recorded"] _SOURCES = ["DR-VCTK_Office1_ClosedWindow...
from typing import Any, Optional, Sequence, Union from deprecated import deprecated from llama_index.core.base.llms.generic_utils import ( chat_response_to_completion_response, stream_chat_response_to_completion_response, astream_chat_response_to_completion_response, ) from llama_index.core.base.llms.types...
from typing import Any, Optional, Sequence from pathlib import Path from llama_index.core.base.llms.generic_utils import ( chat_response_to_completion_response, stream_chat_response_to_completion_response, astream_chat_response_to_completion_response, ) from llama_index.core.base.llms.types import ( Ch...
"""LLMResult class.""" from __future__ import annotations from copy import deepcopy from typing import Literal, Optional, Union from pydantic import BaseModel from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk from langchain_core.outputs.generation import Generation, GenerationCh...
from __future__ import annotations from copy import deepcopy from typing import Literal, Optional, Union from pydantic import BaseModel from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk from langchain_core.outputs.generation import Generation, GenerationChunk from langchain_core....
_base_ = [ '../_base_/models/mask-rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( data_preprocessor=dict( # The mean and std are used in PyCls when training RegNets mean=[103.53, 116.28, 123.675...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( data_preprocessor=dict( # The mean and std are used in PyCls when training RegNets mean=[103.53, 116.28, 123.675...
from keras.src import backend from keras.src import ops from keras.src.api_export import keras_export from keras.src.layers.input_spec import InputSpec from keras.src.layers.layer import Layer from keras.src.utils import argument_validation @keras_export("keras.layers.ZeroPadding1D") class ZeroPadding1D(Layer): "...
from keras.src import ops from keras.src.api_export import keras_export from keras.src.layers.input_spec import InputSpec from keras.src.layers.layer import Layer from keras.src.utils import argument_validation @keras_export("keras.layers.ZeroPadding1D") class ZeroPadding1D(Layer): """Zero-padding layer for 1D in...
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast logger = datasets.utils.logging.get_logger(__name__) @dataclass class ParquetConfig(datasets.BuilderConfig): """BuilderCo...
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast logger = datasets.utils.logging.get_logger(__name__) @dataclass class ParquetConfig(datasets.BuilderConfig): """BuilderCo...
from typing import Dict, Iterable import torch from torch import Tensor, nn from sentence_transformers import util from sentence_transformers.SentenceTransformer import SentenceTransformer class CoSENTLoss(nn.Module): def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwi...
import torch from torch import nn, Tensor from typing import Iterable, Dict from ..SentenceTransformer import SentenceTransformer from .. import util class CoSENTLoss(nn.Module): def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim): """ This cla...
import numpy as np from pydantic.tools import parse_obj_as from docarray.typing import ImageUrl, Tensor def test_image_url(): uri = parse_obj_as(ImageUrl, 'http://jina.ai/img.png') tensor = uri.load() assert isinstance(tensor, np.ndarray)
from pydantic.tools import parse_obj_as from docarray.typing import ImageUrl, Tensor def test_image_url(): uri = parse_obj_as(ImageUrl, 'http://jina.ai/img.png') tensor = uri.load() assert isinstance(tensor, Tensor)
from jina.schemas.helper import _cli_to_schema from jina_cli.export import api_to_dict for s in ('flow', 'gateway', 'executor', 'deployment'): a = _cli_to_schema(api_to_dict(), s) table = ['| Name | Description | Type | Default |', '|----|----|----|----|'] for k, v in a[f'Jina::{s.capitalize()}']['proper...
from jina.schemas.helper import _cli_to_schema from jina_cli.export import api_to_dict for s in ('flow', 'gateway', 'executor', 'deployment'): a = _cli_to_schema(api_to_dict(), s) table = ['| Name | Description | Type | Default |', '|----|----|----|----|'] for k, v in a[f'Jina::{s.capitalize()}']['proper...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.retrievers import GoogleDocumentAIWarehouseRetriever # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling op...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.retrievers import GoogleDocumentAIWarehouseRetriever # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling op...
# mypy: ignore-errors import argparse import torchgen.model as model from torchgen.gen import FileManager, parse_native_yaml def num_leading_spaces(line: str) -> int: return len(line) - len(line.lstrip()) def deindent(code: str) -> str: lines = code.split("\n") min_leading_spaces = min(map(num_leading...
# mypy: ignore-errors import argparse import torchgen.model as model from torchgen.gen import FileManager, parse_native_yaml def num_leading_spaces(line: str) -> int: return len(line) - len(line.lstrip()) def deindent(code: str) -> str: lines = code.split("\n") min_leading_spaces = min(map(num_leading...
import pytest from llama_index.core.base.embeddings.base_sparse import BaseSparseEmbedding from llama_index.sparse_embeddings.fastembed import FastEmbedSparseEmbedding def test_class(): names_of_base_classes = [b.__name__ for b in FastEmbedSparseEmbedding.__mro__] assert BaseSparseEmbedding.__name__ in names...
import pytest from llama_index.core.base.embeddings.base_sparse import BaseSparseEmbedding from llama_index.sparse_embeddings.fastembed import FastEmbedSparseEmbedding def test_class(): names_of_base_classes = [b.__name__ for b in FastEmbedSparseEmbedding.__mro__] assert BaseSparseEmbedding.__name__ in names...
import pytest @pytest.mark.compile def test_placeholder() -> None: """Used for compiling integration tests without running any real tests."""
import pytest @pytest.mark.compile def test_placeholder() -> None: """Used for compiling integration tests without running any real tests.""" pass
from __future__ import annotations from collections.abc import Iterable import torch from torch import Tensor, nn from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class FlopsLoss(nn.Module): def __init__(self, model: SparseEncoder, threshold: float = None) -> None: """ ...
from __future__ import annotations from collections.abc import Iterable import torch from torch import Tensor, nn from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class FlopsLoss(nn.Module): def __init__(self, model: SparseEncoder, threshold: float = None) -> None: """ ...
import os import os.path as osp import tempfile import unittest import numpy as np import torch from PIL import Image from mmdet.evaluation import CityScapesMetric try: import cityscapesscripts except ImportError: cityscapesscripts = None class TestCityScapesMetric(unittest.TestCase): def setUp(self):...
import os import os.path as osp import tempfile import unittest import numpy as np import torch from PIL import Image from mmdet.evaluation import CityScapesMetric try: import cityscapesscripts except ImportError: cityscapesscripts = None class TestCityScapesMetric(unittest.TestCase): def setUp(self):...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
import pathlib from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource from torchvision.prototype.datasets.utils._internal import (...
import pathlib from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper from torchvision.datapoints import BoundingBoxes from torchvision.prototype.datapoints import Label from torchvision.prototype.datasets.utils import Dataset, Enc...
""" This is a simple application for sentence embeddings: semantic search We have a corpus with various sentences. Then, for a given query sentence, we want to find the most similar sentence in this corpus. This script outputs for various queries the top 5 most similar sentences in the corpus. """ import torch from...
""" This is a simple application for sentence embeddings: semantic search We have a corpus with various sentences. Then, for a given query sentence, we want to find the most similar sentence in this corpus. This script outputs for various queries the top 5 most similar sentences in the corpus. """ import torch from...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp from mmengine.config import Config, DictAction from mmengine.runner import Runner from mmdet.registry import RUNNERS from mmdet.utils import register_all_modules # TODO: support fuse_conv_bn and format_only def parse_arg...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp from mmengine.config import Config, DictAction from mmengine.runner import Runner from mmdet.registry import RUNNERS from mmdet.utils import register_all_modules, replace_cfg_vals # TODO: support fuse_conv_bn and format_...
_base_ = './faster-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
_base_ = './faster_rcnn_r50_fpn_lsj_200e_8x8_fp16_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
import fnmatch import os from typing import Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, ...
import fnmatch import os from typing import Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, ...
from langchain_core.prompts.few_shot import ( FewShotChatMessagePromptTemplate, FewShotPromptTemplate, _FewShotPromptTemplateMixin, ) __all__ = [ "FewShotChatMessagePromptTemplate", "FewShotPromptTemplate", "_FewShotPromptTemplateMixin", ]
from langchain_core.prompts.few_shot import ( FewShotChatMessagePromptTemplate, FewShotPromptTemplate, _FewShotPromptTemplateMixin, ) __all__ = [ "FewShotPromptTemplate", "FewShotChatMessagePromptTemplate", "_FewShotPromptTemplateMixin", ]
# Copyright (c) OpenMMLab. All rights reserved. from .base_det_dataset import BaseDetDataset from .builder import DATASETS, PIPELINES, build_dataset from .cityscapes import CityscapesDataset from .coco import CocoDataset from .coco_panoptic import CocoPanopticDataset from .dataset_wrappers import MultiImageMixDataset f...
# Copyright (c) OpenMMLab. All rights reserved. from .builder import DATASETS, PIPELINES, build_dataset from .cityscapes import CityscapesDataset from .coco import CocoDataset from .coco_panoptic import CocoPanopticDataset from .dataset_wrappers import MultiImageMixDataset from .deepfashion import DeepFashionDataset fr...
__version__ = "2.8.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" import importlib import os from .datasets import SentencesDataset, ParallelSentencesDataset from .LoggingHandler import LoggingHandler from .SentenceTransformer import SentenceTransformer from .readers import InputExample from .cross_enco...
__version__ = "2.8.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" from .datasets import SentencesDataset, ParallelSentencesDataset from .LoggingHandler import LoggingHandler from .SentenceTransformer import SentenceTransformer from .readers import InputExample from .cross_encoder.CrossEncoder import Cross...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa model = dict( type='LAD', data_preprocesso...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa preprocess_cfg = dict( mean=[123.675, 116.28, ...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import cv2 import mmcv from mmcv.transforms import Compose from mmengine.utils import track_iter_progress from mmdet.apis import inference_detector, init_detector from mmdet.registry import VISUALIZERS def parse_args(): parser = argparse.ArgumentPa...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import cv2 import mmcv from mmcv.transforms import Compose from mmengine.utils import track_iter_progress from mmdet.apis import inference_detector, init_detector from mmdet.registry import VISUALIZERS def parse_args(): parser = argparse.ArgumentPa...
"""Util that calls Bing Search.""" from typing import Any, Dict, List import requests from langchain_core.utils import get_from_dict_or_env from pydantic import BaseModel, ConfigDict, Field, model_validator # BING_SEARCH_ENDPOINT is the default endpoint for Bing Web Search API. # Currently There are two web-based Bi...
"""Util that calls Bing Search.""" from typing import Any, Dict, List import requests from langchain_core.utils import get_from_dict_or_env from pydantic import BaseModel, ConfigDict, Field, model_validator # BING_SEARCH_ENDPOINT is the default endpoint for Bing Web Search API. # Currently There are two web-based Bi...
from typing import Optional import numpy as np import pytest import torch from pydantic.tools import parse_obj_as, schema_json_of from docarray import BaseDoc from docarray.base_doc.io.json import orjson_dumps from docarray.typing import AudioTorchTensor, AudioUrl from docarray.utils._internal.misc import is_tf_avail...
from typing import Optional import numpy as np import pytest import torch from pydantic.tools import parse_obj_as, schema_json_of from docarray import BaseDoc from docarray.base_doc.io.json import orjson_dumps from docarray.typing import AudioTorchTensor, AudioUrl from docarray.utils.misc import is_tf_available from ...
import base64 import os import pytest import requests from llama_index.core.llms import LLM from llama_index.core.schema import ImageNode from llama_index.multi_modal_llms.gemini import GeminiMultiModal def test_embedding_class(): names_of_base_classes = [b.__name__ for b in GeminiMultiModal.__mro__] assert ...
from llama_index.core.multi_modal_llms.base import MultiModalLLM from llama_index.multi_modal_llms.gemini import GeminiMultiModal def test_embedding_class(): names_of_base_classes = [b.__name__ for b in GeminiMultiModal.__mro__] assert MultiModalLLM.__name__ in names_of_base_classes
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp from mmengine.config import Config, DictAction from mmengine.registry import RUNNERS from mmengine.runner import Runner from mmdet.utils import setup_cache_size_limit_of_dynamo def parse_args(): parser = argparse.Arg...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import logging import os import os.path as osp from mmengine.config import Config, DictAction from mmengine.logging import print_log from mmengine.registry import RUNNERS from mmengine.runner import Runner from mmdet.utils import setup_cache_size_limit_o...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
from typing import TYPE_CHECKING, Any from docarray.base_doc.io.json import orjson_dumps from docarray.utils._internal.misc import import_library if TYPE_CHECKING: from fastapi.responses import JSONResponse else: fastapi = import_library('fastapi', raise_error=True) JSONResponse = fastapi.responses.JSONRe...
import argparse import os import shlex import subprocess def execute_command(command): command_list = shlex.split(command) subprocess.run(command_list, check=True, text=True) def main(): comment = os.environ["COMMENT"].splitlines()[0].strip() # Extract the command-line arguments from the comment ...
import argparse import os import shlex import subprocess def execute_command(command): command_list = shlex.split(command) subprocess.run(command_list, check=True, text=True) def main(): comment = os.environ["COMMENT"].splitlines()[0].strip() # Extract the command-line arguments from the comment ...
import os import time import pytest from jina import Document, DocumentArray from ..redis_storage import RedisStorage @pytest.fixture(scope='function') def indexer(): return RedisStorage() @pytest.fixture() def docker_compose(request): os.system( f'docker-compose -f {request.param} --project-direc...
import os import time from jina import Document, DocumentArray import pytest from ..redis_storage import RedisStorage @pytest.fixture(scope='function') def indexer(): return RedisStorage() @pytest.fixture() def docker_compose(request): os.system( f'docker-compose -f {request.param} --project-direc...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess from typing import Iterable, Optional from jina import DocumentArray, Executor, requests from jina.logging.logger import JinaLogger from jina_commons.batching import get_docs_batch_generator fr...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess from typing import Iterable, Optional from jina import DocumentArray, Executor, requests from jina.logging.logger import JinaLogger from jina_commons.batching import get_docs_batch_generator fr...
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os.path as osp import numpy as np from mmengine.config import Config, DictAction from mmengine.utils import ProgressBar from mmdet.models.utils import mask2ndarray from mmdet.registry import DATASETS, VISUALIZERS from mmdet.structures.bbox import ...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os.path as osp import mmcv import numpy as np from mmcv import Config, DictAction from mmdet.models.utils import mask2ndarray from mmdet.registry import DATASETS, VISUALIZERS from mmdet.structures.bbox import BaseBoxes from mmdet.utils import regi...
from functools import wraps from typing import TYPE_CHECKING, List from jina.excepts import FlowBuildLevelError # noinspection PyUnreachableCode if TYPE_CHECKING: from jina.enums import FlowBuildLevel from jina.orchestrate.flow.base import Flow def allowed_levels(levels: List['FlowBuildLevel']): """Anno...
from functools import wraps from typing import TYPE_CHECKING, List from jina.excepts import FlowBuildLevelError # noinspection PyUnreachableCode if TYPE_CHECKING: from jina.enums import FlowBuildLevel from jina.orchestrate.flow.base import Flow def allowed_levels(levels: List['FlowBuildLevel']): """Anno...
from typing import Any, Dict from torchvision import datapoints from torchvision.transforms.v2 import functional as F, Transform from torchvision.transforms.v2.utils import is_simple_tensor class UniformTemporalSubsample(Transform): """[BETA] Uniformly subsample ``num_samples`` indices from the temporal dimensi...
from typing import Any, Dict from torchvision import datapoints from torchvision.transforms.v2 import functional as F, Transform from torchvision.transforms.v2.utils import is_simple_tensor class UniformTemporalSubsample(Transform): _transformed_types = (is_simple_tensor, datapoints.Video) def __init__(sel...
# pylint: disable=protected-access """Shared typing definition.""" import ctypes import os from typing import ( TYPE_CHECKING, Any, AnyStr, Callable, Dict, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) # os.PathLike/string/numpy.array/scipy.sparse/pd.DataFrame...
# pylint: disable=protected-access """Shared typing definition.""" import ctypes import os from typing import ( TYPE_CHECKING, Any, AnyStr, Callable, Dict, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) # os.PathLike/string/numpy.array/scipy.sparse/pd.DataFrame...
import os import random import time from typing import Dict, OrderedDict import numpy as np import pytest from jina import Document, DocumentArray, Executor, Flow, requests from jina_commons.indexers.dump import dump_docs from jinahub.indexers.compound.FaissLMDBSearcher.faiss_lmdb import FaissLMDBSearcher from jinahu...
import os import random import time from typing import Dict, OrderedDict import numpy as np import pytest from jina import Document, DocumentArray, Executor, Flow, requests from jina_commons.indexers.dump import dump_docs from jinahub.indexers.compound.FaissLMDBSearcher.faiss_lmdb import FaissLMDBSearcher from jinahu...
"""Simple reader for mbox (mailbox) files.""" import os from pathlib import Path from typing import Any, List from llama_index.core.readers.base import BaseReader from llama_index.readers.file import MboxReader as MboxFileReader from llama_index.core.schema import Document class MboxReader(BaseReader): """ ...
"""Simple reader for mbox (mailbox) files.""" import os from pathlib import Path from typing import Any, List from llama_index.core.readers.base import BaseReader from llama_index.readers.file import MboxReader as MboxFileReader from llama_index.core.schema import Document class MboxReader(BaseReader): """Mbox ...
"""Simple Reader that reads transcript of youtube video.""" import re from typing import Any, List, Optional from youtube_transcript_api import YouTubeTranscriptApi from llama_index.core.readers.base import BasePydanticReader from llama_index.core.schema import Document from llama_index.readers.youtube_transcript.ut...
"""Simple Reader that reads transcript of youtube video.""" import re from typing import Any, List, Optional from youtube_transcript_api import YouTubeTranscriptApi from llama_index.core.readers.base import BasePydanticReader from llama_index.core.schema import Document from llama_index.readers.youtube_transcript.uti...