input (string, 33–5k chars) · output (string, 32–5k chars)
import prisma AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = { "Input": True, "Output": True, "Webhook": True, "AgentBlock": True, } AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = { "AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore } EXECUTION_RESULT_INCLUDE: prisma.types....
import prisma AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = { "Input": True, "Output": True, "AgentBlock": True, } AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = { "AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore } EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInc...
import pytest from llama_index.core.readers.base import BaseReader from llama_index.readers.hive.base import InvalidSqlError, _validate_sql_query from llama_index.readers.hive import HiveReader def test_class(): assert issubclass(HiveReader, BaseReader) def test_validation(): with pytest.raises(InvalidSqlE...
from llama_index.core.readers.base import BaseReader
from llama_index.readers.hive import HiveReader


def test_class():
    names_of_base_classes = [b.__name__ for b in HiveReader.__mro__]
    assert BaseReader.__name__ in names_of_base_classes
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# Copyright (c) OpenMMLab. All rights reserved. import base64 import os import mmcv import torch from ts.torch_handler.base_handler import BaseHandler from mmdet.apis import inference_detector, init_detector class MMdetHandler(BaseHandler): threshold = 0.5 def initialize(self, context): properties ...
# Copyright (c) OpenMMLab. All rights reserved. import base64 import os import mmcv import torch from ts.torch_handler.base_handler import BaseHandler from mmdet.apis import inference_detector, init_detector class MMdetHandler(BaseHandler): threshold = 0.5 def initialize(self, context): properties ...
from typing import Any from langchain_core.agents import AgentAction from langchain_core.prompts.chat import ChatPromptTemplate class AgentScratchPadChatPromptTemplate(ChatPromptTemplate): """Chat prompt template for the agent scratchpad.""" @classmethod def is_lc_serializable(cls) -> bool: retu...
from typing import Any, Dict, List, Tuple from langchain_core.agents import AgentAction from langchain_core.prompts.chat import ChatPromptTemplate class AgentScratchPadChatPromptTemplate(ChatPromptTemplate): """Chat prompt template for the agent scratchpad.""" @classmethod def is_lc_serializable(cls) ->...
from typing import Any, Dict, Iterable import torch from torch import Tensor, nn from sentence_transformers import util from sentence_transformers.SentenceTransformer import SentenceTransformer class MultipleNegativesSymmetricRankingLoss(nn.Module): def __init__(self, model: SentenceTransformer, scale: float = ...
from typing import Dict, Iterable import torch from torch import Tensor, nn from sentence_transformers import util from sentence_transformers.SentenceTransformer import SentenceTransformer class MultipleNegativesSymmetricRankingLoss(nn.Module): def __init__(self, model: SentenceTransformer, scale: float = 20.0,...
import functools import pytest from jina.helper import iscoroutinefunction from jina.serve.executors import get_executor_taboo from jina.serve.executors.decorators import dynamic_batching, requests from jina.serve.helper import store_init_kwargs def test_store_init_kwargs(): store_init_kwargs_decorator = functo...
import functools import pytest from jina.helper import iscoroutinefunction from jina.serve.executors import get_executor_taboo from jina.serve.executors.decorators import requests from jina.serve.helper import store_init_kwargs def test_store_init_kwargs(): store_init_kwargs_decorator = functools.partial( ...
class DataAdapter: """Base class for input data adapters. The purpose of a DataAdapter is to provide a unified interface to iterate over input data provided in a variety of formats -- such as NumPy arrays, tf.Tensors, tf.data.Datasets, Keras PyDatasets, etc. """ def get_numpy_iterator(self): ...
class DataAdapter: """Base class for input data adapters. The purpose of a DataAdapter is to provide a unfied interface to iterate over input data provided in a variety of formats -- such as NumPy arrays, tf.Tensors, tf.data.Datasets, Keras PyDatasets, etc. """ def get_numpy_iterator(self): ...
_base_ = './mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth'  # noqa
model = dict(
    backbone=dict(
        depths=[2, 2, 18, 2],
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)))
_base_ = './mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py' pretrained = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth' # noqa model = dict( backbone=dict(depths=[2, 2, 18, 2]), init_cfg=dict(type='Pretrained', checkpoi...
from pydantic import BaseModel from backend.data.block import ( Block, BlockCategory, BlockManualWebhookConfig, BlockOutput, BlockSchema, ) from backend.data.model import SchemaField from backend.integrations.providers import ProviderName from backend.integrations.webhooks.compass import CompassWeb...
from pydantic import BaseModel from backend.data.block import ( Block, BlockCategory, BlockManualWebhookConfig, BlockOutput, BlockSchema, ) from backend.data.model import SchemaField from backend.integrations.providers import ProviderName from backend.integrations.webhooks.compass import CompassWeb...
_base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_p...
_base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_p...
from docarray.typing.url.any_url import AnyUrl from docarray.typing.url.audio_url import AudioUrl from docarray.typing.url.image_url import ImageUrl from docarray.typing.url.text_url import TextUrl from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl from docarray.typing.url.url_3d.point_cloud_url import PointClou...
from docarray.typing.url.any_url import AnyUrl from docarray.typing.url.image_url import ImageUrl from docarray.typing.url.text_url import TextUrl from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl from docarray.typing.url.url_3d.point_cloud_url import PointCloud3DUrl __all__ = ['ImageUrl', 'AnyUrl', 'TextUrl',...
""" This script contains an example how to perform semantic search with Elasticsearch. You need Elasticsearch up and running locally: https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.rea...
""" This script contains an example how to perform semantic search with Elasticsearch. You need Elasticsearch up and running locally: https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.rea...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import tempfile from collections import OrderedDict import torch from mmengine import Config from mmengine.utils import digit_version def parse_config(config_strings): temp_file = tempfile.NamedTemporaryFile() config_path = f'{temp_file.name}.py...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import tempfile from collections import OrderedDict import torch from mmengine import Config def parse_config(config_strings): temp_file = tempfile.NamedTemporaryFile() config_path = f'{temp_file.name}.py' with open(config_path, 'w') as f: ...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Sequence from mmengine.hooks import Hook from mmengine.model import is_model_wrapper from mmdet.registry import HOOKS @HOOKS.register_module() class YOLOXModeSwitchHook(Hook): """Switch the mode of YOLOX during training. This hook turns off...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Sequence from mmengine.hooks import Hook from mmengine.model import is_model_wrapper from mmdet.registry import HOOKS @HOOKS.register_module() class YOLOXModeSwitchHook(Hook): """Switch the mode of YOLOX during training. This hook turns off...
_base_ = '../fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py' # model settings model = dict( neck=[ dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), dict( type='BFP', in_channels=256, num_l...
_base_ = '../fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py' # model settings model = dict( neck=[ dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), dict( type='BFP', in_channels=256, num_l...
import functools import pytest from jina.helper import iscoroutinefunction from jina.serve.executors import get_executor_taboo from jina.serve.executors.decorators import requests from jina.serve.helper import store_init_kwargs def test_store_init_kwargs(): store_init_kwargs_decorator = functools.partial( ...
import functools import pytest from jina.helper import iscoroutinefunction from jina.serve.executors import get_default_metas, get_executor_taboo from jina.serve.executors.decorators import requests from jina.serve.helper import store_init_kwargs, wrap_func def test_store_init_kwargs(): store_init_kwargs_decora...
"""Init file.""" from llama_index.core.llama_pack.base import BaseLlamaPack from llama_index.core.llama_pack.download import download_llama_pack __all__ = [ "BaseLlamaPack", "download_llama_pack", ]
"""Init file.""" from llama_index.core.llama_pack.base import BaseLlamaPack from llama_index.core.llama_pack.download import download_llama_pack __all__ = [ "BaseLlamaPack", "download_llama_pack", ]
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( # use ResNeSt img_norm data_preprocessor=dict( mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], bgr_to_rgb=True), backbone=dict( type='ResNeSt', ...
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( # use ResNeSt img_norm data_preprocessor=dict( mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], bgr_to_rgb=True), backbone=dict( type='ResNeSt', ...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import pytest from mmengine.logging import HistoryBuffer array_method = [np.array, lambda x: x] try: import torch except ImportError: pass else: array_method.append(torch.tensor) class TestLoggerBuffer: def test_init(self): ...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import pytest import torch from mmengine import HistoryBuffer class TestLoggerBuffer: def test_init(self): log_buffer = HistoryBuffer() assert log_buffer.max_length == 1000000 log_history, counts = log_buffer.data ...
# Copyright (c) OpenMMLab. All rights reserved. import pytest import torch from mmdet.models.utils import (LearnedPositionalEncoding, SinePositionalEncoding) def test_sine_positional_encoding(num_feats=16, batch_size=2): # test invalid type of scale with pytest.raises(Assertio...
import pytest import torch from mmdet.models.utils import (LearnedPositionalEncoding, SinePositionalEncoding) def test_sine_positional_encoding(num_feats=16, batch_size=2): # test invalid type of scale with pytest.raises(AssertionError): module = SinePositionalEncoding...
""" This examples trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair as input and outputs a label. Here, it output a continuous labels 0...1 to indicate the similarity between the input pair. It does NOT produce a sentence embedding and does NOT work for individual sentences. Usage:...
""" This examples trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair as input and outputs a label. Here, it output a continuous labels 0...1 to indicate the similarity between the input pair. It does NOT produce a sentence embedding and does NOT work for individual sentences. Usage:...
import sys from os import path from setuptools import find_packages from setuptools import setup if sys.version_info < (3, 7, 0): raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}') try: pkg_name = 'docarray' libinfo_py = path.join(pkg_name, '__init__.py') libinfo_content = o...
import sys from os import path from setuptools import find_packages from setuptools import setup if sys.version_info < (3, 7, 0): raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}') try: pkg_name = 'docarray' libinfo_py = path.join(pkg_name, '__init__.py') libinfo_content = o...
from typing import Any, Collection, List, Optional, Tuple, Union from llama_index.core.tools.types import AsyncBaseTool from pydantic import BaseModel class LLMCompilerParseResult(BaseModel): """LLMCompiler parser result.""" thought: str idx: int tool_name: str args: str class JoinerOutput(Bas...
from typing import Any, Collection, List, Optional, Tuple, Union from llama_index.core.tools.types import AsyncBaseTool from pydantic import BaseModel class LLMCompilerParseResult(BaseModel): """LLMCompiler parser result.""" thought: str idx: int tool_name: str args: str class JoinerOutput(Bas...
from llama_index.core.base.llms.types import ( LLMMetadata, ) from llama_index.core.bridge.pydantic import Field from llama_index.llms.openai_like import OpenAILike class LlamaAPI(OpenAILike): """LlamaAPI LLM. Examples: `pip install llama-index-llms-llama-api` ```python from llam...
from typing import Any, Callable, Dict, Optional, Sequence from llama_index.core.base.llms.types import ( ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, ) from llama_index.core.bridge.pydantic import Field, PrivateAttr from llama_index.core.c...
from __future__ import annotations from sentence_transformers.losses.GISTEmbedLoss import GISTEmbedLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseGISTEmbedLoss(GISTEmbedLoss): def __init__( self, model: SparseEncoder, guide: SparseEncoder, ...
from __future__ import annotations from sentence_transformers.losses.GISTEmbedLoss import GISTEmbedLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseGISTEmbedLoss(GISTEmbedLoss): def __init__( self, model: SparseEncoder, guide: SparseEncoder, ...
import os import urllib import pytest from pydantic import parse_obj_as, schema_json_of from docarray.base_doc.io.json import orjson_dumps from docarray.typing import TextUrl from tests import TOYDATA_DIR REMOTE_TEXT_FILE = 'https://de.wikipedia.org/wiki/Brixen' CUR_DIR = os.path.dirname(os.path.abspath(__file__)) L...
import os import urllib import pytest from pydantic import parse_obj_as, schema_json_of from docarray.base_document.io.json import orjson_dumps from docarray.typing import TextUrl from tests import TOYDATA_DIR REMOTE_TEXT_FILE = 'https://de.wikipedia.org/wiki/Brixen' CUR_DIR = os.path.dirname(os.path.abspath(__file...
import os import pytest import yaml from jina import Gateway from jina.jaml import JAML from jina.serve.executors import BaseExecutor class MyDummyGateway(Gateway): async def setup_server(self): self.server = 'dummy server' async def run_server(self): self.logger.info(self.server) asyn...
import os import yaml from jina import Gateway from jina.jaml import JAML from jina.serve.executors import BaseExecutor class MyDummyGateway(Gateway): async def setup_server(self): self.server = 'dummy server' async def run_server(self): self.logger.info(self.server) async def shutdown...
from groq._utils._utils import quote from backend.blocks.jina._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, JinaCredentials, JinaCredentialsField, JinaCredentialsInput, ) from backend.blocks.search import GetRequest from backend.data.block import Block, BlockCategory, BlockOutput, BlockS...
from groq._utils._utils import quote from backend.blocks.jina._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, JinaCredentials, JinaCredentialsField, JinaCredentialsInput, ) from backend.blocks.search import GetRequest from backend.data.block import Block, BlockCategory, BlockOutput, BlockS...
import multiprocessing import random import time from functools import partial import pytest from jina import Client, Document, DocumentArray, Executor, Flow, requests from jina.types.request.data import Response NUM_REQUESTS = 5 class MyExecutor(Executor): @requests(on='/ping') def ping(self, **kwargs): ...
import multiprocessing import random import time from functools import partial import pytest from jina import Client, Document, DocumentArray, Executor, Flow, requests from jina.types.request.data import Response NUM_REQUESTS = 5 class MyExecutor(Executor): @requests(on='/ping') def ping(self, **kwargs): ...
"""Gemini embeddings file.""" import deprecated from typing import Any, List, Optional from llama_index.core.base.embeddings.base import ( DEFAULT_EMBED_BATCH_SIZE, BaseEmbedding, ) from llama_index.core.bridge.pydantic import Field, PrivateAttr from llama_index.core.callbacks.base import CallbackManager imp...
"""Gemini embeddings file.""" import deprecated from typing import Any, List, Optional from llama_index.core.base.embeddings.base import ( DEFAULT_EMBED_BATCH_SIZE, BaseEmbedding, ) from llama_index.core.bridge.pydantic import Field, PrivateAttr from llama_index.core.callbacks.base import CallbackManager imp...
import subprocess import pytest from jina import Document, DocumentArray, Flow from ...flair_text import FlairTextEncoder _EMBEDDING_DIM = 100 @pytest.mark.parametrize('request_size', [1, 10, 50, 100]) def test_integration(request_size: int): docs = DocumentArray( [Document(text='just some random text ...
import subprocess import pytest from jina import Document, DocumentArray, Flow from ...flair_text import FlairTextEncoder _EMBEDDING_DIM = 100 @pytest.mark.parametrize('request_size', [1, 10, 50, 100]) def test_integration(request_size: int): docs = DocumentArray( [Document(text='just some random text ...
from .AdaptiveLayerLoss import AdaptiveLayerLoss from .CosineSimilarityLoss import CosineSimilarityLoss from .SoftmaxLoss import SoftmaxLoss from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss from .TripletLoss i...
from .CosineSimilarityLoss import CosineSimilarityLoss from .SoftmaxLoss import SoftmaxLoss from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss from .TripletLoss import TripletDistanceMetric, TripletLoss from .Ma...
from typing import List from pydantic import BaseModel from backend.blocks.exa._auth import ( ExaCredentials, ExaCredentialsField, ExaCredentialsInput, ) from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request impor...
from typing import List from pydantic import BaseModel from backend.blocks.exa._auth import ( ExaCredentials, ExaCredentialsField, ExaCredentialsInput, ) from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request impor...
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base

with read_base():
    from mmdet.configs.retinanet.retinanet_r50_caffe_fpn_1x_coco import *
    from mmdet.configs.retinanet.retinanet_r101_caffe_fpn_1x_coco import \
        model as r101

model = r101
# Copyright (c) OpenMMLab. All rights reserved.
if '_base_':
    from mmdet.configs.retinanet.retinanet_r50_caffe_fpn_1x_coco import *
    from mmdet.configs.retinanet.retinanet_r101_caffe_fpn_1x_coco import \
        model as r101

model = r101
"""Init file of LlamaIndex.""" __version__ = "0.12.24.post1" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_index...
"""Init file of LlamaIndex.""" __version__ = "0.12.24" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_index.core....
from dataclasses import dataclass from functools import partial from typing import Callable import torch import torchaudio from torchaudio.models import conv_tasnet_base, hdemucs_high @dataclass class SourceSeparationBundle: """Dataclass that bundles components for performing source separation. Example ...
from dataclasses import dataclass from functools import partial from typing import Callable import torch import torchaudio from torchaudio.models import conv_tasnet_base, hdemucs_high @dataclass class SourceSeparationBundle: """Dataclass that bundles components for performing source separation. Example ...
""" =================================================== Recursive feature elimination with cross-validation =================================================== A Recursive Feature Elimination (RFE) example with automatic tuning of the number of features selected with cross-validation. """ # Authors: The scikit-learn...
""" =================================================== Recursive feature elimination with cross-validation =================================================== A Recursive Feature Elimination (RFE) example with automatic tuning of the number of features selected with cross-validation. """ # Authors: The scikit-learn...
_base_ = './cascade-mask-rcnn_r50_fpn_instaboost-4x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
_base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
_base_ = '../glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py' lang_model_name = 'bert-base-uncased' model = dict(bbox_head=dict(early_fuse=True)) dataset_type = 'Flickr30kDataset' data_root = 'data/flickr30k_entities/' test_pipeline = [ dict( type='LoadImageFromFile', backend_args=None, imdecod...
_base_ = '../glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py' lang_model_name = 'bert-base-uncased' model = dict(bbox_head=dict(early_fuse=True), ) dataset_type = 'Flickr30kDataset' data_root = 'data/flickr30k/' test_pipeline = [ dict( type='LoadImageFromFile', backend_args=None, imdecode_backe...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6, use_legacy_coordinate=False): """Calculate the ious between each bbox of bboxes1 and bboxes2. Args: bbox...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6): """Calculate the ious between each bbox of bboxes1 and bboxes2. Args: bboxes1(ndarray): shape (n, 4) bboxes2(ndarray): shape (k, 4) mode(str): iou (intersectio...
from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode @_register_proto(proto_type_name='audio_torch_tensor') class AudioTorchTensor(AbstractAudioTensor,...
from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode @_register_proto(proto_type_name='audio_torch_tensor') class AudioTorchTensor(AbstractAudioTensor,...
# Copyright (c) OpenMMLab. All rights reserved. from .atss import ATSS from .autoassign import AutoAssign from .base import BaseDetector from .base_detr import DetectionTransformer from .boxinst import BoxInst from .cascade_rcnn import CascadeRCNN from .centernet import CenterNet from .condinst import CondInst from .co...
# Copyright (c) OpenMMLab. All rights reserved. from .atss import ATSS from .autoassign import AutoAssign from .base import BaseDetector from .boxinst import BoxInst from .cascade_rcnn import CascadeRCNN from .centernet import CenterNet from .condinst import CondInst from .cornernet import CornerNet from .crowddet impo...
# Copyright (c) OpenMMLab. All rights reserved. import glob import os import os.path as osp import urllib import warnings from typing import Union import torch from mmengine.config import Config, ConfigDict from mmengine.logging import print_log from mmengine.utils import scandir IMG_EXTENSIONS = ('.jpg', '.jpeg', '....
# Copyright (c) OpenMMLab. All rights reserved. import glob import os import os.path as osp import warnings from typing import Union from mmengine.config import Config, ConfigDict from mmengine.logging import print_log def find_latest_checkpoint(path, suffix='pth'): """Find the latest checkpoint from the working...
""" This is a simple application for sentence embeddings: semantic search We have a corpus with various sentences. Then, for a given query sentence, we want to find the most similar sentence in this corpus. This script outputs for various queries the top 5 most similar sentences in the corpus. """ import torch from...
""" This is a simple application for sentence embeddings: semantic search We have a corpus with various sentences. Then, for a given query sentence, we want to find the most similar sentence in this corpus. This script outputs for various queries the top 5 most similar sentences in the corpus. """ import torch from...
# Copyright (c) OpenMMLab. All rights reserved. from .anchor_free_head import AnchorFreeHead from .anchor_head import AnchorHead from .atss_head import ATSSHead from .autoassign_head import AutoAssignHead from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead from .cascade_rpn_head import CascadeRPNHead, StageCasca...
# Copyright (c) OpenMMLab. All rights reserved. from .anchor_free_head import AnchorFreeHead from .anchor_head import AnchorHead from .atss_head import ATSSHead from .autoassign_head import AutoAssignHead from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead from .centernet_head import CenterNetHead from .c...
# Copyright (c) OpenMMLab. All rights reserved. import pytest import torch from mmdet.models.backbones.hourglass import HourglassNet def test_hourglass_backbone(): with pytest.raises(AssertionError): # HourglassNet's num_stacks should larger than 0 HourglassNet(num_stacks=0) with pytest.rais...
# Copyright (c) OpenMMLab. All rights reserved. import pytest import torch from mmdet.models.backbones.hourglass import HourglassNet def test_hourglass_backbone(): with pytest.raises(AssertionError): # HourglassNet's num_stacks should larger than 0 HourglassNet(num_stacks=0) with pytest.rais...
from ...utils import is_flax_available, is_torch_available if is_torch_available(): from .controlnet import ControlNetModel, ControlNetOutput from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel from .controlnet_hunyuan import ( HunyuanControlNetOutput, ...
from ...utils import is_flax_available, is_torch_available if is_torch_available(): from .controlnet import ControlNetModel, ControlNetOutput from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel from .controlnet_hunyuan import ( HunyuanControlNetOutput, ...
# Copyright (c) OpenMMLab. All rights reserved. import glob import os import os.path as osp import warnings from mmengine.config import Config, ConfigDict from mmengine.logging import print_log def find_latest_checkpoint(path, suffix='pth'): """Find the latest checkpoint from the working directory. Args: ...
# Copyright (c) OpenMMLab. All rights reserved. import glob import os import os.path as osp import warnings from mmengine.config import Config, ConfigDict from mmengine.logging import print_log def find_latest_checkpoint(path, suffix='pth'): """Find the latest checkpoint from the working directory. Args: ...
import pathlib from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, JsonParser, Mapper, UnBatcher from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource from torchvisio...
import pathlib from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, JsonParser, Mapper, UnBatcher from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource from torchvision.prototype.da...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet import * # noqa from mmdet.core import DetDataSample from mmdet.testing import demo_mm_inputs, get_detector_cfg class TestSingleStageInstanceSegmentor(TestCa...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet import * # noqa from mmdet.core import DetDataSample from .utils import demo_mm_inputs, get_detector_cfg class TestSingleStageInstanceSegmentor(TestCase): ...
from __future__ import annotations from .PhraseTokenizer import PhraseTokenizer from .WhitespaceTokenizer import WhitespaceTokenizer from .WordTokenizer import ENGLISH_STOP_WORDS, TransformersTokenizerWrapper, WordTokenizer __all__ = [ "WordTokenizer", "WhitespaceTokenizer", "PhraseTokenizer", "ENGLIS...
from __future__ import annotations

from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer

__all__ = ["WordTokenizer", "WhitespaceTokenizer", "PhraseTokenizer", "ENGLISH_STOP_WORDS"]
_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py'] # optimizer model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) optim_wrapper...
_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py']
# optimizer
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(type='ResNeXt', depth=101, groups=64, base_width=4))
optim_wrapper = dict(optimizer=dict(type='SGD', lr=0.01))
from typing import Any, Optional, Type, TypeVar, Union from pydantic import Field from docarray.base_doc import BaseDoc from docarray.typing import TextUrl from docarray.typing.tensor.embedding import AnyEmbedding T = TypeVar('T', bound='TextDoc') class TextDoc(BaseDoc): """ Document for handling text. ...
from typing import Any, Optional, Type, TypeVar, Union from docarray.base_doc import BaseDoc from docarray.typing import TextUrl from docarray.typing.tensor.embedding import AnyEmbedding T = TypeVar('T', bound='TextDoc') class TextDoc(BaseDoc): """ Document for handling text. It can contain: - a [...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess from pathlib import Path import pytest @pytest.fixture(scope='session') def docker_image_name() -> str: return Path(__file__).parents[1].stem.lower() @pytest.fixture(scope='session') def bui...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import pytest from jina import Document, DocumentArray @pytest.fixture() def test_dir() -> str: return os.path.dirname(os.path.abspath(__file__)) @pytest.fixture() def data_generator(test_dir: str): ...
AMI_ID = { # Managed by XGBoost team "linux-amd64-gpu": { "us-west-2": "ami-08c3bc1dd5ec8bc5c", }, "linux-amd64-mgpu": { "us-west-2": "ami-08c3bc1dd5ec8bc5c", }, "windows-gpu": { "us-west-2": "ami-03c7f2156f93b22a7", }, "windows-cpu": { "us-west-2": "ami-0...
AMI_ID = { # Managed by XGBoost team "linux-amd64-gpu": { "us-west-2": "ami-094271bed4788ddb5", }, "linux-amd64-mgpu": { "us-west-2": "ami-094271bed4788ddb5", }, "windows-gpu": { "us-west-2": "ami-0839681594a1d7627", }, "windows-cpu": { "us-west-2": "ami-0...
import builtins import json from enum import Enum from typing import List, Optional, Type, Union from langchain_core.callbacks import AsyncCallbackManagerForToolRun from pydantic import BaseModel, Field from langchain_community.tools.ainetwork.base import AINBaseTool class AppOperationType(str, Enum): """Type o...
import builtins import json from enum import Enum from typing import List, Optional, Type, Union from langchain_core.callbacks import AsyncCallbackManagerForToolRun from pydantic import BaseModel, Field from langchain_community.tools.ainetwork.base import AINBaseTool class AppOperationType(str, Enum): """Type o...
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from typing import Literal from pydantic import BaseModel, ConfigDict, SecretStr from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import ( CredentialsField, C...
import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from typing import Literal from pydantic import BaseModel, ConfigDict, SecretStr from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import ( CredentialsField, C...
_base_ = './fast-rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(type='BN', requires_grad=False), style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( ...
_base_ = './fast_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(type='BN', requires_grad=False), style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( ...
_base_ = [ '../_base_/models/cascade-rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( type='DetectoRS_ResNet', conv_cfg=dict(type='ConvAWS'), output_img=True), neck=d...
_base_ = [ '../_base_/models/cascade_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( type='DetectoRS_ResNet', conv_cfg=dict(type='ConvAWS'), output_img=True), neck=d...
# Copyright (c) OpenMMLab. All rights reserved.
_base_ = [
    'mmdet::_base_/models/faster-rcnn_r50_fpn.py',
    'mmdet::_base_/datasets/coco_detection.py',
    'mmdet::_base_/schedules/schedule_1x.py',
    'mmdet::_base_/default_runtime.py'
]
# Copyright (c) OpenMMLab. All rights reserved.
_base_ = [
    'mmdet::_base_/models/faster_rcnn_r50_fpn.py',
    'mmdet::_base_/datasets/coco_detection.py',
    'mmdet::_base_/schedules/schedule_1x.py',
    'mmdet::_base_/default_runtime.py'
]
import math import torch import torchaudio.prototype.functional as F from parameterized import parameterized from torch.autograd import gradcheck from torchaudio_unittest.common_utils import TestBaseMixin class AutogradTestImpl(TestBaseMixin): @parameterized.expand( [ (8000, (2, 3, 5, 7)), ...
import torch import torchaudio.prototype.functional as F from parameterized import parameterized from torch.autograd import gradcheck from torchaudio_unittest.common_utils import TestBaseMixin class AutogradTestImpl(TestBaseMixin): @parameterized.expand( [ (8000, (2, 3, 5, 7)), (80...
#!/usr/bin/env python3 # Write the available versions page (--rst) and the version switcher JSON (--json). # Version switcher see: # https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/version-dropdown.html # https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/announcements.html#announcement-ba...
#!/usr/bin/env python3 # Write the available versions page (--rst) and the version switcher JSON (--json). # Version switcher see: # https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/version-dropdown.html # https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/announcements.html#announcement-ba...
""" =================================== Visualizations with Display Objects =================================== .. currentmodule:: sklearn.metrics In this example, we will construct display objects, :class:`ConfusionMatrixDisplay`, :class:`RocCurveDisplay`, and :class:`PrecisionRecallDisplay` directly from their resp...
""" =================================== Visualizations with Display Objects =================================== .. currentmodule:: sklearn.metrics In this example, we will construct display objects, :class:`ConfusionMatrixDisplay`, :class:`RocCurveDisplay`, and :class:`PrecisionRecallDisplay` directly from their resp...
from __future__ import annotations import json import os from typing import Any import torch from torch import nn class SpladePooling(nn.Module): """SPLADE pooling layer that aggregates MLM logits using max or sum pooling. This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size) ...
from __future__ import annotations import json import os from typing import Any import torch from torch import nn class SpladePooling(nn.Module): """SPLADE pooling layer that aggregates MLM logits using max or sum pooling. This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size) ...
# Copyright (c) OpenMMLab. All rights reserved. from .dropblock import DropBlock from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder __all__ = [ 'DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder', 'MSDeformAttnPixel...
# Copyright (c) OpenMMLab. All rights reserved.
from .dropblock import DropBlock
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder

__all__ = ['DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder']
r""" AgentSearch reader. Example as of 1/8/2024: ```python AgentSearch = download_loader("AgentSearch") document = reader.load_data( query="latest news", search_provider="bing" )[0] print(f'Document:\n{document} ') ``` ```plaintext Document: Doc ID: 67a57dfe-8bd6-4c69-af9d-683e76177119 Text: The latest new...
r"""AgentSearch reader. Example as of 1/8/2024: ```python AgentSearch = download_loader("AgentSearch") document = reader.load_data( query="latest news", search_provider="bing" )[0] print(f'Document:\n{document} ') ``` ```plaintext Document: Doc ID: 67a57dfe-8bd6-4c69-af9d-683e76177119 Text: The latest news...
""" ===================================== How to write your own Datapoint class ===================================== This guide is intended for advanced users and downstream library maintainers. We explain how to write your own datapoint class, and how to make it compatible with the built-in Torchvision v2 transforms...
""" ===================================== How to write your own Datapoint class ===================================== This guide is intended for downstream library maintainers. We explain how to write your own datapoint class, and how to make it compatible with the built-in Torchvision v2 transforms. Before continuing...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
"""Callback Handler that prints to std out.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Optional from typing_extensions import override from langchain_core.callbacks.base import BaseCallbackHandler from langchain_core.utils import print_text if TYPE_CHECKING: from langchain_cor...
"""Callback Handler that prints to std out.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Optional from langchain_core.callbacks.base import BaseCallbackHandler from langchain_core.utils import print_text if TYPE_CHECKING: from langchain_core.agents import AgentAction, AgentFinish...
"""General node utils.""" import logging import uuid from typing import List, Optional, Protocol, runtime_checkable from llama_index.core.schema import ( BaseNode, Document, ImageDocument, ImageNode, NodeRelationship, TextNode, ) from llama_index.core.utils import truncate_text logger = loggi...
"""General node utils.""" import logging import uuid from typing import List, Optional, Protocol, runtime_checkable from llama_index.core.schema import ( BaseNode, Document, ImageDocument, ImageNode, NodeRelationship, TextNode, ) from llama_index.core.utils import truncate_text logger = loggi...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' image_size = (1024, 1024) file_client_args = dict(backend='disk') # comment out the code below to use different file client # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # ...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' image_size = (1024, 1024) file_client_args = dict(backend='disk') # comment out the code below to use different file client # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # ...
from typing import List, Optional, Literal from llama_index.core.readers.base import BasePydanticReader from llama_index.core.schema import Document class SpiderWebReader(BasePydanticReader): """ Scrapes a URL for data and returns llm-ready data with `Spider.cloud`. Must have the Python package `spider-...
from typing import List, Optional, Literal from llama_index.core.readers.base import BasePydanticReader from llama_index.core.schema import Document class SpiderWebReader(BasePydanticReader): """ Scrapes a URL for data and returns llm-ready data with `Spider.cloud`. Must have the Python package `spider-...
import os import time import pytest from jina import Client, Document, DocumentArray, Flow cur_dir = os.path.dirname(os.path.abspath(__file__)) img_name = 'jina/replica-exec' @pytest.fixture(scope='function') def docker_image_built(): import docker client = docker.from_env() client.images.build(path=...
import os import time import pytest from jina import Client, Document, DocumentArray, Flow cur_dir = os.path.dirname(os.path.abspath(__file__)) img_name = 'jina/replica-exec' exposed_port = 12345 @pytest.fixture(scope='function') def docker_image_built(): import docker client = docker.from_env() clie...
import logging import time from abc import ABC, abstractmethod from typing import ClassVar, Optional from backend.data.model import OAuth2Credentials from backend.integrations.providers import ProviderName logger = logging.getLogger(__name__) class BaseOAuthHandler(ABC): # --8<-- [start:BaseOAuthHandler1] P...
import logging import time from abc import ABC, abstractmethod from typing import ClassVar, Optional from backend.data.model import OAuth2Credentials from backend.integrations.providers import ProviderName logger = logging.getLogger(__name__) class BaseOAuthHandler(ABC): # --8<-- [start:BaseOAuthHandler1] P...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from mmcv.runner import BaseModule class BaseMaskHead(BaseModule, metaclass=ABCMeta): """Base class for mask heads used in One-Stage Instance Segmentation.""" def __init__(self, init_cfg): super(BaseMaskHead, sel...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from mmcv.runner import BaseModule class BaseMaskHead(BaseModule, metaclass=ABCMeta): """Base class for mask heads used in One-Stage Instance Segmentation.""" def __init__(self, init_cfg): super(BaseMaskHead, sel...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# model settings preprocess_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True, pad_size_divisor=32) model = dict( type='MaskRCNN', preprocess_cfg=preprocess_cfg, backbone=dict( type='ResNet', depth=50, num_stages=4, out_indi...
# model settings img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) model = dict( type='MaskRCNN', img_norm_cfg=img_norm_cfg, backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages...
from pathlib import Path from typing import Any, Callable, Optional, Tuple import PIL.Image from .utils import download_and_extract_archive from .vision import VisionDataset class SUN397(VisionDataset): """`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_. The SUN397 or Scene UNderst...
from pathlib import Path from typing import Any, Callable, Optional, Tuple import PIL.Image from .utils import download_and_extract_archive from .vision import VisionDataset class SUN397(VisionDataset): """`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_. The SUN397 or Scene UNderst...
# Copyright (c) OpenMMLab. All rights reserved. import time import pytest import mmengine def test_timer_init(): timer = mmengine.Timer(start=False) assert not timer.is_running timer.start() assert timer.is_running timer = mmengine.Timer() assert timer.is_running def test_timer_run(): ...
# Copyright (c) OpenMMLab. All rights reserved. import time import mmcv import pytest def test_timer_init(): timer = mmcv.Timer(start=False) assert not timer.is_running timer.start() assert timer.is_running timer = mmcv.Timer() assert timer.is_running def test_timer_run(): timer = mmcv....
_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py' model = dict( rpn_head=dict( _delete_=True, type='GARPNHead', in_channels=256, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=8, scales_per_octave=3,...
_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py' model = dict( rpn_head=dict( _delete_=True, type='GARPNHead', in_channels=256, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=8, scales_per_octave=3,...
import multiprocessing import os import signal import time import pytest from jina import Document, DocumentArray, Executor, requests from jina.clients.request import request_generator from jina.parsers import set_gateway_parser from jina.serve.networking.utils import send_request_sync from jina_cli.api import execut...
import multiprocessing import os import signal import time import pytest from jina import Document, DocumentArray, Executor, requests from jina.clients.request import request_generator from jina.parsers import set_gateway_parser from jina.serve.networking.utils import send_request_sync from jina_cli.api import execut...
from typing import Union from fastapi import FastAPI from pydantic import BaseModel class Item(BaseModel): name: str description: Union[str, None] = None price: float tax: Union[float, None] = None app = FastAPI() @app.post("/items/") async def create_item(item: Item): item_dict = item.dict()...
from typing import Union from fastapi import FastAPI from pydantic import BaseModel class Item(BaseModel): name: str description: Union[str, None] = None price: float tax: Union[float, None] = None app = FastAPI() @app.post("/items/") async def create_item(item: Item): item_dict = item.dict()...
"""Chat Message.""" from typing import Any, Literal from typing_extensions import override from langchain_core.messages.base import ( BaseMessage, BaseMessageChunk, merge_content, ) from langchain_core.utils._merge import merge_dicts class ChatMessage(BaseMessage): """Message that can be assigned a...
"""Chat Message.""" from typing import Any, Literal from typing_extensions import override from langchain_core.messages.base import ( BaseMessage, BaseMessageChunk, merge_content, ) from langchain_core.utils._merge import merge_dicts class ChatMessage(BaseMessage): """Message that can be assigned a...
"""Test HyDE.""" from typing import Any, Optional import numpy as np from langchain_core.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain_core.embeddings import Embeddings from langchain_core.language_models.llms import BaseLLM from langchain_core.outputs im...
"""Test HyDE.""" from typing import Any, Optional import numpy as np from langchain_core.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain_core.embeddings import Embeddings from langchain_core.language_models.llms import BaseLLM from langchain_core.outputs im...
_base_ = './mask_rcnn_r50_fpn_1x_coco.py' preprocess_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False, pad_size_divisor=32) model = dict( # use caffe img_norm preprocess_cfg=preprocess_cfg, backbone=dict( norm_cfg=dict(requires_grad=False), styl...
_base_ = './mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( mean=[103.5...
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. from typing import Optional import fire from llama import Llama def main( ckpt_dir: str, tokenizer_path: str, temperature: float = 0.6, ...
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. from typing import Optional import fire from llama import Llama def main( ckpt_dir: str, tokenizer_path: str, temperature: float = 0.6, ...
import os import sys import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm sys.path.append("tests/python") import test_basic_models as test_bm # Don't import the test class, otherwise they will run twice. import test_callback as test_cb # noqa rng = np.random.RandomState(1994) ...
import os import sys import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm sys.path.append("tests/python") import test_basic_models as test_bm # Don't import the test class, otherwise they will run twice. import test_callback as test_cb # noqa rng = np.random.RandomState(1994) ...
from typing_extensions import TYPE_CHECKING if TYPE_CHECKING: from rich.console import Console, ConsoleOptions, RenderResult from rich.measure import Measurement from docarray.typing.tensor.abstract_tensor import AbstractTensor class TensorDisplay: """ Rich representation of a tensor. """ ...
from typing_extensions import TYPE_CHECKING if TYPE_CHECKING: from rich.console import Console, ConsoleOptions, RenderResult from rich.measure import Measurement from docarray.typing.tensor.abstract_tensor import AbstractTensor class TensorDisplay: """ Rich representation of a tensor. """ ...
_base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( backbone=dict(deepen_factor=0.33, widen_factor=0.375), neck=dict(in_channels=[96, 192, 384], out_channels=96), bbox_head=dict(in_channels=96, feat_channels=96)) # dataset settings img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], ...
_base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( backbone=dict(deepen_factor=0.33, widen_factor=0.375), neck=dict(in_channels=[96, 192, 384], out_channels=96), bbox_head=dict(in_channels=96, feat_channels=96)) # dataset settings img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], ...
from .dpr_reader import DPRReaderRanker
from .dpr_reader import DPRReaderRanker
import logging import os from typing import Optional from jina import __default_host__ from jina.importer import ImportExtensions from jina.serve.gateway import BaseGateway from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app class WebSocketGateway(BaseGateway): """WebSocket Gateway implementati...
import logging import os from typing import Optional from jina import __default_host__ from jina.importer import ImportExtensions from jina.serve.gateway import BaseGateway from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app class WebSocketGateway(BaseGateway): """WebSocket Gateway implementati...
import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from typing import Literal from pydantic import BaseModel, ConfigDict, SecretStr from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import ( CredentialsField, C...
import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from pydantic import BaseModel, ConfigDict from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import BlockSecret, SchemaField, SecretField class EmailCredentials(Base...
""" Demo for prediction using individual trees and model slices =========================================================== """ import os import numpy as np from scipy.special import logit from sklearn.datasets import load_svmlight_file import xgboost as xgb CURRENT_DIR = os.path.dirname(__file__) train = os.path.jo...
""" Demo for prediction using individual trees and model slices =========================================================== """ import os import numpy as np from scipy.special import logit from sklearn.datasets import load_svmlight_file import xgboost as xgb CURRENT_DIR = os.path.dirname(__file__) train = os.path.jo...
from llama_index.core.storage.kvstore.types import BaseKVStore
from llama_index.storage.docstore.azurecosmosnosql import AzureCosmosNoSqlDocumentStore


def test_class():
    names_of_base_classes = [b.__name__ for b in AzureCosmosNoSqlDocumentStore.__mro__]
    assert BaseKVStore.__name__ in names_of_base_classes
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore from llama_index.storage.docstore.azurecosmosnosql import AzureCosmosNoSqlDocumentStore def test_class(): names_of_base_classes = [b.__name__ for b in AzureCosmosNoSqlDocumentStore.__mro__] assert KVDocumentStore.__name__ in names_o...
"""Argparser module for WorkerRuntime""" from jina import __default_host__, helper from jina.parsers.helper import KVAppendAction def mixin_base_runtime_parser(arg_group): """Mixing in arguments required by any class that extends :class:`AsynNewLoopRuntime` into the given parser. :param arg_group: the parser...
"""Argparser module for WorkerRuntime""" from jina import __default_host__, helper from jina.parsers.helper import KVAppendAction, add_arg_group def mixin_base_runtime_parser(arg_group): """Mixing in arguments required by any class that extends :class:`AsynNewLoopRuntime` into the given parser. :param arg_gro...
_base_ = [ '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' ] # model settings model = dict( type='CornerNet', backbone=dict( type='HourglassNet', downsample_times=5, num_stacks=2, stage_channels=[256, 256, 384, 384, 384, 512], stage_blocks=[2, ...
_base_ = [ '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' ] # model settings model = dict( type='CornerNet', backbone=dict( type='HourglassNet', downsample_times=5, num_stacks=2, stage_channels=[256, 256, 384, 384, 384, 512], stage_blocks=[2, ...
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.runner import BaseModule from mmdet.registry import MODELS from ...core import bbox_cxcywh_to_xyxy @MODELS.register_module() class EmbeddingRPNHead(BaseModule): """RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011...
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.runner import BaseModule from mmdet.models.builder import HEADS from ...core import bbox_cxcywh_to_xyxy @HEADS.register_module() class EmbeddingRPNHead(BaseModule): """RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/...