Columns:
input: string, lengths 33 to 5k
output: string, lengths 32 to 5k
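Each row below is an input/output pair of Python source, flattened to one line and truncated at the ellipsis. As a hedged sketch of how a two-string-column dataset like this could be loaded and inspected with the Hugging Face `datasets` library; the hub ID `user/code-edit-pairs` is a placeholder, not the dataset's real name:

```python
from datasets import load_dataset

# Placeholder hub ID; substitute the dataset's actual name.
ds = load_dataset("user/code-edit-pairs", split="train")

for row in ds.select(range(3)):
    # Each row carries two Python source strings in the advertised
    # length ranges (input: 33 to 5k chars, output: 32 to 5k chars).
    print(len(row["input"]), len(row["output"]))
```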
from llama_index.core.base.embeddings.base import BaseEmbedding from llama_index.embeddings.fastembed import FastEmbedEmbedding from pathlib import Path from unittest.mock import patch def test_class(): names_of_base_classes = [b.__name__ for b in FastEmbedEmbedding.__mro__] assert BaseEmbedding.__name__ in n...
from llama_index.core.base.embeddings.base import BaseEmbedding from llama_index.embeddings.fastembed import FastEmbedEmbedding def test_class(): names_of_base_classes = [b.__name__ for b in FastEmbedEmbedding.__mro__] assert BaseEmbedding.__name__ in names_of_base_classes
# ruff: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICE...
# ruff: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICE...
_base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py', '../_base_/default_runtime.py' ] model = dict(bbox_head=dict(num_classes=1)) max_epochs = 24 param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=1000), dict( type...
_base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py', '../_base_/default_runtime.py' ] model = dict(bbox_head=dict(num_classes=1)) # optimizer optimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4) optimizer_config = dict() # learning policy lr_config = dict( policy='...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser...
""" OPUS (http://opus.nlpl.eu/) is a great collection of different parallel datasets for more than 400 languages. On the website, you can download parallel datasets for many languages in different formats. I found that the format "Bottom-left triangle: download plain text files (MOSES/GIZA++)" requires minimal overhea...
""" OPUS (http://opus.nlpl.eu/) is a great collection of different parallel datasets for more than 400 languages. On the website, you can download parallel datasets for many languages in different formats. I found that the format "Bottom-left triangle: download plain text files (MOSES/GIZA++)" requires minimal overhea...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess from typing import Dict, List, Optional import spacy from jina import DocumentArray, Executor, requests from jina_commons.batching import get_docs_batch_generator _EXCLUDE_COMPONENTS = [ '...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess from typing import Dict, List, Optional import spacy from jina import DocumentArray, Executor, requests from jina_commons.batching import get_docs_batch_generator _EXCLUDE_COMPONENTS = [ '...
import numpy as np import pytest from tensorflow import data as tf_data from keras.src import backend from keras.src import layers from keras.src import testing class RescalingTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_rescaling_basics(self): self.run_layer_test( ...
import numpy as np import pytest from tensorflow import data as tf_data from keras.src import backend from keras.src import layers from keras.src import testing class RescalingTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_rescaling_basics(self): self.run_layer_test( ...
from __future__ import annotations import json import logging import re from re import Pattern from typing import Optional, Union from langchain_core.agents import AgentAction, AgentFinish from langchain_core.exceptions import OutputParserException from langchain_core.language_models import BaseLanguageModel from pyd...
from __future__ import annotations import json import logging import re from re import Pattern from typing import Optional, Union from langchain_core.agents import AgentAction, AgentFinish from langchain_core.exceptions import OutputParserException from langchain_core.language_models import BaseLanguageModel from pyd...
""" This script contains an example how to perform semantic search with Elasticsearch. As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions: https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs Questions are indexed to Elasticsearch together with their ...
""" This script contains an example how to perform semantic search with Elasticsearch. As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions: https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs Questions are indexed to Elasticsearch together with their ...
from typing import Any, Optional, Sequence from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType from tonic_validate.metrics.retrieval_precision_metric import ( RetrievalPrecisionMetric, ) from tonic_validate.service...
from typing import Any, Optional, Sequence from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType from tonic_validate.metrics.retrieval_precision_metric import ( RetrievalPrecisionMetric, ) from tonic_validate.service...
from urllib.parse import quote from backend.blocks.jina._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, JinaCredentials, JinaCredentialsField, JinaCredentialsInput, ) from backend.blocks.search import GetRequest from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema ...
from groq._utils._utils import quote from backend.blocks.jina._auth import ( TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, JinaCredentials, JinaCredentialsField, JinaCredentialsInput, ) from backend.blocks.search import GetRequest from backend.data.block import Block, BlockCategory, BlockOutput, BlockS...
from typing import TYPE_CHECKING, List from docarray.typing.tensor.abstract_tensor import AbstractTensor if TYPE_CHECKING: from docarray.array import DocumentArrayStacked from docarray.array.abstract_array import AnyDocumentArray class DocumentArraySummary: def __init__(self, da: 'AnyDocumentArray'): ...
from typing import TYPE_CHECKING, List from docarray.typing.tensor.abstract_tensor import AbstractTensor if TYPE_CHECKING: from docarray.array import DocumentArrayStacked from docarray.array.abstract_array import AnyDocumentArray class DocumentArraySummary: def __init__(self, da: 'AnyDocumentArray'): ...
import os from typing import Type import orjson from pydantic import BaseModel, Field, parse_obj_as from docarray.base_document.abstract_document import AbstractDocument from docarray.base_document.base_node import BaseNode from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode from docarray...
import os from typing import Type import orjson from pydantic import BaseModel, Field, parse_obj_as from docarray.base_document.abstract_document import AbstractDocument from docarray.base_document.base_node import BaseNode from docarray.base_document.io.json import orjson_dumps from docarray.base_document.mixins imp...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
import numpy as np import pytest from docarray.utils._internal.misc import is_tf_available tf_available = is_tf_available() if tf_available: import tensorflow as tf from docarray.computation.tensorflow_backend import TensorFlowCompBackend from docarray.typing import TensorFlowTensor @pytest.mark.tensor...
""" This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64]. It generates sentence embeddings that can be compared using...
""" This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64]. It generates sentence embeddings that can be compared using...
import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False) parser.add_argument("--dpm", action="store_true", help="Enable DPMSol...
import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False) parser.add_argument("--dpm", action="store_true", help="Enable DPMSol...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.utils.parrots_wrapper import TORCH_VERSION from mmengine.utils.version_utils import digit_version from .distributed import MMDistributedDataParallel from .seperate_distributed import MMSeparateDistributedDataParallel from .utils import is_model_wrapper __al...
# Copyright (c) OpenMMLab. All rights reserved. from .distributed import MMDistributedDataParallel from .seperate_distributed import MMSeparateDistributedDataParallel from .utils import is_model_wrapper __all__ = [ 'MMDistributedDataParallel', 'is_model_wrapper', 'MMSeparateDistributedDataParallel' ]
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable...
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable...
_base_ = './ga-retinanet_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch'...
_base_ = './ga_retinanet_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch'...
# Copyright (c) OpenMMLab. All rights reserved. from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS, AmpOptimWrapper, DefaultOptimWrapperConstructor, OptimWrapper, OptimWrapperDict, build_optim_wrapper) # yapf: disable from .scheduler import (ConstantLR, Consta...
# Copyright (c) OpenMMLab. All rights reserved. from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS, AmpOptimWrapper, DefaultOptimWrapperConstructor, OptimWrapper, OptimWrapperDict, build_optim_wrapper) # yapf: disable from .scheduler import (ConstantLR, Consta...
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3 __all__ = [ "EMFORMER_RNNT_BASE_MUSTC", "EMFORMER_RNNT_BASE_TEDLIUM3", ]
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3 from .source_separation_pipeline import HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS __all__ = [ "EMFORMER_RNNT_BASE_MUSTC", "EMFORMER_RNNT_BASE_TEDLIUM3", "HDEMUCS_HIGH_MUSDB_PLUS", "HDEMUCS_HIGH_MUSDB", ]
_base_ = './mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py' # learning policy max_epochs = 24 train_cfg = dict(max_epochs=max_epochs) # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, ...
_base_ = './mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py' # learning policy lr_config = dict(step=[20, 23]) runner = dict(type='EpochBasedRunner', max_epochs=24)
from typing import Any, Dict from torchvision.prototype import datapoints from torchvision.prototype.transforms import functional as F, Transform from torchvision.prototype.transforms.utils import is_simple_tensor class UniformTemporalSubsample(Transform): _transformed_types = (is_simple_tensor, datapoints.Vide...
from typing import Any, Dict from torchvision.prototype import features from torchvision.prototype.transforms import functional as F, Transform class UniformTemporalSubsample(Transform): _transformed_types = (features.is_simple_tensor, features.Video) def __init__(self, num_samples: int, temporal_dim: int =...
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py' model = dict( # ResNeXt-101-32x8d model trained with Caffe2 at FB, # so the mean and std need to be changed. data_preprocessor=dict( type='DetDataPreprocessor', mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], ...
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py' preprocess_cfg = dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], to_rgb=False, pad_size_divisor=32) model = dict( # ResNeXt-101-32x8d model trained with Caffe2 at FB, # so the mean and std need to be changed. p...
"""Milvus reader.""" from typing import Any, Dict, List, Optional from uuid import uuid4 from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class MilvusReader(BaseReader): """Milvus reader.""" def __init__( self, host: str = "localhost", ...
"""Milvus reader.""" from typing import Any, Dict, List, Optional from uuid import uuid4 from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document class MilvusReader(BaseReader): """Milvus reader.""" def __init__( self, host: str = "localhost", ...
from __future__ import annotations from torch import Tensor, nn from sentence_transformers.cross_encoder import CrossEncoder # TODO: Consider the naming of this class class CrossEntropyLoss(nn.Module): def __init__(self, model: CrossEncoder) -> None: super().__init__() self.model = model ...
from __future__ import annotations import time from contextlib import ContextDecorator from torch import Tensor, nn from sentence_transformers.cross_encoder import CrossEncoder class timer(ContextDecorator): def __init__(self, name: str) -> None: self.name = name def __enter__(self) -> None: ...
"""This tool allows agents to generate images using Steamship. Steamship offers access to different third party image generation APIs using a single API key. Today the following models are supported: - Dall-E - Stable Diffusion To use this tool, you must first set as environment variables: STEAMSHIP_API_KEY ``` ...
"""This tool allows agents to generate images using Steamship. Steamship offers access to different third party image generation APIs using a single API key. Today the following models are supported: - Dall-E - Stable Diffusion To use this tool, you must first set as environment variables: STEAMSHIP_API_KEY ``` ...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import subprocess import pytest from jina import Document, Flow from video_torch_encoder import VideoTorchEncoder cur_dir = os.path.dirname(os.path.abspath(__file__)) @pytest.fixture() def kinects_v...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import subprocess import pytest from jina import Document, Flow from ...video_torch_encoder import VideoTorchEncoder cur_dir = os.path.dirname(os.path.abspath(__file__)) @pytest.fixture() def kinec...
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn def accuracy(pred, target, topk=1, thresh=None): """Calculate accuracy according to the prediction and target. Args: pred (torch.Tensor): The model prediction, shape (N, num_class) target (torch.Tensor): The target of each ...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch.nn as nn @mmcv.jit(coderize=True) def accuracy(pred, target, topk=1, thresh=None): """Calculate accuracy according to the prediction and target. Args: pred (torch.Tensor): The model prediction, shape (N, num_class) targe...
_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
_base_ = './mask_rcnn_r50_fpn_1x_coco.py' data_preprocessor = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False, pad_size_divisor=32) model = dict( # use caffe img_norm data_preprocessor=data_preprocessor, backbone=dict( norm_cfg=dict(requires_grad=False),...
_base_ = './mask_rcnn_r50_fpn_1x_coco.py' preprocess_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False, pad_size_divisor=32) model = dict( # use caffe img_norm preprocess_cfg=preprocess_cfg, backbone=dict( norm_cfg=dict(requires_grad=False), styl...
import csv import gzip import logging import math import os from datetime import datetime from torch.utils.data import DataLoader from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator #### Ju...
from torch.utils.data import DataLoader import math from sentence_transformers import models, losses from sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator import logging from datetime import datetime import os ...
import random import time import pytest from jina import Client, Document, DocumentArray, Executor, Flow, requests @pytest.mark.parametrize('stream', [True, False]) @pytest.mark.parametrize('protocol', ['grpc']) def test_return_order_in_client(protocol, stream): class ExecutorRandomSleepExecutor(Executor): ...
import random import time import pytest from jina import Client, Document, DocumentArray, Executor, Flow, requests @pytest.mark.parametrize('protocol', ['grpc']) def test_return_order_in_client(protocol): class ExecutorRandomSleepExecutor(Executor): @requests def foo(self, *args, **kwargs): ...
import inspect from abc import ABC from functools import reduce from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union if TYPE_CHECKING: # pragma: no cover from jina.orchestrate.flow.base import Flow from jina.serve.executors import BaseExecutor class VersionedYAMLParser: """Flow YAML p...
import inspect from abc import ABC from functools import reduce from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union if TYPE_CHECKING: # pragma: no cover from jina.orchestrate.flow.base import Flow from jina.serve.executors import BaseExecutor class VersionedYAMLParser: """Flow YAML pa...
# Copyright (c) OpenMMLab. All rights reserved. import math from typing import Optional import torch import torch.nn as nn from mmengine.model import ExponentialMovingAverage from torch import Tensor from mmdet.registry import MODELS @MODELS.register_module() class ExpMomentumEMA(ExponentialMovingAverage): """E...
# Copyright (c) OpenMMLab. All rights reserved. import math from typing import Optional import torch import torch.nn as nn from mmengine.model import ExponentialMovingAverage from torch import Tensor from mmdet.registry import MODELS @MODELS.register_module() class ExpMomentumEMA(ExponentialMovingAverage): """E...
import copy import warnings from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class DownloadConfig: """Configuration for our cached path manager. Attributes: cache_dir (`str` or `Path`, *optional*): Specify a cache directory to ...
import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class DownloadConfig: """Configuration for our cached path manager. Attributes: cache_dir (`str` or `Path`, *optional*): Specify a cache directory to save the file to...
# Copyright (c) OpenMMLab. All rights reserved. import logging import os.path as osp from argparse import ArgumentParser import mmcv from mmengine.config import Config from mmengine.logging import MMLogger from mmengine.utils import mkdir_or_exist from mmdet.apis import inference_detector, init_detector from mmdet.re...
# Copyright (c) OpenMMLab. All rights reserved. import logging import os.path as osp from argparse import ArgumentParser import mmcv from mmengine.config import Config from mmengine.logging import MMLogger from mmdet.apis import inference_detector, init_detector from mmdet.registry import VISUALIZERS from mmdet.utils...
from datetime import datetime import pytest from langchain_core.exceptions import OutputParserException from langchain.output_parsers.datetime import DatetimeOutputParser def test_datetime_output_parser_parse() -> None: parser = DatetimeOutputParser() # Test valid input date = datetime.now() datest...
from datetime import datetime from time import sleep from langchain.output_parsers.datetime import DatetimeOutputParser def test_datetime_output_parser_parse() -> None: parser = DatetimeOutputParser() # Test valid input date = datetime.now() datestr = date.strftime(parser.format) result = parser...
# mypy: allow-untyped-defs import functools from collections.abc import Hashable from dataclasses import dataclass, fields from typing import TypeVar from typing_extensions import dataclass_transform T = TypeVar("T", bound="_Union") class _UnionTag(str): __slots__ = ("_cls",) _cls: Hashable @staticmeth...
# mypy: allow-untyped-defs import functools from collections.abc import Hashable from dataclasses import dataclass, fields from typing import TypeVar from typing_extensions import dataclass_transform T = TypeVar("T", bound="_Union") class _UnionTag(str): __slots__ = ("_cls",) _cls: Hashable @staticmeth...
_base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( data_preprocessor=dict(batch_augments=[ dict( type='BatchSyncRandomResize', random_size_range=(320, 640), size_divisor=32, interval=10) ]), backbone=dict(deepen_factor=0.33, widen_fa...
_base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( random_size_range=(10, 20), backbone=dict(deepen_factor=0.33, widen_factor=0.375), neck=dict(in_channels=[96, 192, 384], out_channels=96), bbox_head=dict(in_channels=96, feat_channels=96)) img_scale = (640, 640) # height, width # f...
import json from pathlib import Path from typing import Any, Callable, Optional, Tuple import PIL.Image from .utils import download_and_extract_archive, verify_str_arg from .vision import VisionDataset class Food101(VisionDataset): """`The Food-101 Data Set <https://data.vision.ee.ethz.ch/cvl/datasets_extra/foo...
import json from pathlib import Path from typing import Any, Callable, Optional, Tuple import PIL.Image from .utils import download_and_extract_archive, verify_str_arg from .vision import VisionDataset class Food101(VisionDataset): """`The Food-101 Data Set <https://data.vision.ee.ethz.ch/cvl/datasets_extra/foo...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] norm_cfg = dict(type='BN', requires_grad=True) model = dict( data_preprocessor=dict( type='DetDataPreprocessor', mean=[123.6...
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' ] cudnn_benchmark = True norm_cfg = dict(type='BN', requires_grad=True) model = dict( backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(...
""" This example computes the score between a query and all possible sentences in a corpus using a Cross-Encoder for semantic textual similarity (STS). It output then the most similar sentences for the given query. """ from sentence_transformers.cross_encoder import CrossEncoder import numpy as np # Pre-trained cross ...
""" This example computes the score between a query and all possible sentences in a corpus using a Cross-Encoder for semantic textual similarity (STS). It output then the most similar sentences for the given query. """ from sentence_transformers.cross_encoder import CrossEncoder import numpy as np # Pre-trained cross ...
import importlib import shutil import threading from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem _has_s3fs = importlib.util.find_spec("s3fs") is not None if _has_s3fs: from .s3filesystem import S3FileSystem # noqa: F401 COMPRESSION_FILE...
import importlib import threading from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem _has_s3fs = importlib.util.find_spec("s3fs") is not None if _has_s3fs: from .s3filesystem import S3FileSystem # noqa: F401 COMPRESSION_FILESYSTEMS: List[...
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. import fire from llama import Llama from typing import List def main( ckpt_dir: str, tokenizer_path: str, temperature: float = 0.6, top_p...
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. import fire from llama import Llama def main( ckpt_dir: str, tokenizer_path: str, temperature: float = 0.6, top_p: float = 0.9, max_...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.image import affine_transform from keras.src.ops.image import crop_images from keras.src.ops.image import extract_patches from keras.src.ops.image import hsv_to_rgb from keras.src...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.image import affine_transform from keras.src.ops.image import crop_images from keras.src.ops.image import extract_patches from keras.src.ops.image import hsv_to_rgb from keras.src...
import logging from collections import defaultdict from typing import Annotated, Any, Dict, List, Optional, Sequence from fastapi import APIRouter, Body, Depends, HTTPException from prisma.enums import AgentExecutionStatus, APIKeyPermission from typing_extensions import TypedDict import backend.data.block from backen...
import logging from collections import defaultdict from typing import Annotated, Any, Dict, List, Optional, Sequence from fastapi import APIRouter, Body, Depends, HTTPException from prisma.enums import AgentExecutionStatus, APIKeyPermission from typing_extensions import TypedDict import backend.data.block from backen...
# model settings preprocess_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False, pad_size_divisor=32) norm_cfg = dict(type='BN', requires_grad=False) model = dict( preprocess_cfg=preprocess_cfg, type='FasterRCNN', backbone=dict( type='ResNet', dept...
# model settings norm_cfg = dict(type='BN', requires_grad=False) model = dict( type='FasterRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, strides=(1, 2, 2, 1), dilations=(1, 1, 1, 2), out_indices=(3, ), frozen_stages=1, norm_cfg=norm_...
import functools import os import os.path import pathlib from typing import Any, BinaryIO, Collection, Dict, List, Optional, Tuple, Union from torchdata.datapipes.iter import FileLister, FileOpener, Filter, IterDataPipe, Mapper from torchvision.prototype.datasets.utils import EncodedData, EncodedImage from torchvision...
import functools import os import os.path import pathlib from typing import Any, BinaryIO, Collection, Dict, List, Optional, Tuple, Union from torchdata.datapipes.iter import FileLister, FileOpener, Filter, IterDataPipe, Mapper from torchvision.prototype.datapoints import Label from torchvision.prototype.datasets.util...
from typing import Iterable, Dict, TYPE_CHECKING import numpy as np from docarray import DocumentArray from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin from docarray.array.storage.base.helper import Offset2ID from docarray.array.storage.milvus.backend import ( _always_true_expr, _ids_to_mi...
from typing import Iterable, Dict, TYPE_CHECKING import numpy as np from docarray import DocumentArray from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin from docarray.array.storage.base.helper import Offset2ID from docarray.array.storage.milvus.backend import ( _always_true_expr, _ids_to_mi...
# Copyright (c) OpenMMLab. All rights reserved. from .activations import SiLU from .bbox_nms import fast_nms, multiclass_nms from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d from .conv_upsample import ConvUpsample from .csp_layer import CSPLayer from .dropblock import DropBlock from .ema import ExpMom...
# Copyright (c) OpenMMLab. All rights reserved. from .activations import SiLU from .bbox_nms import fast_nms, multiclass_nms from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d from .conv_upsample import ConvUpsample from .csp_layer import CSPLayer from .dropblock import DropBlock from .ema import ExpMom...
# Copyright (c) OpenMMLab. All rights reserved. from .distributed_sampler import DistributedSampler from .group_sampler import DistributedGroupSampler, GroupSampler from .infinite_sampler import InfiniteBatchSampler, InfiniteGroupBatchSampler __all__ = [ 'DistributedSampler', 'DistributedGroupSampler', 'GroupSampl...
# Copyright (c) OpenMMLab. All rights reserved. from .distributed_sampler import DistributedSampler from .group_sampler import DistributedGroupSampler, GroupSampler __all__ = ['DistributedSampler', 'DistributedGroupSampler', 'GroupSampler']
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
""" This is a simple application for sentence embeddings: semantic search We have a corpus with various sentences. Then, for a given query sentence, we want to find the most similar sentence in this corpus. This script outputs for various queries the top 5 most similar sentences in the corpus. """ import torch from...
""" This is a simple application for sentence embeddings: semantic search We have a corpus with various sentences. Then, for a given query sentence, we want to find the most similar sentence in this corpus. This script outputs for various queries the top 5 most similar sentences in the corpus. """ import torch from...
from __future__ import annotations from dataclasses import dataclass from sentence_transformers.training_args import SentenceTransformerTrainingArguments @dataclass class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments): r""" SparseEncoderTrainingArguments extends :class:`~SentenceTransf...
from __future__ import annotations from dataclasses import dataclass from sentence_transformers.training_args import SentenceTransformerTrainingArguments @dataclass class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments): """ SparseEncoderTrainingArguments extends :class:`~SentenceTransfo...
_base_ = './mask-rcnn_r50_fpn_1x_coco.py' model = dict( # use caffe img_norm data_preprocessor=dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False), backbone=dict( norm_cfg=dict(requires_grad=False), style='caffe', init_cfg=dict( ...
_base_ = './mask_rcnn_r50_fpn_1x_coco.py' model = dict( # use caffe img_norm data_preprocessor=dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False), backbone=dict( norm_cfg=dict(requires_grad=False), style='caffe', init_cfg=dict( ...
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
_base_ = './solov2_r50_fpn_ms-3x_coco.py' # model settings model = dict( backbone=dict( depth=101, init_cfg=dict(checkpoint='torchvision://resnet101'), dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)), mask_head=dic...
_base_ = 'solov2_r50_fpn_mstrain_3x_coco.py' # model settings model = dict( backbone=dict( depth=101, init_cfg=dict(checkpoint='torchvision://resnet101'), dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)), mask_head=...
_base_ = 'mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-270k_coco.py' # noqa # training schedule for 90k max_iters = 90000 # learning rate policy # lr steps at [0.9, 0.95, 0.975] of the maximum iterations param_scheduler = [ dict( type='LinearLR', start_factor=0.067, by_epoch=False, begin...
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py' # training schedule for 90k max_iters = 90000 # learning rate policy # lr steps at [0.9, 0.95, 0.975] of the maximum iterations param_scheduler = [ dict( type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500), ...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess import pytest from dpr_reader import DPRReaderRanker from jina import Document, DocumentArray, Flow @pytest.mark.parametrize('request_size', [1, 8, 50]) def test_integration(request_size: int...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import subprocess import pytest from jina import Document, DocumentArray, Flow from ...dpr_reader import DPRReaderRanker @pytest.mark.parametrize('request_size', [1, 8, 50]) def test_integration(request_size:...
from enum import Enum # --8<-- [start:ProviderName] class ProviderName(str, Enum): ANTHROPIC = "anthropic" APOLLO = "apollo" COMPASS = "compass" DISCORD = "discord" D_ID = "d_id" E2B = "e2b" EXA = "exa" FAL = "fal" GENERIC_WEBHOOK = "generic_webhook" GITHUB = "github" GOOGL...
from enum import Enum # --8<-- [start:ProviderName] class ProviderName(str, Enum): ANTHROPIC = "anthropic" APOLLO = "apollo" COMPASS = "compass" DISCORD = "discord" D_ID = "d_id" E2B = "e2b" EXA = "exa" FAL = "fal" GITHUB = "github" GOOGLE = "google" GOOGLE_MAPS = "google_m...
from __future__ import annotations __version__ = "3.2.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" import importlib import os from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset from sentence_t...
from __future__ import annotations __version__ = "3.1.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" import importlib import os from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset from sentence_t...
import openvino.runtime.opset14 as ov_opset from openvino import Type from keras.src.backend.openvino.core import OpenVINOKerasTensor from keras.src.backend.openvino.core import get_ov_output def segment_sum(data, segment_ids, num_segments=None, sorted=False): raise NotImplementedError( "`segment_sum` is...
import openvino.runtime.opset14 as ov_opset from openvino import Type from keras.src.backend.openvino.core import OpenVINOKerasTensor from keras.src.backend.openvino.core import get_ov_output def segment_sum(data, segment_ids, num_segments=None, sorted=False): raise NotImplementedError( "`segment_sum` is...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( type='ATSS', data_preprocessor=dict( type='DetDataPreprocessor', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], ...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings preprocess_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True, pad_size_divisor=32) model = dict( preprocess_cfg=prepr...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.activations import deserialize from keras.src.activations import get from keras.src.activations import serialize from keras.src.activations.activations import celu from keras.src.acti...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.activations import deserialize from keras.src.activations import get from keras.src.activations import serialize from keras.src.activations.activations import celu from keras.src.acti...
"""Valyu tool spec.""" from typing import List, Optional from llama_index.core.schema import Document from llama_index.core.tools.tool_spec.base import BaseToolSpec class ValyuToolSpec(BaseToolSpec): """Valyu tool spec.""" spec_functions = [ "context", ] def __init__( self, ...
"""Valyu tool spec.""" from typing import List, Optional from llama_index.core.schema import Document from llama_index.core.tools.tool_spec.base import BaseToolSpec class ValyuToolSpec(BaseToolSpec): """Valyu tool spec.""" spec_functions = [ "context", ] def __init__( self, ...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.applications.mobilenet_v2 import MobileNetV2 as MobileNetV2 from keras.src.applications.mobilenet_v2 import ( decode_predictions as decode_predictions, ) from keras.src.applicatio...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.applications.mobilenet_v2 import MobileNetV2 from keras.src.applications.mobilenet_v2 import decode_predictions from keras.src.applications.mobilenet_v2 import preprocess_input
# Copyright (c) OpenMMLab. All rights reserved. import logging from contextlib import contextmanager from typing import Optional import torch from mmengine import print_log from mmengine.utils import TORCH_VERSION, digit_version @contextmanager def autocast(device_type: Optional[str] = None, dtype: Opt...
# Copyright (c) OpenMMLab. All rights reserved. from contextlib import contextmanager import torch from mmengine.utils import TORCH_VERSION, digit_version @contextmanager def autocast(enabled: bool = True, **kwargs): """A wrapper of ``torch.autocast`` and ``torch.cuda.amp.autocast``. PyTorch 1.6.0 provides `...
from typing import TYPE_CHECKING, Type, Optional if TYPE_CHECKING: # pragma: no cover from docarray.typing import T from docarray.proto.docarray_pb2 import DocumentProto class ProtobufMixin: @classmethod def from_protobuf(cls: Type['T'], pb_msg: 'DocumentProto') -> 'T': from docarray.proto.i...
from typing import TYPE_CHECKING, Type, Optional if TYPE_CHECKING: from docarray.typing import T from docarray.proto.docarray_pb2 import DocumentProto class ProtobufMixin: @classmethod def from_protobuf(cls: Type['T'], pb_msg: 'DocumentProto') -> 'T': from docarray.proto.io import parse_proto...
""" Tatoeba (https://tatoeba.org/) is a collection of sentences and translation, mainly aiming for language learning. It is available for more than 300 languages. This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like """ import os import sentence_transformers imp...
""" Tatoeba (https://tatoeba.org/) is a collection of sentences and translation, mainly aiming for language learning. It is available for more than 300 languages. This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like """ import os import sentence_transformers impo...
# Copyright (c) OpenMMLab. All rights reserved. import os.path from typing import Optional import mmengine from mmdet.registry import DATASETS from .coco import CocoDataset @DATASETS.register_module() class V3DetDataset(CocoDataset): """Dataset for V3Det.""" METAINFO = { 'classes': None, 'p...
# Copyright (c) OpenMMLab. All rights reserved. import mmengine from mmdet.registry import DATASETS from .coco import CocoDataset V3DET_CLASSES = tuple( mmengine.list_from_file( 'configs/v3det/category_name_13204_v3det_2023_v1.txt')) @DATASETS.register_module() class V3DetDataset(CocoDataset): """D...
""" This is a simple application for sentence embeddings: clustering Sentences are mapped to sentence embeddings and then k-mean clustering is applied. """ from sentence_transformers import SentenceTransformer from sklearn.cluster import KMeans embedder = SentenceTransformer("all-MiniLM-L6-v2") # Corpus with exampl...
""" This is a simple application for sentence embeddings: clustering Sentences are mapped to sentence embeddings and then k-mean clustering is applied. """ from sentence_transformers import SentenceTransformer from sklearn.cluster import KMeans embedder = SentenceTransformer("all-MiniLM-L6-v2") # Corpus with example...
""" Use scikit-learn regressor interface with CPU histogram tree method =================================================================== """ from dask import array as da from dask.distributed import Client, LocalCluster from xgboost import dask as dxgb def main(client): # generate some random data for demons...
""" Use scikit-learn regressor interface with CPU histogram tree method =================================================================== """ from dask import array as da from dask.distributed import Client, LocalCluster from xgboost import dask as dxgb def main(client): # generate some random data for demonst...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
import pytest import logging @pytest.fixture(autouse=True) def set_logger_level(): logger = logging.getLogger('docarray') logger.setLevel(logging.INFO)
__version__ = '0.13.25' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
__version__ = '0.13.24' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip from . import functional # usort: skip from ._transform import Transform # usort: skip from ._augment import CutMix, MixUp, RandomErasing from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide from ._col...
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip from . import functional, utils # usort: skip from ._transform import Transform # usort: skip from ._augment import CutMix, MixUp, RandomErasing from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide fro...
from .Asym import Asym from .BoW import BoW from .CLIPModel import CLIPModel from .CNN import CNN from .Dense import Dense from .Dropout import Dropout from .LayerNorm import LayerNorm from .LSTM import LSTM from .Normalize import Normalize from .Pooling import Pooling from .Transformer import Transformer from .Weighte...
from .Transformer import Transformer from .Asym import Asym from .BoW import BoW from .CNN import CNN from .Dense import Dense from .Dropout import Dropout from .LayerNorm import LayerNorm from .LSTM import LSTM from .Normalize import Normalize from .Pooling import Pooling from .WeightedLayerPooling import WeightedLaye...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.dtype_policies import deserialize as deserialize from keras.src.dtype_policies import get as get from keras.src.dtype_policies import serialize as serialize from keras.src.dtype_polic...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.dtype_policies import deserialize from keras.src.dtype_policies import get from keras.src.dtype_policies import serialize from keras.src.dtype_policies.dtype_policy import DTypePolicy...
import asyncio import pytest from llama_index.core.workflow.context import Context from llama_index.core.workflow.decorators import step from llama_index.core.workflow.errors import WorkflowRuntimeError, WorkflowTimeoutError from llama_index.core.workflow.events import Event, StartEvent, StopEvent from llama_index.cor...
import asyncio import pytest from llama_index.core.workflow.context import Context from llama_index.core.workflow.decorators import step from llama_index.core.workflow.events import Event, StartEvent, StopEvent from llama_index.core.workflow.workflow import Workflow from llama_index.core.workflow.errors import Workflo...
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py' # model settings model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron/resnet101_caffe'))) # dataset settings train_pipeline = [ dict( type='LoadImageFromFi...
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' # model settings model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron/resnet101_caffe'))) # dataset settings train_pipeline = [ dict( type='LoadImageFromFi...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
# Copyright (c) OpenMMLab. All rights reserved. from .misc import (check_prerequisites, concat_list, deprecated_api_warning, has_method, import_modules_from_strings, is_list_of, is_method_overridden, is_seq_of, is_str, is_tuple_of, iter_cast, list_cast, mmcv_full...
# Copyright (c) OpenMMLab. All rights reserved. from .misc import (check_prerequisites, concat_list, deprecated_api_warning, has_method, import_modules_from_strings, is_list_of, is_method_overridden, is_seq_of, is_str, is_tuple_of, iter_cast, list_cast, requires_...
"""Utilities for chat loaders.""" from copy import deepcopy from typing import Iterable, Iterator, List from langchain_core.chat_sessions import ChatSession from langchain_core.messages import AIMessage, BaseMessage def merge_chat_runs_in_session( chat_session: ChatSession, delimiter: str = "\n\n" ) -> ChatSess...
"""Utilities for chat loaders.""" from copy import deepcopy from typing import Iterable, Iterator, List from langchain_core.chat_sessions import ChatSession from langchain_core.messages import AIMessage, BaseMessage def merge_chat_runs_in_session( chat_session: ChatSession, delimiter: str = "\n\n" ) -> ChatSess...
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py' model = dict( backbone=dict( depth=101, norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe')))
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py' model = dict( backbone=dict( depth=101, norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe'))) # use ca...
"""Run smoke tests""" import sys from pathlib import Path import torch import torchvision from torchvision.io import decode_jpeg, read_file, read_image from torchvision.models import resnet50, ResNet50_Weights SCRIPT_DIR = Path(__file__).parent def smoke_test_torchvision() -> None: print( "Is torchvisi...
"""Run smoke tests""" import sys from pathlib import Path import torch import torchvision from torchvision.io import decode_jpeg, read_file, read_image from torchvision.models import resnet50, ResNet50_Weights SCRIPT_DIR = Path(__file__).parent def smoke_test_torchvision() -> None: print( "Is torchvisi...
"""Tool for the Reddit search API.""" from typing import Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper class RedditSearchSch...
"""Tool for the Reddit search API.""" from typing import Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper class RedditSearchSch...
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3 from .source_separation_pipeline import HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS __all__ = [ "EMFORMER_RNNT_BASE_MUSTC", "EMFORMER_RNNT_BASE_TEDLIUM3", "HDEMUCS_HIGH_MUSDB_PLUS", "HDEMUCS_HIGH_MUSDB", ]
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3 from .source_separation_pipeline import ( CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS, SourceSeparationBundle, ) __all__ = [ "CONVTASNET_BASE_LIBRI2MIX", "EMFORMER_RNNT_BASE_MUSTC", "...
import copy import os.path as osp import unittest from mmcv.transforms import Compose from mmdet.datasets.transforms import MultiBranch from mmdet.utils import register_all_modules register_all_modules() class TestMultiBranch(unittest.TestCase): def setUp(self): """Set up the model and optimizer which ...
import copy import os.path as osp import unittest from mmcv.transforms import Compose from mmdet.datasets.transforms import MultiBranch from mmdet.utils import register_all_modules register_all_modules() class TestMultiBranch(unittest.TestCase): def setUp(self): """Set up the model and optimizer which ...
import logging import tqdm class LoggingHandler(logging.Handler): def __init__(self, level=logging.NOTSET) -> None: super().__init__(level) def emit(self, record) -> None: try: msg = self.format(record) tqdm.tqdm.write(msg) self.flush() except (Key...
import logging import tqdm class LoggingHandler(logging.Handler): def __init__(self, level=logging.NOTSET): super().__init__(level) def emit(self, record): try: msg = self.format(record) tqdm.tqdm.write(msg) self.flush() except (KeyboardInterrupt, ...
_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py' # model settings preprocess_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False, pad_size_divisor=32) model = dict( preprocess_cfg=preprocess_cfg, backbone=dict( dcn=dict(type='DCNv2', deform_groups=1, fallback...
_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_c...
from typing import TypeVar from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.audio.audio_ndarray import MAX_INT_16 from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode T = T...
from typing import TypeVar from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.audio.audio_ndarray import MAX_INT_16 from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode T = TypeVar('T', bound='AudioTorchTensor') class AudioTorchTen...
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR from ._transforms import ( AmplitudeToDB, BarkScale, BarkSpectrogram, ComputeDeltas, Fade, FrequencyMasking, GriffinLim, InverseBarkScale, InverseMelScale, InverseSpectrogram, LFCC, Loudness, MelScale, Mel...
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR from ._transforms import ( AmplitudeToDB, ComputeDeltas, Fade, FrequencyMasking, GriffinLim, InverseMelScale, InverseSpectrogram, LFCC, Loudness, MelScale, MelSpectrogram, MFCC, MuLawDecoding, MuLawEncodin...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch.nn as nn from mmdet.registry import MODELS from .utils import weighted_loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0): """`Focal Loss <https://arxiv.org/abs/1...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch.nn as nn from ..builder import LOSSES from .utils import weighted_loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0): """`Focal Loss <https://arxiv.org/abs/1708.0...
from google.protobuf import __version__ as __pb__version__ if __pb__version__.startswith('4'): from docarray.proto.pb.docarray_pb2 import ( DocumentArrayProto, DocumentArrayStackedProto, DocumentProto, NdArrayProto, NodeProto, ) else: from docarray.proto.pb2.docarray...
from google.protobuf import __version__ as __pb__version__ if __pb__version__.startswith('4'): from docarray.proto.pb.docarray_pb2 import ( DocumentArrayProto, DocumentArrayStackedProto, DocumentProto, NdArrayProto, NodeProto, UnionArrayProto, ) else: from do...
import pytest import torch import torchaudio class GreedyCTCDecoder(torch.nn.Module): def __init__(self, labels, blank: int = 0): super().__init__() self.blank = blank self.labels = labels def forward(self, logits: torch.Tensor) -> str: """Given a sequence logits over labels, ...
import pytest import torch from torchaudio._internal import download_url_to_file class GreedyCTCDecoder(torch.nn.Module): def __init__(self, labels, blank: int = 0): super().__init__() self.blank = blank self.labels = labels def forward(self, logits: torch.Tensor) -> str: """G...
import os import pandas as pd from huggingface_hub import hf_hub_download, upload_file from huggingface_hub.utils import EntryNotFoundError REPO_ID = "diffusers/benchmarks" def has_previous_benchmark() -> str: from run_all import FINAL_CSV_FILENAME csv_path = None try: csv_path = hf_hub_downlo...
import glob import sys import pandas as pd from huggingface_hub import hf_hub_download, upload_file from huggingface_hub.utils import EntryNotFoundError sys.path.append(".") from utils import BASE_PATH, FINAL_CSV_FILE, GITHUB_SHA, REPO_ID, collate_csv # noqa: E402 def has_previous_benchmark() -> str: csv_path...
""" ===================================== Plot the support vectors in LinearSVC ===================================== Unlike SVC (based on LIBSVM), LinearSVC (based on LIBLINEAR) does not provide the support vectors. This example demonstrates how to obtain the support vectors in LinearSVC. """ # Authors: The scikit-...
""" ===================================== Plot the support vectors in LinearSVC ===================================== Unlike SVC (based on LIBSVM), LinearSVC (based on LIBLINEAR) does not provide the support vectors. This example demonstrates how to obtain the support vectors in LinearSVC. """ # Authors: The scikit-...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
from enum import Enum # --8<-- [start:ProviderName] class ProviderName(str, Enum): ANTHROPIC = "anthropic" COMPASS = "compass" DISCORD = "discord" D_ID = "d_id" E2B = "e2b" EXA = "exa" FAL = "fal" GITHUB = "github" GOOGLE = "google" GOOGLE_MAPS = "google_maps" GROQ = "groq"...
from enum import Enum # --8<-- [start:ProviderName] class ProviderName(str, Enum): ANTHROPIC = "anthropic" COMPASS = "compass" DISCORD = "discord" D_ID = "d_id" E2B = "e2b" EXA = "exa" FAL = "fal" GITHUB = "github" GOOGLE = "google" GOOGLE_MAPS = "google_maps" GROQ = "groq"...
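The two columns of a pair typically differ only in a small edited region (an import swapped, a config key renamed, a version bumped), so a unified diff is the quickest way to inspect a row. A minimal sketch using only the standard library, assuming `row` is one pair loaded as in the snippet above:

```python
import difflib

def show_edit(row: dict) -> str:
    """Render the change between a pair's input and output as a unified diff."""
    return "".join(
        difflib.unified_diff(
            row["input"].splitlines(keepends=True),
            row["output"].splitlines(keepends=True),
            fromfile="input",
            tofile="output",
        )
    )

# Example: print(show_edit(ds[0]))
```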