Columns: input (string, lengths 33-5k), output (string, lengths 32-5k).
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine.config import Config from mmengine.structures import InstanceData from mmdet.models.dense_heads import YOLOV3Head class TestYOLOV3Head(TestCase): def test_yolo_head_loss(self): """Tests YOLO head lo...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine.config import Config from mmengine.data import InstanceData from mmdet.models.dense_heads import YOLOV3Head class TestYOLOV3Head(TestCase): def test_yolo_head_loss(self): """Tests YOLO head loss whe...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig from .single_stage import SingleStageDetector @MODELS.register_module() class CornerNet(SingleStageDetector): """CornerNet. This detector is the implementation o...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class CornerNet(SingleStageDetector): """CornerNet. This detector is the implementat...
"""Hypothetical Document Embeddings. https://arxiv.org/abs/2212.10496 """ from __future__ import annotations import logging from typing import Any, Optional from langchain_core.callbacks import CallbackManagerForChainRun from langchain_core.embeddings import Embeddings from langchain_core.language_models import Bas...
"""Hypothetical Document Embeddings. https://arxiv.org/abs/2212.10496 """ from __future__ import annotations import logging from typing import Any, Dict, List, Optional from langchain_core.callbacks import CallbackManagerForChainRun from langchain_core.embeddings import Embeddings from langchain_core.language_model...
from langchain_core._api.deprecation import ( LangChainDeprecationWarning, LangChainPendingDeprecationWarning, deprecated, suppress_langchain_deprecation_warning, surface_langchain_deprecation_warnings, warn_deprecated, ) AGENT_DEPRECATION_WARNING = ( "LangChain agents will continue to be s...
from langchain_core._api.deprecation import ( LangChainDeprecationWarning, LangChainPendingDeprecationWarning, deprecated, suppress_langchain_deprecation_warning, surface_langchain_deprecation_warnings, warn_deprecated, ) AGENT_DEPRECATION_WARNING = ( "LangChain agents will continue to be s...
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any, Callable from sentence_transformers.evaluation import InformationRetrievalEvaluator if TYPE_CHECKING: import numpy as np from torch import Tensor from sentence_transformers.similarity_functions import SimilarityFunc...
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any, Callable from sentence_transformers.evaluation import InformationRetrievalEvaluator if TYPE_CHECKING: import numpy as np from torch import Tensor from sentence_transformers.similarity_functions import SimilarityFunc...
""" This script showcases a recommended approach to perform semantic search using quantized embeddings with FAISS and usearch. In particular, it uses binary search with int8 rescoring. The binary search is highly efficient, and its index can be kept in memory even for massive datasets: it takes (num_dimensions * num_do...
""" This script showcases a recommended approach to perform semantic search using quantized embeddings with FAISS and usearch. In particular, it uses binary search with int8 rescoring. The binary search is highly efficient, and its index can be kept in memory even for massive datasets: it takes (num_dimensions * num_do...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from .sparse_rcnn import SparseRCNN @MODELS.register_module() class QueryInst(SparseRCNN): r"""Implementation of `Instances as Queries <http://arxiv.org/abs/2105.01928>`_""" def __init__(self, backbone, ...
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .sparse_rcnn import SparseRCNN @DETECTORS.register_module() class QueryInst(SparseRCNN): r"""Implementation of `Instances as Queries <http://arxiv.org/abs/2105.01928>`_""" def __init__(self, backbone, ...
# Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writ...
# Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writ...
_base_ = 'ssj_270k_coco-instance.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' image_size = (1024, 1024) # Example to use different file client # Method 1: simply set the data root and let the file I/O module # automatically infer from prefix (not support LMDB and Memcache yet) # data_...
_base_ = 'ssj_270k_coco-instance.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' image_size = (1024, 1024) file_client_args = dict(backend='disk') # comment out the code below to use different file client # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # ...
import json from json import JSONDecodeError from typing import Union from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish from langchain_core.exceptions import OutputParserException from langchain_core.messages import ( AIMessage, BaseMessage, ) from langchain_core.outputs import ...
import json from json import JSONDecodeError from typing import Union from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish from langchain_core.exceptions import OutputParserException from langchain_core.messages import ( AIMessage, BaseMessage, ) from langchain_core.outputs import ...
from keras.src.api_export import keras_export from keras.src.optimizers.adadelta import Adadelta from keras.src.optimizers.adafactor import Adafactor from keras.src.optimizers.adagrad import Adagrad from keras.src.optimizers.adam import Adam from keras.src.optimizers.adamax import Adamax from keras.src.optimizers.adamw...
from keras.src.api_export import keras_export from keras.src.optimizers.adadelta import Adadelta from keras.src.optimizers.adafactor import Adafactor from keras.src.optimizers.adagrad import Adagrad from keras.src.optimizers.adam import Adam from keras.src.optimizers.adamax import Adamax from keras.src.optimizers.adamw...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
"""DashScope api utils.""" from http import HTTPStatus from typing import Any, Dict, List, Sequence from llama_index.core.base.llms.types import ( ChatMessage, ChatResponse, CompletionResponse, ) def dashscope_response_to_completion_response( response: Any, stream: bool = False ) -> CompletionRespon...
"""DashScope api utils.""" from http import HTTPStatus from typing import Any, Dict, List, Sequence from llama_index.core.base.llms.types import ( ChatMessage, ChatResponse, CompletionResponse, ) def dashscope_response_to_completion_response( response: Any, stream: bool = False ) -> CompletionRespon...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod class BaseAssigner(metaclass=ABCMeta): """Base assigner that assigns boxes to ground truth boxes.""" @abstractmethod def assign(self, pred_instances, gt_instances, gt_insta...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod class BaseAssigner(metaclass=ABCMeta): """Base assigner that assigns boxes to ground truth boxes.""" @abstractmethod def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign boxes ...
from typing import Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_document import BaseDocument from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.tensor.image.image_tensor import ImageTensor f...
from typing import Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_document import BaseDocument from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.tensor.image.image_tensor import ImageTensor ...
# Copyright (c) OpenMMLab. All rights reserved. from .default_scope import DefaultScope from .registry import Registry, build_from_cfg from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS, RU...
# Copyright (c) OpenMMLab. All rights reserved. from .default_scope import DefaultScope from .registry import Registry, build_from_cfg from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS, RU...
from langchain_core.messages import __all__ EXPECTED_ALL = [ "MessageLikeRepresentation", "_message_from_dict", "AIMessage", "AIMessageChunk", "AnyMessage", "BaseMessage", "BaseMessageChunk", "ChatMessage", "ChatMessageChunk", "FunctionMessage", "FunctionMessageChunk", "...
from langchain_core.messages import __all__ EXPECTED_ALL = [ "MessageLikeRepresentation", "_message_from_dict", "AIMessage", "AIMessageChunk", "AnyMessage", "BaseMessage", "BaseMessageChunk", "ChatMessage", "ChatMessageChunk", "FunctionMessage", "FunctionMessageChunk", "...
_base_ = './ms_rcnn_x101_64x4d_fpn_1x_coco.py' # learning policy max_epochs = 24 train_cfg = dict( type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1) param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', ...
_base_ = './ms_rcnn_x101_64x4d_fpn_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
import numpy as np from sentence_transformers.sparse_encoder import SparseEncoder from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling def main(): # Initialize the SPLADE model model_name = "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "naver/effici...
import numpy as np from sentence_transformers.sparse_encoder import SparseEncoder from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling def main(): # Initialize the SPLADE model model_name = "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "naver/effici...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import subprocess import numpy as np import pytest from jina import Document, DocumentArray, Flow from jina.executors.metas import get_default_metas from jina_commons.indexers.dump import export_dump_stream...
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import subprocess import numpy as np import pytest from jina import Document, DocumentArray, Flow from jina.executors.metas import get_default_metas from jina_commons.indexers.dump import export_dump_stream...
# Copyright (c) OpenMMLab. All rights reserved. # flake8: noqa from .config import * from .dataset import * from .data import * from .fileio import * from .registry import * from .utils import *
# Copyright (c) OpenMMLab. All rights reserved. # flake8: noqa from .config import * from .dataset import * from .fileio import * from .registry import * from .utils import *
""" This script contains an example how to perform semantic search with Elasticsearch. You need Elasticsearch up and running locally: https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.rea...
import time from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch # 1. Load the quora corpus with questions dataset = load_dataset("quora", split="train", trust_remote_code=True).map( lambda ...
"""Transformers for missing value imputation.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import typing from ._base import MissingIndicator, SimpleImputer from ._knn import KNNImputer if typing.TYPE_CHECKING: # Avoid errors in type checkers (e.g. mypy) for experimental esti...
"""Transformers for missing value imputation.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import typing from ._base import MissingIndicator, SimpleImputer from ._knn import KNNImputer if typing.TYPE_CHECKING: # Avoid errors in type checkers (e.g. mypy) for experimental esti...
from typing import Dict def get_default_metas() -> Dict: """ Get a copy of default meta variables. NOTE: DO NOT ADD MORE ENTRIES HERE! :return: a deep copy of the default metas in a new dict """ # NOTE: DO NOT ADD MORE ENTRIES HERE! return { 'name': '', #: a string, the name of...
from typing import Dict def get_default_metas() -> Dict: """ Get a copy of default meta variables. NOTE: DO NOT ADD MORE ENTRIES HERE! :return: a deep copy of the default metas in a new dict """ # NOTE: DO NOT ADD MORE ENTRIES HERE! return { 'name': '', #: a string, the name of...
# Copyright (c) OpenMMLab. All rights reserved. from .anchor_free_head import AnchorFreeHead from .anchor_head import AnchorHead from .atss_head import ATSSHead from .autoassign_head import AutoAssignHead from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead from .centernet_head import CenterNetHead from .c...
# Copyright (c) OpenMMLab. All rights reserved. from .anchor_free_head import AnchorFreeHead from .anchor_head import AnchorHead from .atss_head import ATSSHead from .autoassign_head import AutoAssignHead from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead from .centernet_head import CenterNetHead from .c...
from dataclasses import dataclass from functools import partial from typing import Callable import torch import torchaudio from torchaudio.prototype.models import conv_tasnet_base, hdemucs_high @dataclass class SourceSeparationBundle: """torchaudio.prototype.pipelines.SourceSeparationBundle() Dataclass tha...
from dataclasses import dataclass from functools import partial from typing import Callable import torch import torchaudio from torchaudio.prototype.models import conv_tasnet_base @dataclass class SourceSeparationBundle: """torchaudio.prototype.pipelines.SourceSeparationBundle() Dataclass that bundles comp...
""" ======================================================= Label Propagation circles: Learning a complex structure ======================================================= Example of LabelPropagation learning a complex internal structure to demonstrate "manifold learning". The outer circle should be labeled "red" and ...
""" ============================================== Label Propagation learning a complex structure ============================================== Example of LabelPropagation learning a complex internal structure to demonstrate "manifold learning". The outer circle should be labeled "red" and the inner circle "blue". Be...
# mypy: allow-untyped-defs import functools from typing import Optional import torch from torch._C import _len_torch_function_stack from torch.overrides import _pop_mode, _push_mode, TorchFunctionMode from torch.utils._contextlib import context_decorator CURRENT_DEVICE: Optional[torch.device] = None @functools.lru...
# mypy: allow-untyped-defs import functools from typing import Optional import torch from torch._C import _len_torch_function_stack from torch.overrides import _pop_mode, _push_mode, TorchFunctionMode from torch.utils._contextlib import context_decorator CURRENT_DEVICE: Optional[torch.device] = None @functools.lru...
""" Wrapper script to run a command inside a Docker container """ import argparse import grp import itertools import os import pathlib import pwd import subprocess import sys import textwrap OPS_DIR = pathlib.Path(__file__).expanduser().resolve().parent PROJECT_ROOT_DIR = OPS_DIR.parent LINEWIDTH = 88 TEXT_WRAPPER = ...
""" Wrapper script to run a command inside a Docker container """ import argparse import grp import itertools import os import pathlib import pwd import subprocess import sys import textwrap OPS_DIR = pathlib.Path(__file__).expanduser().resolve().parent PROJECT_ROOT_DIR = OPS_DIR.parent LINEWIDTH = 88 TEXT_WRAPPER = ...
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.transforms import LoadImageFromFile from mmdet.datasets.transforms import LoadAnnotations, LoadPanopticAnnotations from mmdet.registry import TRANSFORMS def get_loading_pipeline(pipeline): """Only keep the configuration related to loading images and annotations....
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.transforms import LoadImageFromFile from mmdet.datasets.transforms import LoadAnnotations, LoadPanopticAnnotations from mmdet.registry import TRANSFORMS def get_loading_pipeline(pipeline): """Only keep the configuration related to loading images and annotations....
# Copyright 2025 Kakao Brain and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless requi...
# Copyright 2024 Kakao Brain and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless requi...
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: docarray.proto """Generated protocol buffer code.""" from google.protobuf.internal import builder as _builder from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool...
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: docarray.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_d...
_base_ = './cascade-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
_base_ = './cascade_rcnn_r50_fpn_lsj_200e_8x8_fp16_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
""" This script downloads the parallel sentences corpus and create parallel sentences tsv files that can be used to extend existent sentence embedding models to new languages. The parallel sentences corpus is a crawl of transcripts from talks, which are translated to 100+ languages. The parallel sentences corpus cann...
""" This script downloads the parallel sentences corpus and create parallel sentences tsv files that can be used to extend existent sentence embedding models to new languages. The parallel sentences corpus is a crawl of transcripts from talks, which are translated to 100+ languages. The parallel sentences corpus cann...
from ..extension import _load_library try: _load_library("gpu_decoder") _HAS_GPU_VIDEO_DECODER = True except (ImportError, OSError): _HAS_GPU_VIDEO_DECODER = False
from ..extension import _load_library try: _load_library("Decoder") _HAS_GPU_VIDEO_DECODER = True except (ImportError, OSError): _HAS_GPU_VIDEO_DECODER = False
"""Utilities for JSON Schema.""" from __future__ import annotations from copy import deepcopy from typing import TYPE_CHECKING, Any, Optional if TYPE_CHECKING: from collections.abc import Sequence def _retrieve_ref(path: str, schema: dict) -> dict: components = path.split("/") if components[0] != "#": ...
"""Utilities for JSON Schema.""" from __future__ import annotations from copy import deepcopy from typing import TYPE_CHECKING, Any, Optional if TYPE_CHECKING: from collections.abc import Sequence def _retrieve_ref(path: str, schema: dict) -> dict: components = path.split("/") if components[0] != "#": ...
""" This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file. TSDAE will be training using these sentences. Checkpoints are stored every 500 steps to the output folder. Usage: python train_tsdae_from_file.py path/to/sentences.txt """ import gzip ...
""" This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file. TSDAE will be training using these sentences. Checkpoints are stored every 500 steps to the output folder. Usage: python train_tsdae_from_file.py path/to/sentences.txt """ from sentence...
import torch from torch import Tensor def sgd_out_of_place(param, grad, weight_decay, lr, maximize) -> Tensor: """ Computes a single step of SGD on a single parameter Tensor with grad. Assumes: - param and grad are the same shape and are 1D. - param and grad are float and on CPU Args: ...
import torch from torch import Tensor def sgd_out_of_place(param, grad, weight_decay, lr, maximize) -> Tensor: """ Computes a single step of SGD on a single parameter Tensor with grad. Assumes: - param and grad are the same shape and are 1D. - param and grad are float and on CPU Args: ...
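Both rows above document `sgd_out_of_place`, one SGD step on a 1-D float CPU tensor. A minimal sketch of the arithmetic that docstring describes, with hypothetical names: weight decay folds an L2 term into the gradient, and `maximize` turns descent into ascent:

import torch
from torch import Tensor

def sgd_step_sketch(param: Tensor, grad: Tensor, weight_decay: float, lr: float, maximize: bool) -> Tensor:
    # effective gradient with L2 regularization: d = grad + weight_decay * param
    d = grad + weight_decay * param
    # gradient descent by default; maximize=True performs gradient ascent instead
    return param + lr * d if maximize else param - lr * d

param = torch.tensor([1.0, 2.0])
grad = torch.tensor([0.1, -0.2])
print(sgd_step_sketch(param, grad, weight_decay=0.0, lr=0.5, maximize=False))  # tensor([0.9500, 2.1000])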
from dataclasses import dataclass from typing import Callable, Optional import datasets @dataclass class GeneratorConfig(datasets.BuilderConfig): generator: Optional[Callable] = None gen_kwargs: Optional[dict] = None features: Optional[datasets.Features] = None split: datasets.NamedSplit = datasets.S...
from dataclasses import dataclass from typing import Callable, Optional import datasets @dataclass class GeneratorConfig(datasets.BuilderConfig): generator: Optional[Callable] = None gen_kwargs: Optional[dict] = None features: Optional[datasets.Features] = None def __post_init__(self): super...
_base_ = './fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py' model = dict( data_preprocessor=dict( mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], bgr_to_rgb=False)) train_pipeline = [ dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}), dict(type='LoadAnnot...
_base_ = './fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py' model = dict( data_preprocessor=dict( mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], bgr_to_rgb=False)) train_pipeline = [ dict( type='LoadImageFromFile', file_client_args={{_base_.file_client_args}}),...
"""Test tool spec.""" from typing import List, Tuple, Union import pytest from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.tools.tool_spec.base import BaseToolSpec from llama_index.core.tools.types import ToolMetadata from llama_index.core.workflow import Context class FooSchema(BaseMode...
"""Test tool spec.""" from typing import List, Optional, Tuple, Type, Union import pytest from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.tools.tool_spec.base import BaseToolSpec from llama_index.core.tools.types import ToolMetadata class FooSchema(BaseModel): arg1: str arg2: in...
"""This modules defines all kinds of exceptions raised in Jina.""" from typing import Set, Union import grpc.aio from jina.serve.helper import extract_trailing_metadata class BaseJinaException(BaseException): """A base class for all exceptions raised by Jina""" class RuntimeFailToStart(SystemError, BaseJinaEx...
"""This modules defines all kinds of exceptions raised in Jina.""" from typing import Set, Union import grpc.aio class BaseJinaException(BaseException): """A base class for all exceptions raised by Jina""" class RuntimeFailToStart(SystemError, BaseJinaException): """When pod/deployment is failed to started...
"""Loads Microsoft Excel files.""" from pathlib import Path from typing import Any, List, Union from langchain_community.document_loaders.unstructured import ( UnstructuredFileLoader, validate_unstructured_version, ) class UnstructuredXMLLoader(UnstructuredFileLoader): """Load `XML` file using `Unstruct...
"""Loads Microsoft Excel files.""" from pathlib import Path from typing import Any, List, Union from langchain_community.document_loaders.unstructured import ( UnstructuredFileLoader, validate_unstructured_version, ) class UnstructuredXMLLoader(UnstructuredFileLoader): """Load `XML` file using `Unstruct...
"""Documents module. **Document** module is a collection of classes that handle documents and their transformations. """ from typing import TYPE_CHECKING from langchain_core._import_utils import import_attr if TYPE_CHECKING: from .base import Document from .compressor import BaseDocumentCompressor from...
"""Documents module. **Document** module is a collection of classes that handle documents and their transformations. """ from typing import TYPE_CHECKING from langchain_core._import_utils import import_attr if TYPE_CHECKING: from .base import Document from .compressor import BaseDocumentCompressor from...
# Copyright (c) OpenMMLab. All rights reserved. from .base_data_element import BaseDataElement from .base_data_sample import BaseDataSample from .sampler import DefaultSampler, InfiniteSampler __all__ = [ 'BaseDataElement', 'BaseDataSample', 'DefaultSampler', 'InfiniteSampler' ]
# Copyright (c) OpenMMLab. All rights reserved. from .sampler import DefaultSampler, InfiniteSampler __all__ = ['DefaultSampler', 'InfiniteSampler']
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmdet.models.roi_heads.mask_heads import FCNMaskHead, MaskIoUHead from .utils import _dummy_bbox_sampling def test_mask_head_loss(): """Test mask head loss when mask target is empty.""" self = FCNMaskHead( num_convs=1, ...
import mmcv import torch from mmdet.models.roi_heads.mask_heads import FCNMaskHead, MaskIoUHead from .utils import _dummy_bbox_sampling def test_mask_head_loss(): """Test mask head loss when mask target is empty.""" self = FCNMaskHead( num_convs=1, roi_feat_size=6, in_channels=8, ...
# Copyright (c) OpenMMLab. All rights reserved. from .lr_scheduler import (ConstantLR, CosineAnnealingLR, ExponentialLR, LinearLR, MultiStepLR, PolyLR, StepLR) from .momentum_scheduler import (ConstantMomentum, CosineAnnealingMomentum, ExponentialMomentum, Lin...
# Copyright (c) OpenMMLab. All rights reserved. from .lr_scheduler import (ConstantLR, CosineAnnealingLR, ExponentialLR, LinearLR, MultiStepLR, StepLR) from .momentum_scheduler import (ConstantMomentum, CosineAnnealingMomentum, ExponentialMomentum, LinearMomen...
# Copyright (c) OpenMMLab. All rights reserved. from .base_dataset import BaseDataset, Compose, force_full_init from .dataset_wrapper import ClassBalancedDataset, ConcatDataset, RepeatDataset from .sampler import DefaultSampler, InfiniteSampler from .utils import pseudo_collate, worker_init_fn __all__ = [ 'BaseDat...
# Copyright (c) OpenMMLab. All rights reserved. # flake8: noqa from .base_dataset import BaseDataset, Compose, force_full_init from .dataset_wrapper import ClassBalancedDataset, ConcatDataset, RepeatDataset
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deform_groups=4, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)))
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deform_groups=4, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)))
from __future__ import annotations from typing_extensions import deprecated from sentence_transformers import InputExample from sentence_transformers.cross_encoder.evaluation.CEClassificationEvaluator import CEClassificationEvaluator @deprecated( "This evaluator has been deprecated in favor of the more general ...
from __future__ import annotations import csv import logging import os import numpy as np from sentence_transformers import InputExample logger = logging.getLogger(__name__) class CESoftmaxAccuracyEvaluator: """ This evaluator can be used with the CrossEncoder class. It is designed for CrossEncoders ...
from typing import TypeVar from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow T = TypeVar('T', bound='ImageTensorFlowTensor') @_register_pr...
from typing import TypeVar from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow T = TypeVar('T', bound='ImageTensorFlowTensor') @_register_pr...
"""All minimum dependencies for scikit-learn.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import argparse from collections import defaultdict # scipy and cython should by in sync with pyproject.toml NUMPY_MIN_VERSION = "1.22.0" SCIPY_MIN_VERSION = "1.8.0" JOBLIB_MIN_VERSION = "1...
"""All minimum dependencies for scikit-learn.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import argparse from collections import defaultdict # scipy and cython should by in sync with pyproject.toml NUMPY_MIN_VERSION = "1.19.5" SCIPY_MIN_VERSION = "1.6.0" JOBLIB_MIN_VERSION = "1...
"""Toolkit for interacting with a vector store.""" from langchain_core.language_models import BaseLanguageModel from langchain_core.tools import BaseTool from langchain_core.tools.base import BaseToolkit from langchain_core.vectorstores import VectorStore from pydantic import BaseModel, ConfigDict, Field class Vecto...
"""Toolkit for interacting with a vector store.""" from typing import List from langchain_core.language_models import BaseLanguageModel from langchain_core.tools import BaseTool from langchain_core.tools.base import BaseToolkit from langchain_core.vectorstores import VectorStore from pydantic import BaseModel, Config...
"""**Prompt** is the input to the model. Prompt is often constructed from multiple components and prompt values. Prompt classes and functions make constructing and working with prompts easy. **Class hierarchy:** .. code-block:: BasePromptTemplate --> PipelinePromptTemplate StringProm...
"""**Prompt** is the input to the model. Prompt is often constructed from multiple components and prompt values. Prompt classes and functions make constructing and working with prompts easy. **Class hierarchy:** .. code-block:: BasePromptTemplate --> PipelinePromptTemplate StringProm...
from typing import Dict, Iterable import torch from torch import Tensor, nn from sentence_transformers import util from sentence_transformers.SentenceTransformer import SentenceTransformer class CoSENTLoss(nn.Module): def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwi...
import torch from torch import nn, Tensor from typing import Iterable, Dict from ..SentenceTransformer import SentenceTransformer from .. import util class CoSENTLoss(nn.Module): def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim): """ This cla...
from docarray import DocumentArray from jina import Executor, requests from jina.parsers import set_pod_parser class ProcessExecutor(Executor): @requests(on='/') def process(self, docs: DocumentArray, **kwargs): for doc in docs: doc.text = doc.text + 'world' doc.tags['processe...
from docarray import DocumentArray from jina import Executor, requests from jina.parsers import set_pod_parser class ProcessExecutor(Executor): @requests(on='/') def process(self, docs: DocumentArray, **kwargs): for doc in docs: doc.text = doc.text + 'world' doc.tags['processe...
# Copyright (c) OpenMMLab. All rights reserved. from .csp_darknet import CSPDarknet from .darknet import Darknet from .detectors_resnet import DetectoRS_ResNet from .detectors_resnext import DetectoRS_ResNeXt from .efficientnet import EfficientNet from .hourglass import HourglassNet from .hrnet import HRNet from .mobil...
# Copyright (c) OpenMMLab. All rights reserved. from .csp_darknet import CSPDarknet from .darknet import Darknet from .detectors_resnet import DetectoRS_ResNet from .detectors_resnext import DetectoRS_ResNeXt from .hourglass import HourglassNet from .hrnet import HRNet from .mobilenet_v2 import MobileNetV2 from .pvt im...
from docarray import BaseDoc from docarray.typing import ID def test_set_id(): class MyDocument(BaseDoc): id: ID d = MyDocument(id="123") assert isinstance(d.id, ID) assert d.id == "123"
from docarray import BaseDoc from docarray.typing import ID def test_set_id(): class MyDocument(BaseDoc): id: ID d = MyDocument(id="123") assert isinstance(d.id, ID) assert d.id == "123"
import multiprocessing import socket import sys from threading import Thread import numpy as np import pytest import xgboost as xgb from xgboost import RabitTracker, build_info, federated from xgboost import testing as tm def run_rabit_worker(rabit_env, world_size): with xgb.collective.CommunicatorContext(**rab...
import multiprocessing import socket import sys import time import numpy as np import pytest import xgboost as xgb from xgboost import RabitTracker, build_info, federated if sys.platform.startswith("win"): pytest.skip("Skipping collective tests on Windows", allow_module_level=True) def run_rabit_worker(rabit_e...
import functools from typing import ( Optional, TYPE_CHECKING, Iterable, Callable, Dict, ) from docarray.array.storage.base.backend import BaseBackendMixin from docarray import Document if TYPE_CHECKING: from docarray.typing import ( DocumentArraySourceType, ) def needs_id2offset...
import functools from typing import ( Optional, TYPE_CHECKING, Iterable, Callable, Dict, ) from ..base.backend import BaseBackendMixin from .... import Document if TYPE_CHECKING: from ....typing import ( DocumentArraySourceType, ) def needs_id2offset_rebuild(func) -> Callable: ...
from pathlib import Path import pytest from jina import Document, DocumentArray, Executor from jina.excepts import BadDocType from ...vad_speech_segmenter import VADSpeechSegmenter def test_load(): segmenter = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml')) assert type(segmenter).__name_...
from pathlib import Path import pytest from jina import Executor, DocumentArray, Document from jina.excepts import BadDocType from ...vad_speech_segmenter import VADSpeechSegmenter def test_load(): segmenter = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml')) assert type(segmenter).__name_...
from typing import Optional import pytest from docarray import BaseDocument, DocumentArray from docarray.documents import ImageDoc from docarray.helper import ( _access_path_dict_to_nested_dict, _access_path_to_dict, _dict_to_access_paths, _is_access_path_valid, _update_nested_dicts, get_paths...
from typing import Optional import pytest from docarray import BaseDocument, DocumentArray from docarray.documents import ImageDoc from docarray.helper import ( _access_path_dict_to_nested_dict, _access_path_to_dict, _dict_to_access_paths, _is_access_path_valid, _update_nested_dicts, ) @pytest.f...
# Copyright (c) OpenMMLab. All rights reserved. from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform, ContrastTransform, EqualizeTransform, Rotate, Shear, Translate) from .compose import Compose from .formating import (Collect, DefaultFormatBu...
# Copyright (c) OpenMMLab. All rights reserved. from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform, ContrastTransform, EqualizeTransform, Rotate, Shear, Translate) from .compose import Compose from .formating import (Collect, DefaultFormatBu...
""" ========================= Tensor transforms and JIT ========================= .. note:: Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_scripted_tensor_transforms.ipynb>`_ or :ref:`go to the end <sphx_glr_download_auto_examples_othe...
""" ========================= Tensor transforms and JIT ========================= This example illustrates various features that are now supported by the :ref:`image transformations <transforms>` on Tensor images. In particular, we show how image transforms can be performed on GPU, and how one can also script them usi...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
from typing import Any, Mapping, Optional from llama_index.readers.airbyte_cdk.base import AirbyteCDKReader, RecordHandler class AirbyteGongReader(AirbyteCDKReader): """ AirbyteGongReader reader. Retrieve documents from Gong Args: config: The config object for the gong source. """ ...
from typing import Any, Mapping, Optional from llama_index.readers.airbyte_cdk.base import AirbyteCDKReader, RecordHandler class AirbyteGongReader(AirbyteCDKReader): """AirbyteGongReader reader. Retrieve documents from Gong Args: config: The config object for the gong source. """ def _...
import csv import logging import os from typing import List import numpy as np from sentence_transformers import InputExample logger = logging.getLogger(__name__) class CESoftmaxAccuracyEvaluator: """ This evaluator can be used with the CrossEncoder class. It is designed for CrossEncoders with 2 or mo...
import logging import os import csv from typing import List from ... import InputExample import numpy as np logger = logging.getLogger(__name__) class CESoftmaxAccuracyEvaluator: """ This evaluator can be used with the CrossEncoder class. It is designed for CrossEncoders with 2 or more outputs. It meas...
__version__ = '0.16.4' import os from docarray.document import Document from docarray.array import DocumentArray from docarray.dataclasses import dataclass, field if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
__version__ = '0.16.3' import os from docarray.document import Document from docarray.array import DocumentArray from docarray.dataclasses import dataclass, field if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
import numpy as np from docarray import BaseDoc from docarray.typing import AnyEmbedding def test_set_embedding(): class MyDocument(BaseDoc): embedding: AnyEmbedding d = MyDocument(embedding=np.zeros((3, 224, 224))) assert isinstance(d.embedding, np.ndarray) assert (d.embedding == np.zeros(...
__version__ = '0.13.5' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install()
__version__ = '0.13.4' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install()
from typing import Iterable, Dict, Sequence from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin from docarray.array.storage.base.helper import Offset2ID from docarray import Document class GetSetDelMixin(BaseGetSetDelMixin): """Provide concrete implementation for ``__getitem__``, ``__setitem__``...
from typing import Iterable, Dict, Sequence from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin from docarray.array.storage.base.helper import Offset2ID from docarray import Document class GetSetDelMixin(BaseGetSetDelMixin): """Provide concrete implementation for ``__getitem__``, ``__setitem__``...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa model = dict( type='LAD', data_preprocesso...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa model = dict( type='LAD', data_preprocesso...
from contextlib import contextmanager from functools import partial from unittest.mock import patch import torch from parameterized import parameterized from torchaudio._internal.module_utils import is_module_available from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase from .utils import ...
from contextlib import contextmanager from functools import partial from unittest.mock import patch import torch from parameterized import parameterized from torchaudio._internal.module_utils import is_module_available from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase from .utils import ...
# Copyright (c) OpenMMLab. All rights reserved. import importlib import os.path as osp import subprocess import pkg_resources from pkg_resources import get_distribution def is_installed(package: str) -> bool: """Check whether a package is installed. Args: package (str): Name of package to be checked. ...
# Copyright (c) OpenMMLab. All rights reserved. import importlib import os.path as osp import subprocess import pkg_resources from pkg_resources import get_distribution def is_installed(package: str) -> bool: """Check whether a package is installed. Args: package (str): Name of package to be checked. ...
from typing import Generator, Optional import pytest from docarray import BaseDoc, DocArray from docarray.documents import ImageDoc from docarray.typing import ImageUrl, NdArray from docarray.utils.map import map_docs, map_docs_batched from tests.units.typing.test_bytes import IMAGE_PATHS N_DOCS = 2 def load_from_...
from typing import Generator, Optional import pytest from docarray import BaseDoc, DocArray from docarray.documents import ImageDoc from docarray.typing import ImageUrl, NdArray from docarray.utils.map import map_docs, map_docs_batch from tests.units.typing.test_bytes import IMAGE_PATHS N_DOCS = 2 def load_from_do...
from typing import Optional from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE from llama_index.storage.kvstore.elasticsearch import ElasticsearchKVStore class ElasticsearchDocumentStore(KVDocumentStore): """ El...
from typing import Optional from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE from llama_index.storage.kvstore.elasticsearch import ElasticsearchKVStore class ElasticsearchDocumentStore(KVDocumentStore): """Elastic...
from setuptools import setup, find_packages with open("README.md", mode="r", encoding="utf-8") as readme_file: readme = readme_file.read() setup( name="sentence-transformers", version="2.8.0.dev0", author="Nils Reimers", author_email="info@nils-reimers.de", description="Multilingual text embe...
from setuptools import setup, find_packages with open("README.md", mode="r", encoding="utf-8") as readme_file: readme = readme_file.read() setup( name="sentence-transformers", version="2.8.0.dev0", author="Nils Reimers", author_email="info@nils-reimers.de", description="Multilingual text embe...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from parameterized import parameterized from mmdet.models.roi_heads import StandardRoIHead # noqa from mmdet.registry import MODELS from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg ...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from unittest import TestCase import torch from mmengine.data import InstanceData from parameterized import parameterized from mmdet.models.roi_heads import StandardRoIHead # noqa from mmdet.registry import MODELS from mmdet.testing import demo_mm_input...
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_SAMPLERS from .base_sampler import BaseSampler from .sampling_result import SamplingResult @BBOX_SAMPLERS.register_module() class PseudoSampler(BaseSampler): """A pseudo sampler that does not actually do sampling.""" def...
import torch from ..builder import BBOX_SAMPLERS from .base_sampler import BaseSampler from .sampling_result import SamplingResult @BBOX_SAMPLERS.register_module() class PseudoSampler(BaseSampler): """A pseudo sampler that does not actually do sampling.""" def __init__(self, **kwargs): pass def...
# Copyright (c) OpenMMLab. All rights reserved. import warnings import mmcv from ..builder import PIPELINES from .compose import Compose @PIPELINES.register_module() class MultiScaleFlipAug: """Test-time augmentation with multiple scales and flipping. An example configuration is as follows: .. code-b...
# Copyright (c) OpenMMLab. All rights reserved. import warnings import mmcv from ..builder import PIPELINES from .compose import Compose @PIPELINES.register_module() class MultiScaleFlipAug: """Test-time augmentation with multiple scales and flipping. An example configuration is as follows: .. code-b...
from pathlib import Path from typing import Callable, Optional from .folder import ImageFolder from .utils import download_and_extract_archive, verify_str_arg class Country211(ImageFolder): """`The Country211 Data Set <https://github.com/openai/CLIP/blob/main/data/country211.md>`_ from OpenAI. This dataset ...
from pathlib import Path from typing import Callable, Optional from .folder import ImageFolder from .utils import download_and_extract_archive, verify_str_arg class Country211(ImageFolder): """`The Country211 Data Set <https://github.com/openai/CLIP/blob/main/data/country211.md>`_ from OpenAI. This dataset ...
import os from time import time import numpy as np import pytest from docarray import BaseDoc, DocList from docarray.documents import ImageDoc from docarray.typing import NdArray from docarray.utils.map import map_docs, map_docs_batched from tests.units.typing.test_bytes import IMAGE_PATHS pytestmark = [pytest.mark....
import os from time import time import numpy as np import pytest from docarray import BaseDoc, DocList from docarray.documents import ImageDoc from docarray.typing import NdArray from docarray.utils.map import map_docs, map_docs_batched from tests.units.typing.test_bytes import IMAGE_PATHS pytestmark = [pytest.mark....
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '3.0.0' short_version = __version__ def parse_version_info(version_str): """Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int | str]: The version info, e.g., "1.3.0" is parsed...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '3.0.0rc6' short_version = __version__ def parse_version_info(version_str): """Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int | str]: The version info, e.g., "1.3.0" is par...
from pathlib import Path from typing import Any, Callable, Optional, Union from .folder import default_loader from .utils import download_and_extract_archive from .vision import VisionDataset class SUN397(VisionDataset): """`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_. The SUN39...
from pathlib import Path from typing import Any, Callable, Optional, Tuple, Union from .folder import default_loader from .utils import download_and_extract_archive from .vision import VisionDataset class SUN397(VisionDataset): """`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_. Th...
# mypy: allow-untyped-defs import torch from torch import Tensor aten = torch.ops.aten import inspect import warnings from typing import Callable, Optional, TypeVar from typing_extensions import ParamSpec from torch.types import Number decomposition_table: dict[str, torch.jit.ScriptFunction] = {} function_name_set...
# mypy: allow-untyped-defs import torch from torch import Tensor aten = torch.ops.aten import inspect import warnings from typing import Callable, Optional, TypeVar from typing_extensions import ParamSpec from torch.types import Number decomposition_table: dict[str, torch.jit.ScriptFunction] = {} function_name_set...
# coding: utf-8 import logging import numpy as np import lightgbm as lgb def test_register_logger(tmp_path): logger = logging.getLogger("LightGBM") logger.setLevel(logging.DEBUG) formatter = logging.Formatter('%(levelname)s | %(message)s') log_filename = tmp_path / "LightGBM_test_logger.log" fil...
# coding: utf-8 import logging import numpy as np import lightgbm as lgb def test_register_logger(tmp_path): logger = logging.getLogger("LightGBM") logger.setLevel(logging.DEBUG) formatter = logging.Formatter('%(levelname)s | %(message)s') log_filename = tmp_path / "LightGBM_test_logger.log" fil...
from __future__ import annotations import csv import logging import os import numpy as np from sklearn.metrics import ndcg_score logger = logging.getLogger(__name__) class CERerankingEvaluator: """ This class evaluates a CrossEncoder model for the task of re-ranking. Given a query and a list of docume...
from __future__ import annotations import csv import logging import os import numpy as np from sklearn.metrics import ndcg_score logger = logging.getLogger(__name__) class CERerankingEvaluator: """ This class evaluates a CrossEncoder model for the task of re-ranking. Given a query and a list of docume...
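The `CERerankingEvaluator` pair above scores a cross-encoder for re-ranking with NDCG, via the `ndcg_score` import visible in both rows. A toy illustration of that metric; the relevance labels and model scores below are made up:

import numpy as np
from sklearn.metrics import ndcg_score

# one query with five candidate documents: binary relevance vs. cross-encoder scores
true_relevance = np.asarray([[1, 0, 0, 1, 0]])
model_scores = np.asarray([[0.9, 0.8, 0.1, 0.6, 0.2]])

# 1.0 would mean both relevant documents were ranked above all irrelevant ones
print(ndcg_score(true_relevance, model_scores, k=10))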
from prisma.models import User from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock from backend.blocks.text import FillTextTemplateBlock from backend.data import graph from backend.data.graph import create_graph from backend.data.user import get_or_create_user from backend.util.test import SpinTestSe...
from prisma.models import User from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock from backend.blocks.text import FillTextTemplateBlock from backend.data import graph from backend.data.graph import create_graph from backend.data.user import get_or_create_user from backend.util.test import SpinTestSe...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
from __future__ import annotations import math from pathlib import Path import pytest from packaging.version import Version, parse from tokenizers import Tokenizer from sentence_transformers import SentenceTransformer from sentence_transformers.models.StaticEmbedding import StaticEmbedding try: import model2vec...
from __future__ import annotations import math from pathlib import Path import numpy as np import pytest from packaging.version import Version, parse from tokenizers import Tokenizer from sentence_transformers import SentenceTransformer from sentence_transformers.models.StaticEmbedding import StaticEmbedding try: ...
from llama_index.readers.web import RssReader def test_rss_reader_non_strict_sources(): default_reader = RssReader() documents = default_reader.load_data(urls=["https://news.ycombinator.com/rss"]) assert len(documents) > 0 def test_rss_reader_user_agent(): reader = RssReader(user_agent="MyApp/1.0 +h...
from llama_index.readers.web import RssReader def test_rss_reader_non_strict_sources(): default_reader = RssReader() documents = default_reader.load_data(urls=["https://news.ycombinator.com/rss"]) assert len(documents) > 0 def test_rss_reader_rsshub(): default_reader = RssReader() documents = de...
import contextlib import logging import typing import fastapi import fastapi.responses import starlette.middleware.cors import uvicorn import backend.data.block import backend.data.db import backend.data.graph import backend.data.user import backend.server.routers.v1 import backend.util.service import backend.util.se...
import contextlib import logging import typing import fastapi import fastapi.responses import starlette.middleware.cors import uvicorn import backend.data.block import backend.data.db import backend.data.user import backend.server.routers.v1 import backend.util.service import backend.util.settings settings = backend...
import contextlib import os import shutil import time from jina import DocumentArray, Flow cur_dir = os.path.dirname(os.path.abspath(__file__)) @contextlib.contextmanager def _update_file(input_file_path, output_file_path, temp_path): backup_file = os.path.join(temp_path, 'backup.py') try: shutil.co...
import os import time import shutil import contextlib from jina import Flow, DocumentArray cur_dir = os.path.dirname(os.path.abspath(__file__)) @contextlib.contextmanager def _update_file(input_file_path, output_file_path, temp_path): backup_file = os.path.join(temp_path, 'backup.py') try: shutil.co...
_base_ = './htc_x101-32x4d_fpn_16xb1-20e_coco.py' model = dict( backbone=dict( type='ResNeXt', groups=64, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
_base_ = './htc_x101_32x4d_fpn_16x1_20e_coco.py' model = dict( backbone=dict( type='ResNeXt', groups=64, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
import time from functools import partial from huggingface_hub import HfApi, hf_hub_url from packaging import version from requests import HTTPError from .. import config from . import logging logger = logging.get_logger(__name__) # Retry `preupload_lfs_files` in `huggingface_hub<0.20.0` on the "500 (Internal Serv...
from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str: if version.parse(hfh.__version__).release < version.parse("0.11.0").release: # old versions of hfh don't u...
"""An internal script to process `new_failures_with_bad_commit.json` produced by `utils/check_bad_commit.py`. This is used by `.github/workflows/check_failed_model_tests.yml` to produce a slack report of the following form ``` <{url}|New failed tests> { "GH_ydshieh": { "vit": 1 } } ``` """ import json i...
"""An internal script to process `new_model_failures_with_bad_commit.json` produced by `utils/check_bad_commit.py`. This is used by `.github/workflows/check_failed_model_tests.yml` to produce a slack report of the following form ``` <{url}|New failed tests> { "GH_ydshieh": { "vit": 1 } } ``` """ import ...
_base_ = [ '../_base_/models/cascade-mask-rcnn_r50_fpn.py', '../_base_/datasets/lvis_v1_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvi...
_base_ = [ '../_base_/models/cascade-mask-rcnn_r50_fpn.py', '../_base_/datasets/lvis_v1_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvi...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.chat_message_histories import ZepChatMessageHistory # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling opt...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.chat_message_histories import ZepChatMessageHistory # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling opt...
import numpy as np import pytest from keras.src import backend from keras.src import initializers from keras.src import layers from keras.src import models from keras.src import testing class SpectralNormalizationTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_basic_spectralnorm(self...
import numpy as np import pytest from keras.src import backend from keras.src import initializers from keras.src import layers from keras.src import models from keras.src import testing class SpectralNormalizationTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_basic_spectralnorm(self...