Columns: input (string, lengths 33 to 5k) and output (string, lengths 32 to 5k). Each record below spans two consecutive lines: the input file first, then its paired output file; cells longer than the preview width are truncated with a trailing ellipsis.
# Copyright (c) OpenMMLab. All rights reserved. import logging import os.path as osp from argparse import ArgumentParser import mmcv from mmengine.config import Config from mmengine.logging import MMLogger from mmengine.utils import mkdir_or_exist from mmdet.apis import inference_detector, init_detector from mmdet.re...
# Copyright (c) OpenMMLab. All rights reserved. import logging import os.path as osp from argparse import ArgumentParser import mmcv from mmengine.config import Config from mmengine.logging import MMLogger from mmengine.utils import mkdir_or_exist from mmdet.apis import inference_detector, init_detector from mmdet.re...
import numpy as np import pytest from keras.src import testing from keras.src.layers.activations import leaky_relu class LeakyReLUTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_leaky_relu(self): self.run_layer_test( leaky_relu.LeakyReLU, init_kwargs={...
import numpy as np import pytest from keras.src import testing from keras.src.layers.activations import leaky_relu class LeakyReLUTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_leaky_relu(self): self.run_layer_test( leaky_relu.LeakyReLU, init_kwargs={...
import numpy as np import pytest import torch from docarray.base_doc import BaseDoc from docarray.base_doc.io.json import orjson_dumps from docarray.typing import AnyUrl, NdArray, TorchTensor @pytest.fixture() def doc_and_class(): class Mmdoc(BaseDoc): img: NdArray url: AnyUrl txt: str ...
import numpy as np import pytest import torch from docarray.base_doc import BaseDoc from docarray.base_doc.io.json import orjson_dumps from docarray.typing import AnyUrl, NdArray, TorchTensor @pytest.fixture() def doc_and_class(): class Mmdoc(BaseDoc): img: NdArray url: AnyUrl txt: str ...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
import warnings from abc import ABC from typing import TYPE_CHECKING, Any, BinaryIO, Dict, TypeVar, Union from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.utils._internal.misc import import_library, is_notebook if TYPE_CHECKING: from docarray.typing.bytes.audio_bytes import AudioByt...
import warnings from abc import ABC from typing import TYPE_CHECKING, Any, BinaryIO, Dict, TypeVar, Union from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.utils._internal.misc import import_library, is_notebook if TYPE_CHECKING: from docarray.typing.bytes.audio_bytes import AudioByt...
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar...
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar...
""" Video audio parser. Contains parsers for mp3, mp4 files. """ from pathlib import Path from typing import Any, Dict, List, Optional, cast import logging from fsspec import AbstractFileSystem from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document logger = logging.getLog...
"""Video audio parser. Contains parsers for mp3, mp4 files. """ from pathlib import Path from typing import Any, Dict, List, Optional, cast import logging from fsspec import AbstractFileSystem from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document logger = logging.getLogg...
from __future__ import annotations from .splade_callbacks import SchedulerType, SpladeWeightRegulizerSchedulerCallback __all__ = ["SpladeWeightRegulizerSchedulerCallback", "SchedulerType"]
from __future__ import annotations from .splade_callbacks import SchedulerType, SpladeLambdaSchedulerCallback __all__ = ["SpladeLambdaSchedulerCallback", "SchedulerType"]
_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py'] teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa model = dict( teacher_config='configs/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py...
_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py'] teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa model = dict( teacher_config='configs/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py...
# CREDITS: https://github.com/openai/CLIP import gzip import html from functools import lru_cache from pathlib import Path import ftfy import regex as re @lru_cache() def default_bpe(): return str(Path(__file__).parents[2] / '.cache/bpe_simple_vocab_16e6.txt.gz') @lru_cache() def bytes_to_unicode(): """ ...
# CREDITS: https://github.com/openai/CLIP import gzip import html import os from functools import lru_cache import ftfy import regex as re @lru_cache() def default_bpe(): return os.path.join(os.getcwd(), '.cache', 'bpe_simple_vocab_16e6.txt.gz') @lru_cache() def bytes_to_unicode(): """ Returns list of...
# Copyright (c) OpenMMLab. All rights reserved. import ast import os.path as osp import re import warnings from typing import Tuple from mmengine.fileio import load from mmengine.utils import check_file_exist PKG2PROJECT = { 'mmcls': 'mmcls', 'mmdet': 'mmdet', 'mmdet3d': 'mmdet3d', 'mmseg': 'mmsegment...
# Copyright (c) OpenMMLab. All rights reserved. import ast class RemoveAssignFromAST(ast.NodeTransformer): """Remove Assign node if the target's name match the key. Args: key (str): The target name of the Assign node. """ def __init__(self, key): self.key = key def visit_Assign(...
from typing import ( TYPE_CHECKING, Iterable, ) from docarray.array.memory import DocumentArrayInMemory if TYPE_CHECKING: from docarray.document import Document class ChunkArray(DocumentArrayInMemory): """ :class:`ChunkArray` inherits from :class:`DocumentArray`. It's a subset of Documents. ...
from typing import ( TYPE_CHECKING, Iterable, ) from .memory import DocumentArrayInMemory if TYPE_CHECKING: from ..document import Document class ChunkArray(DocumentArrayInMemory): """ :class:`ChunkArray` inherits from :class:`DocumentArray`. It's a subset of Documents. :param docs: Set...
from __future__ import annotations import logging from dataclasses import dataclass from sentence_transformers.data_collator import SentenceTransformerDataCollator logger = logging.getLogger(__name__) @dataclass class SparseEncoderDataCollator(SentenceTransformerDataCollator): """Collator for a SparseEncoder m...
from __future__ import annotations import logging from dataclasses import dataclass from sentence_transformers.data_collator import SentenceTransformerDataCollator logger = logging.getLogger(__name__) @dataclass class SparseEncoderDataCollator(SentenceTransformerDataCollator): """Collator for a SparseEncoder m...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine import Config from mmengine.structures import InstanceData from mmdet import * # noqa from mmdet.models.dense_heads import DDODHead class TestDDODHead(TestCase): def test_ddod_head_loss(self): """T...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine import Config from mmengine.data import InstanceData from mmdet import * # noqa from mmdet.models.dense_heads import DDODHead class TestDDODHead(TestCase): def test_ddod_head_loss(self): """Tests d...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # U...
from __future__ import annotations import pytest from torch.utils.data import BatchSampler, ConcatDataset, SequentialSampler from sentence_transformers.sampler import RoundRobinBatchSampler from sentence_transformers.util import is_datasets_available if is_datasets_available(): from datasets import Dataset else:...
from __future__ import annotations import pytest from datasets import Dataset from torch.utils.data import BatchSampler, ConcatDataset, SequentialSampler from sentence_transformers.sampler import RoundRobinBatchSampler DATASET_LENGTH = 25 @pytest.fixture def dummy_concat_dataset() -> ConcatDataset: """ Dum...
""" This script finds the person responsible for labeling a PR by a commit SHA. It is used by the workflow in '.github/workflows/pr-labels.yml'. Note: we only ping the person who pulls the pr, not the reviewers, as the reviewers can sometimes be external to torchaudio with no labeling responsibility, so we don't want t...
""" This script finds the person responsible for labeling a PR by a commit SHA. It is used by the workflow in '.github/workflows/pr-labels.yml'. Note: we only ping the person who pulls the pr, not the reviewers, as the reviewers can sometimes be external to torchaudio with no labeling responsibility, so we don't want t...
import datetime from typing import List import prisma.enums import pydantic class Pagination(pydantic.BaseModel): total_items: int = pydantic.Field( description="Total number of items.", examples=[42] ) total_pages: int = pydantic.Field( description="Total number of pages.", examples=[97]...
import datetime from typing import List import prisma.enums import pydantic class Pagination(pydantic.BaseModel): total_items: int = pydantic.Field( description="Total number of items.", examples=[42] ) total_pages: int = pydantic.Field( description="Total number of pages.", examples=[97]...
import contextlib from collections.abc import Iterable from pathlib import Path from typing import Any from tomlkit import dump, inline_table, load from tomlkit.items import InlineTable def _get_dep_inline_table(path: Path) -> InlineTable: dep = inline_table() dep.update({"path": str(path), "develop": True})...
import contextlib from collections.abc import Iterable from pathlib import Path from typing import Any from tomlkit import dump, inline_table, load from tomlkit.items import InlineTable def _get_dep_inline_table(path: Path) -> InlineTable: dep = inline_table() dep.update({"path": str(path), "develop": True})...
from abc import ABC from typing import Any, Callable, Dict, List, Optional, Union, TypeVar from llama_index.core.llms import ChatMessage from llama_index.core.memory import BaseMemory from llama_index.core.workflow import ( Context, ) from llama_index.core.workflow.checkpointer import CheckpointCallback from llama...
from abc import ABC from typing import Any, Callable, Dict, List, Optional, Union, TypeVar from llama_index.core.llms import ChatMessage from llama_index.core.memory import BaseMemory from llama_index.core.workflow import ( Context, ) from llama_index.core.workflow.checkpointer import CheckpointCallback from llama...
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and i...
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and i...
_base_ = [ '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) METAINFO = { 'classes': ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'dinin...
_base_ = [ '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) METAINFO = { 'classes': ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'dinin...
_base_ = [ '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' ] data_preprocessor = dict( type='DetDataPreprocessor', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], bgr_to_rgb=True) # model settings model = dict( type='CornerNet', data_preprocessor=data_pr...
_base_ = [ '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' ] data_preprocessor = dict( type='DetDataPreprocessor', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], bgr_to_rgb=True) # model settings model = dict( type='CornerNet', data_preprocessor=data_pr...
"""Tool for the SceneXplain API.""" from typing import Optional from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.utilities.scenexplain import SceneXplainAPIWrapper class SceneXplainInput(BaseModel...
"""Tool for the SceneXplain API.""" from typing import Optional from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.utilities.scenexplain import SceneXplainAPIWrapper class SceneXplainInput(BaseModel...
from .proto import ProtoArrayMixin
from abc import ABC from docarray.array.mixins.content import ContentPropertyMixin from docarray.array.mixins.delitem import DelItemMixin from docarray.array.mixins.embed import EmbedMixin from docarray.array.mixins.empty import EmptyMixin from docarray.array.mixins.evaluation import EvaluationMixin from docarray.arra...
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from mmcv.runner import BaseModule from ..builder import NECKS @NECKS.register_module() class SSDNeck(BaseModule): """Extra layers of SSD backbone to generate multi-sca...
import torch import torch.nn as nn from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from mmcv.runner import BaseModule from ..builder import NECKS @NECKS.register_module() class SSDNeck(BaseModule): """Extra layers of SSD backbone to generate multi-scale feature maps. Args: in_channels ...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
from docarray.typing.url.any_url import AnyUrl from docarray.typing.url.audio_url import AudioUrl from docarray.typing.url.image_url import ImageUrl from docarray.typing.url.text_url import TextUrl from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl from docarray.typing.url.url_3d.point_cloud_url import PointClou...
import os import pytest from llama_index.core.tools.tool_spec.base import BaseToolSpec from llama_index.tools.notion import NotionToolSpec # Get yourself a page id and database id from your notion account # Refer to the page: https://developers.notion.com/docs/create-a-notion-integration#give-your-integration-page-pe...
from llama_index.core.tools.tool_spec.base import BaseToolSpec from llama_index.tools.notion import NotionToolSpec def test_class(): names_of_base_classes = [b.__name__ for b in NotionToolSpec.__mro__] assert BaseToolSpec.__name__ in names_of_base_classes
"""Google Search tool spec.""" import json import urllib.parse from typing import Optional import requests from llama_index.core.schema import Document from llama_index.core.tools.tool_spec.base import BaseToolSpec QUERY_URL_TMPL = ( "https://www.googleapis.com/customsearch/v1?key={key}&cx={engine}&q={query}" ) ...
"""Google Search tool spec.""" import urllib.parse from typing import Optional import requests from llama_index.core.schema import Document from llama_index.core.tools.tool_spec.base import BaseToolSpec QUERY_URL_TMPL = ( "https://www.googleapis.com/customsearch/v1?key={key}&cx={engine}&q={query}" ) class Goog...
from typing import Any, Dict, Optional, Type, cast from llama_index.core.llms.llm import LLM from llama_index.core.output_parsers.pydantic import PydanticOutputParser from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate from llama_index.core.settings import Settings from llama_index.core.types ...
from typing import Any, Dict, Optional, Type, cast from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.llms.llm import LLM from llama_index.core.output_parsers.pydantic import PydanticOutputParser from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate from llama_index.cor...
""" This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training. It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version. Nowadays, with Sentence Transformers v3+, it is recommended to use the `Senten...
from __future__ import annotations import numpy as np from torch.utils.data import Dataset from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available from sentence_transformers.readers.InputExample import InputExample class DenoisingAutoEncoderDataset(Dataset): """ The DenoisingAutoEnc...
__version__ = '0.12.4' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install()
__version__ = '0.12.3' import os from .document import Document from .array import DocumentArray from .dataclasses import dataclass, field if 'DA_NO_RICH_HANDLER' not in os.environ: from rich.traceback import install install()
import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) pytestmark = pytest.mark.integration @pytest.mark.parametrize("path", ["paws", "csv"]) def test_inspect_dataset(p...
import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) pytestmark = pytest.mark.integration @pytest.mark.parametrize("path", ["paws", "csv"]) def test_inspect_dataset(p...
import orjson from pydantic.json import ENCODERS_BY_TYPE from docarray.typing.abstract_type import AbstractType def _default_orjson(obj): """ default option for orjson dumps. :param obj: :return: return a json compatible object """ if isinstance(obj, AbstractType): return obj._docarr...
import orjson from docarray.typing.tensor.abstract_tensor import AbstractTensor def _default_orjson(obj): """ default option for orjson dumps. :param obj: :return: return a json compatible object """ if isinstance(obj, AbstractTensor): return obj._docarray_to_json_compatible() el...
from markitdown import MarkItDown from llama_index.core.bridge.pydantic import BaseModel, model_validator import os from pathlib import Path from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document from typing import Tuple, Optional, Union, List from typing_extensions imp...
from markitdown import MarkItDown from llama_index.core.bridge.pydantic import BaseModel, model_validator import os from pathlib import Path from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document from typing import Tuple, Optional, Union, List from typing_extensions imp...
from pathlib import Path from typing import Any from langchain_core._api.path import as_import_path def __getattr__(name: str) -> Any: """Get attr name.""" if name == "create_csv_agent": # Get directory of langchain package HERE = Path(__file__).parents[3] here = as_import_path(Path(...
from pathlib import Path from typing import Any from langchain_core._api.path import as_import_path def __getattr__(name: str) -> Any: """Get attr name.""" if name == "create_csv_agent": # Get directory of langchain package HERE = Path(__file__).parents[3] here = as_import_path(Path(...
_base_ = './mask-rcnn_x50-32x4d_fpn_gn-ws-all_2x_coco.py' # learning policy max_epochs = 24 train_cfg = dict(max_epochs=max_epochs) # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, ...
_base_ = './mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py' # learning policy max_epochs = 24 train_cfg = dict(max_epochs=max_epochs) # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, ...
import datetime import autogpt_libs.auth as autogpt_auth_lib import fastapi.testclient import pytest import pytest_mock import backend.server.model as server_model import backend.server.v2.library.model as library_model from backend.server.v2.library.routes import router as library_router app = fastapi.FastAPI() app...
import datetime import autogpt_libs.auth as autogpt_auth_lib import fastapi import fastapi.testclient import pytest import pytest_mock import backend.server.model as server_model import backend.server.v2.library.model as library_model from backend.server.v2.library.routes import router as library_router app = fastap...
from .database import DatabaseManager, DatabaseManagerAsyncClient, DatabaseManagerClient from .manager import ExecutionManager from .scheduler import Scheduler __all__ = [ "DatabaseManager", "DatabaseManagerClient", "DatabaseManagerAsyncClient", "ExecutionManager", "Scheduler", ]
from .database import DatabaseManager, DatabaseManagerClient from .manager import ExecutionManager from .scheduler import Scheduler __all__ = [ "DatabaseManager", "DatabaseManagerClient", "ExecutionManager", "Scheduler", ]
from __future__ import annotations import json import logging import os from typing import Literal import torch from torch import Tensor, nn from .tokenizer import WhitespaceTokenizer logger = logging.getLogger(__name__) class BoW(nn.Module): """Implements a Bag-of-Words (BoW) model to derive sentence embeddi...
from __future__ import annotations import json import logging import os from typing import Literal import torch from torch import Tensor, nn from .tokenizer import WhitespaceTokenizer logger = logging.getLogger(__name__) class BoW(nn.Module): """Implements a Bag-of-Words (BoW) model to derive sentence embeddi...
"""langchain-core version information and utilities.""" VERSION = "0.3.62"
"""langchain-core version information and utilities.""" VERSION = "0.3.61"
"""Parser for JSON output.""" from __future__ import annotations import json from json import JSONDecodeError from typing import Annotated, Any, Optional, TypeVar, Union import jsonpatch # type: ignore[import-untyped] import pydantic from pydantic import SkipValidation from langchain_core.exceptions import OutputP...
"""Parser for JSON output.""" from __future__ import annotations import json from json import JSONDecodeError from typing import Annotated, Any, Optional, TypeVar, Union import jsonpatch # type: ignore[import-untyped] import pydantic from pydantic import SkipValidation from langchain_core.exceptions import OutputP...
from keras.src.backend.common.name_scope import name_scope from keras.src.backend.jax import core from keras.src.backend.jax import distribution_lib from keras.src.backend.jax import image from keras.src.backend.jax import linalg from keras.src.backend.jax import math from keras.src.backend.jax import nn from keras.src...
from keras.src.backend.jax import core from keras.src.backend.jax import distribution_lib from keras.src.backend.jax import image from keras.src.backend.jax import linalg from keras.src.backend.jax import math from keras.src.backend.jax import nn from keras.src.backend.jax import numpy from keras.src.backend.jax import...
from llama_index_instrumentation.span.simple import SimpleSpan # noqa
from typing import Dict, Optional from llama_index.core.bridge.pydantic import Field from llama_index.core.instrumentation.span.base import BaseSpan from datetime import datetime class SimpleSpan(BaseSpan): """Simple span class.""" start_time: datetime = Field(default_factory=lambda: datetime.now()) end_...
import os from pathlib import Path from typing import List, Tuple, Union from torch import Tensor from torch.hub import download_url_to_file from torch.utils.data import Dataset from torchaudio.datasets.librispeech import load_librispeech_item from torchaudio.datasets.utils import extract_archive _ARCHIVE_NAME = "li...
import os from pathlib import Path from typing import List, Tuple, Union from torch import Tensor from torch.hub import download_url_to_file from torch.utils.data import Dataset from torchaudio.datasets.librispeech import load_librispeech_item from torchaudio.datasets.utils import extract_archive _ARCHIVE_NAME = "li...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Tuple import torch.nn as nn from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init from torch import Tensor from mmdet.core.utils import OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .anchor_head import AnchorHead @...
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init from mmdet.registry import MODELS from .anchor_head import AnchorHead @MODELS.register_module() class RetinaSepBNHead(AnchorHead): """"RetinaHead with separate BN. In Retin...
from langchain_core.utils.utils import ( build_extra_kwargs, check_package_version, convert_to_secret_str, get_pydantic_field_names, guard_import, mock_now, raise_for_status_with_text, xor_args, ) __all__ = [ "build_extra_kwargs", "check_package_version", "convert_to_secret_...
from langchain_core.utils.utils import ( build_extra_kwargs, check_package_version, convert_to_secret_str, get_pydantic_field_names, guard_import, mock_now, raise_for_status_with_text, xor_args, ) __all__ = [ "xor_args", "raise_for_status_with_text", "mock_now", "guard_i...
import os import pytest import torch import whisper @pytest.mark.parametrize("model_name", whisper.available_models()) def test_transcribe(model_name: str): device = "cuda" if torch.cuda.is_available() else "cpu" model = whisper.load_model(model_name).to(device) audio_path = os.path.join(os.path.dirname...
import os import pytest import torch import whisper @pytest.mark.parametrize("model_name", whisper.available_models()) def test_transcribe(model_name: str): device = "cuda" if torch.cuda.is_available() else "cpu" model = whisper.load_model(model_name).to(device) audio_path = os.path.join(os.path.dirname...
import torch _TORCHFUNCTION_SUBCLASS = False class _ReturnTypeCM: def __init__(self, to_restore): self.to_restore = to_restore def __enter__(self): return self def __exit__(self, *args): global _TORCHFUNCTION_SUBCLASS _TORCHFUNCTION_SUBCLASS = self.to_restore def set_r...
import torch _TORCHFUNCTION_SUBCLASS = False class _ReturnTypeCM: def __init__(self, to_restore): self.to_restore = to_restore def __enter__(self): return self def __exit__(self, *args): global _TORCHFUNCTION_SUBCLASS _TORCHFUNCTION_SUBCLASS = self.to_restore def set_r...
# Copyright (c) OpenMMLab. All rights reserved. """Collecting some commonly used type hint in mmdetection.""" from typing import Dict, List, Optional, Sequence, Tuple, Union import torch from mmengine.config import ConfigDict from mmengine.data import InstanceData, PixelData from ..bbox.samplers import SamplingResult...
# Copyright (c) OpenMMLab. All rights reserved. """Collecting some commonly used type hint in mmdetection.""" from typing import Dict, List, Optional, Tuple, Union import torch from mmengine.config import ConfigDict from mmengine.data import InstanceData, PixelData from ..bbox.samplers import SamplingResult from ..da...
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
"""String utilities.""" from typing import Any def stringify_value(val: Any) -> str: """Stringify a value. Args: val: The value to stringify. Returns: str: The stringified value. """ if isinstance(val, str): return val elif isinstance(val, dict): return "\n" ...
from typing import Any def stringify_value(val: Any) -> str: """Stringify a value. Args: val: The value to stringify. Returns: str: The stringified value. """ if isinstance(val, str): return val elif isinstance(val, dict): return "\n" + stringify_dict(val) ...
import abc import io import pathlib import pickle from typing import Any, BinaryIO, cast, Dict, Iterator, List, Optional, Tuple, Union import numpy as np from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper from torchvision.datapoints import Image from torchvision.prototype.datapoints import Label from to...
import abc import io import pathlib import pickle from typing import Any, BinaryIO, cast, Dict, Iterator, List, Optional, Tuple, Union import numpy as np from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper from torchvision.prototype.datapoints import Image, Label from torchvision.prototype.datasets.utils...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '0.3.0' def parse_version_info(version_str): """Parse the version information. Args: version_str (str): version string like '0.1.0'. Returns: tuple: version information contains major, minor, micro version. """ versio...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '0.2.0' def parse_version_info(version_str): """Parse the version information. Args: version_str (str): version string like '0.1.0'. Returns: tuple: version information contains major, minor, micro version. """ versio...
try: from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER except ModuleNotFoundError: _HAS_GPU_VIDEO_DECODER = False from ._video_opt import ( _HAS_CPU_VIDEO_DECODER, _HAS_VIDEO_OPT, _probe_video_from_file, _probe_video_from_memory, _read_video_from_file, _read_video_from_memory, _...
from typing import Any, Dict, Iterator import torch from ..utils import _log_api_usage_once try: from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER except ModuleNotFoundError: _HAS_GPU_VIDEO_DECODER = False from ._video_opt import ( _HAS_CPU_VIDEO_DECODER, _HAS_VIDEO_OPT, _probe_video_from_fi...
import logging from typing import TYPE_CHECKING if TYPE_CHECKING: from backend.util.process import AppProcess logger = logging.getLogger(__name__) def run_processes(*processes: "AppProcess", **kwargs): """ Execute all processes in the app. The last process is run in the foreground. Includes enhanced...
from typing import TYPE_CHECKING if TYPE_CHECKING: from backend.util.process import AppProcess def run_processes(*processes: "AppProcess", **kwargs): """ Execute all processes in the app. The last process is run in the foreground. """ try: for process in processes[:-1]: proces...
import os import pytest import torch import torchaudio class GreedyCTCDecoder(torch.nn.Module): def __init__(self, labels, blank: int = 0): super().__init__() self.blank = blank self.labels = labels def forward(self, logits: torch.Tensor) -> str: """Given a sequence logits ov...
import pytest import torch import torchaudio class GreedyCTCDecoder(torch.nn.Module): def __init__(self, labels, blank: int = 0): super().__init__() self.blank = blank self.labels = labels def forward(self, logits: torch.Tensor) -> str: """Given a sequence logits over labels, ...
from workflows.errors import ( ContextSerdeError, # noqa WorkflowCancelledByUser, # noqa WorkflowConfigurationError, # noqa WorkflowDone, # noqa WorkflowRuntimeError, # noqa WorkflowStepDoesNotExistError, # noqa WorkflowTimeoutError, # noqa WorkflowValidationError, # noqa )
class WorkflowValidationError(Exception): pass class WorkflowTimeoutError(Exception): pass class WorkflowRuntimeError(Exception): pass class WorkflowDone(Exception): pass class WorkflowCancelledByUser(Exception): pass class WorkflowStepDoesNotExistError(Exception): pass class Workflo...
import asyncio import sys import pytest from llama_index.core import Document from llama_index.graph_rag.cognee import CogneeGraphRAG def test_smoke(): """No-op test: CI will fail if no tests are collected.""" @pytest.mark.skipif( sys.version_info < (3, 10), reason="mock strategy requires python3.10 or hig...
from llama_index.core import Document import asyncio import pytest from llama_index.graph_rag.cognee import CogneeGraphRAG @pytest.mark.asyncio() async def test_add_data(monkeypatch): # Instantiate cognee GraphRAG cogneeGraphRAG = CogneeGraphRAG( llm_api_key="", llm_provider="openai", ...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
import PIL.Image import torch from torchvision import datapoints from torchvision.transforms.functional import pil_to_tensor, to_pil_image from torchvision.utils import _log_api_usage_once from ._utils import _get_kernel, _register_kernel_internal def erase( inpt: torch.Tensor, i: int, j: int, h: in...
import PIL.Image import torch from torchvision import datapoints from torchvision.transforms.functional import pil_to_tensor, to_pil_image from torchvision.utils import _log_api_usage_once from ._utils import _get_kernel, _register_kernel_internal def erase( inpt: torch.Tensor, i: int, j: int, h: in...
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union from docarray.base_doc import BaseDoc from docarray.typing.tensor.tensor import AnyTensor from docarray.utils._internal.misc import import_library T = TypeVar('T', bound='VerticesAndFaces') class VerticesAndFaces(BaseDoc): """ Document for handling...
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union from docarray.base_doc import BaseDoc from docarray.typing.tensor.tensor import AnyTensor from docarray.utils._internal.misc import import_library T = TypeVar('T', bound='VerticesAndFaces') class VerticesAndFaces(BaseDoc): """ Document for handling...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional import torch import torch.nn as nn from mmengine.runner import load_checkpoint from torch import Tensor from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.utils import ConfigType, OptConfigType from ..utils.m...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional import torch import torch.nn as nn from mmengine.runner import load_checkpoint from torch import Tensor from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.utils import ConfigType, OptConfigType from ..utils.m...
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unles...
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unles...
from pathlib import Path from typing import List import pytest from dpr_text import DPRTextEncoder from jina import Document, DocumentArray, Executor _EMBEDDING_DIM = 768 @pytest.fixture(scope='session') def basic_encoder() -> DPRTextEncoder: return DPRTextEncoder() @pytest.fixture(scope='session') def basic_...
from pathlib import Path from typing import List import pytest from dpr_text import DPRTextEncoder from jina import Document, DocumentArray, Executor _EMBEDDING_DIM = 768 @pytest.fixture(scope='session') def basic_encoder() -> DPRTextEncoder: return DPRTextEncoder() @pytest.fixture(scope='session') def basic_...
import copy import importlib import os import sys from keras.src import backend as backend_module from keras.src.api_export import keras_export from keras.src.backend.common import global_state def in_tf_graph(): if global_state.get_global_attribute("in_tf_graph_scope", False): return True if "tenso...
import copy import importlib import os import sys from keras.src import backend as backend_module from keras.src.api_export import keras_export from keras.src.backend.common import global_state def in_tf_graph(): if global_state.get_global_attribute("in_tf_graph_scope", False): return True if "tenso...
import pytest from .utils import remove_color_codes @pytest.mark.parametrize( "raw_text, clean_text", [ ( "COMMAND = \x1b[36mbrowse_website\x1b[0m " "ARGUMENTS = \x1b[36m{'url': 'https://www.google.com'," " 'question': 'What is the capital of France?'}\x1b[0m", ...
import pytest from .utils import remove_color_codes @pytest.mark.parametrize( "raw_text, clean_text", [ ( "COMMAND = \x1b[36mbrowse_website\x1b[0m " "ARGUMENTS = \x1b[36m{'url': 'https://www.google.com'," " 'question': 'What is the capital of France?'}\x1b[0m", ...
import inspect import threading from typing import Any, Awaitable, Callable, ParamSpec, TypeVar, cast, overload P = ParamSpec("P") R = TypeVar("R") @overload def thread_cached(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]: ... @overload def thread_cached(func: Callable[P, R]) -> Callable[P, R]: ......
import threading from typing import Callable, ParamSpec, TypeVar P = ParamSpec("P") R = TypeVar("R") def thread_cached(func: Callable[P, R]) -> Callable[P, R]: thread_local = threading.local() def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: cache = getattr(thread_local, "cache", None) i...
import pytest from datasets.exceptions import DatasetNotFoundError from datasets.inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_default_config_name, get_dataset_infos, get_dataset_split_names, ) pytestmark = pytest.mark.integration @pytest.mark.parametrize( ...
import pytest from datasets.exceptions import DatasetNotFoundError from datasets.inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_default_config_name, get_dataset_infos, get_dataset_split_names, ) pytestmark = pytest.mark.integration @pytest.mark.parametrize( ...
import codecs from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField class TextDecoderBlock(Block): class Input(BlockSchema): text: str = SchemaField( description="A string containing escaped characters to be decoded", ...
import codecs from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField class TextDecoderBlock(Block): class Input(BlockSchema): text: str = SchemaField( description="A string containing escaped characters to be decoded", ...
"""**Schemas** are the LangChain Base Classes and Interfaces.""" from langchain_core.agents import AgentAction, AgentFinish from langchain_core.caches import BaseCache from langchain_core.chat_history import BaseChatMessageHistory from langchain_core.documents import BaseDocumentTransformer, Document from langchain_co...
"""**Schemas** are the LangChain Base Classes and Interfaces.""" from langchain_core.agents import AgentAction, AgentFinish from langchain_core.caches import BaseCache from langchain_core.chat_history import BaseChatMessageHistory from langchain_core.documents import BaseDocumentTransformer, Document from langchain_co...
from langchain_core.messages import ( AIMessage, FunctionMessage, HumanMessage, SystemMessage, ) from langchain_core.output_parsers.openai_tools import ( parse_tool_call, ) from langchain_community.chat_models.tongyi import ( convert_dict_to_message, convert_message_to_dict, ) def test__c...
from langchain_core.messages import ( AIMessage, FunctionMessage, HumanMessage, SystemMessage, ) from langchain_core.output_parsers.openai_tools import ( parse_tool_call, ) from langchain_community.chat_models.tongyi import ( convert_dict_to_message, convert_message_to_dict, ) def test__c...
from .text_paddle import TextPaddleEncoder
from .text_paddle import TextPaddleEncoder
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools.memorize.tool import Memorize, TrainableLLM # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optio...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools.memorize.tool import Memorize, TrainableLLM # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optio...
import pytest from langchain.evaluation.string_distance import ( PairwiseStringDistanceEvalChain, StringDistance, StringDistanceEvalChain, ) @pytest.mark.requires("rapidfuzz") @pytest.mark.parametrize("distance", list(StringDistance)) def test_zero_distance(distance: StringDistance) -> None: eval_cha...
import pytest from langchain.evaluation.string_distance import ( PairwiseStringDistanceEvalChain, StringDistance, StringDistanceEvalChain, ) @pytest.mark.requires("rapidfuzz") @pytest.mark.parametrize("distance", list(StringDistance)) def test_zero_distance(distance: StringDistance) -> None: eval_cha...
from typing import Optional from docarray import DocList, BaseDoc from docarray.typing import NdArray from jina import Executor, requests import numpy as np class MyDoc(BaseDoc): text: str embedding: Optional[NdArray] = None class Encoder(Executor): def __init__( self, *args, **k...
from typing import Optional from docarray import DocList, BaseDoc from docarray.typing import NdArray from jina import Executor, requests import numpy as np class MyDoc(BaseDoc): text: str embedding: Optional[NdArray] = None class Encoder(Executor): def __init__( self, *args, ...
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( # use ResNeSt img_norm data_preprocessor=dict( mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], bgr_to_rgb=True), backbone=dict( type='ResNeSt', ...
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( backbone=dict( type='ResNeSt', stem_channels=64, depth=50, radix=2, reduction_factor=4, avg_down_stride=True, num_stages=4, out_indice...
from setuptools import find_packages, setup with open("README.md", mode="r", encoding="utf-8") as readme_file: readme = readme_file.read() setup( name="sentence-transformers", version="3.0.0.dev0", author="Nils Reimers", author_email="info@nils-reimers.de", description="Multilingual text embe...
from setuptools import setup, find_packages with open("README.md", mode="r", encoding="utf-8") as readme_file: readme = readme_file.read() setup( name="sentence-transformers", version="3.0.0.dev0", author="Nils Reimers", author_email="info@nils-reimers.de", description="Multilingual text embe...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.optimizers import legacy as legacy from keras.optimizers import schedules as schedules from keras.src.optimizers import deserialize as deserialize from keras.src.optimizers import get as ...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.api.optimizers import legacy from keras.api.optimizers import schedules from keras.src.optimizers import deserialize from keras.src.optimizers import get from keras.src.optimizers import ...
"""**Load** module helps with serialization and deserialization.""" from importlib import import_module from typing import TYPE_CHECKING if TYPE_CHECKING: from langchain_core.load.dump import dumpd, dumps from langchain_core.load.load import load, loads from langchain_core.load.serializable import Seriali...
"""**Load** module helps with serialization and deserialization.""" from langchain_core.load.dump import dumpd, dumps from langchain_core.load.load import load, loads from langchain_core.load.serializable import Serializable __all__ = ["dumpd", "dumps", "load", "loads", "Serializable"]
from urllib.parse import urlparse from backend.blocks.github._auth import GithubCredentials from backend.util.request import Requests def _convert_to_api_url(url: str) -> str: """ Converts a standard GitHub URL to the corresponding GitHub API URL. Handles repository URLs, issue URLs, pull request URLs, a...
from urllib.parse import urlparse from backend.blocks.github._auth import GithubCredentials from backend.util.request import Requests def _convert_to_api_url(url: str) -> str: """ Converts a standard GitHub URL to the corresponding GitHub API URL. Handles repository URLs, issue URLs, pull request URLs, a...
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' # Use RepeatDataset to speed up training # change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs) train_dataloader = dict(dataset=dict(times=2))
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' # Use RepeatDataset to speed up training # change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs) data = dict(train=dict(times=2))
from enum import Enum from typing import TYPE_CHECKING, Union, overload import numpy as np if TYPE_CHECKING: import torch # pants: no-infer-dep class Pooling(str, Enum): """Enum of possible pooling choices with pooling behaviors.""" CLS = "cls" MEAN = "mean" def __call__(self, array: np.ndarr...
from enum import Enum from typing import TYPE_CHECKING, Union, overload import numpy as np if TYPE_CHECKING: import torch # pants: no-infer-dep class Pooling(str, Enum): """Enum of possible pooling choices with pooling behaviors.""" CLS = "cls" MEAN = "mean" def __call__(self, array: np.ndarr...
from __future__ import annotations import random import pytest import torch from torch.utils.data import ConcatDataset from sentence_transformers.sampler import NoDuplicatesBatchSampler, ProportionalBatchSampler from sentence_transformers.util import is_datasets_available if is_datasets_available(): from datase...
from __future__ import annotations import random import pytest import torch from datasets import Dataset from torch.utils.data import ConcatDataset from sentence_transformers.sampler import NoDuplicatesBatchSampler, ProportionalBatchSampler @pytest.fixture def dummy_dataset() -> Dataset: """ Dummy dataset ...
import pathlib from typing import Any, Dict, List, Optional, Tuple, Union from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper from torchvision.datapoints import BoundingBoxes from torchvision.prototype.datapoints import Label from torchvision.prototype.datasets.utils...
import pathlib from typing import Any, Dict, List, Optional, Tuple, Union from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper from torchvision.datapoints import BoundingBox from torchvision.prototype.datapoints import Label from torchvision.prototype.datasets.utils i...
import json from typing import Optional from cryptography.fernet import Fernet from backend.util.settings import Settings ENCRYPTION_KEY = Settings().secrets.encryption_key class JSONCryptor: def __init__(self, key: Optional[str] = None): # Use provided key or get from environment self.key = ke...
import json from typing import Optional from cryptography.fernet import Fernet from backend.util.settings import Settings ENCRYPTION_KEY = Settings().secrets.encryption_key class JSONCryptor: def __init__(self, key: Optional[str] = None): # Use provided key or get from environment self.key = ke...
"""Memory used to save agent output AND intermediate steps.""" from typing import Any from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import BaseMessage, get_buffer_string from langchain.agents.format_scratchpad import ( format_to_openai_function_messages, format_to_...
"""Memory used to save agent output AND intermediate steps.""" from typing import Any from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import BaseMessage, get_buffer_string from langchain.agents.format_scratchpad import ( format_to_openai_function_messages, format_to_...
import pytest from sentence_transformers import SentenceTransformer @pytest.mark.parametrize( ("revision", "expected_base_revision"), [ ("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"), ("f3cb857", "f3cb857"), ("main", "valid-revision"), ...
from sentence_transformers import SentenceTransformer import pytest @pytest.mark.parametrize( ("revision", "expected_base_revision"), [ ("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"), ("f3cb857", "f3cb857"), ("main", "valid-revision"), ...
import pathlib from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, JsonParser, Mapper, UnBatcher from torchvision.prototype.datapoints import Label from torchvision.prototype.datasets.utils import Dataset, Encoded...
import pathlib from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, JsonParser, Mapper, UnBatcher from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource from torchvisio...
# Copyright (c) OpenMMLab. All rights reserved. import warnings from mmcv.utils import Registry, build_from_cfg PRIOR_GENERATORS = Registry('Generator for anchors and points') ANCHOR_GENERATORS = PRIOR_GENERATORS def build_prior_generator(cfg, default_args=None): return build_from_cfg(cfg, PRIOR_GENERATORS, de...
import warnings from mmcv.utils import Registry, build_from_cfg PRIOR_GENERATORS = Registry('Generator for anchors and points') ANCHOR_GENERATORS = PRIOR_GENERATORS def build_prior_generator(cfg, default_args=None): return build_from_cfg(cfg, PRIOR_GENERATORS, default_args) def build_anchor_generator(cfg, de...
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py' model = dict(roi_head=dict(bbox_head=dict(num_classes=3))) classes = ('person', 'bicycle', 'car') data = dict( train=dict(classes=classes), val=dict(classes=classes), test=dict(classes=classes)) load_from = 'https://download.openmmlab.com/mmdetection/v2....
_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' model = dict(roi_head=dict(bbox_head=dict(num_classes=3))) classes = ('person', 'bicycle', 'car') data = dict( train=dict(classes=classes), val=dict(classes=classes), test=dict(classes=classes)) load_from = 'https://download.openmmlab.com/mmdetectio...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.applications.densenet import DenseNet121 as DenseNet121 from keras.src.applications.densenet import DenseNet169 as DenseNet169 from keras.src.applications.densenet import DenseNet201 ...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.applications.densenet import DenseNet121 from keras.src.applications.densenet import DenseNet169 from keras.src.applications.densenet import DenseNet201 from keras.src.applications.de...
import pytest from jina.enums import GatewayProtocolType from jina.helper import ArgNamespace from jina.parsers import set_gateway_parser, set_pod_parser @pytest.mark.parametrize( 'port,expected_port', [ ('12345', [12345]), ([12345], [12345]), ([12345, 12344], [12345, 12344]), ], ...
import pytest from jina.enums import GatewayProtocolType from jina.helper import ArgNamespace from jina.parsers import set_gateway_parser, set_pod_parser @pytest.mark.parametrize( 'port,expected_port', [ ('12345', [12345]), ([12345], [12345]), ([12345, 12344], [12345, 12344]), ], ...
import pytest @pytest.mark.compile def test_placeholder() -> None: """Used for compiling integration tests without running any real tests."""
import pytest @pytest.mark.compile def test_placeholder() -> None: """Used for compiling integration tests without running any real tests.""" pass
# Copyright 2019 The OpenXLA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in ...
# Copyright 2019 The OpenXLA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in ...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.legacy.preprocessing.image import ( DirectoryIterator as DirectoryIterator, ) from keras.src.legacy.preprocessing.image import ( ImageDataGenerator as ImageDataGenerator, ) fr...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.legacy.preprocessing.image import DirectoryIterator from keras.src.legacy.preprocessing.image import ImageDataGenerator from keras.src.legacy.preprocessing.image import Iterator from ...
import os import sys import pytest import torch import torchaudio from torchaudio.pipelines import CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "examples")) from source_separation.utils.metrics import sdr @pytest.mark.par...
import os import sys import pytest import torch import torchaudio from torchaudio.pipelines import CONVTASNET_BASE_LIBRI2MIX from torchaudio.prototype.pipelines import HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "examples")) from source_separation.u...
from urllib.parse import urlparse from backend.blocks.github._auth import GithubCredentials from backend.util.request import Requests def _convert_to_api_url(url: str) -> str: """ Converts a standard GitHub URL to the corresponding GitHub API URL. Handles repository URLs, issue URLs, pull request URLs, a...
from urllib.parse import urlparse from backend.blocks.github._auth import GithubCredentials from backend.util.request import Requests def _convert_to_api_url(url: str) -> str: """ Converts a standard GitHub URL to the corresponding GitHub API URL. Handles repository URLs, issue URLs, pull request URLs, a...
from torchaudio._internal.module_utils import dropping_io_support, dropping_class_io_support # Initialize extension and backend first from . import _extension # noqa # usort: skip from ._backend import ( # noqa # usort: skip AudioMetaData as _AudioMetaData, get_audio_backend as _get_audio_backend, info...
from torchaudio._internal.module_utils import dropping_io_support # Initialize extension and backend first from . import _extension # noqa # usort: skip from ._backend import ( # noqa # usort: skip AudioMetaData, get_audio_backend as _get_audio_backend, info as _info, list_audio_backends as _list_a...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import pycocotools.mask as mask_util import torch from mmengine.utils import slice_list def split_combined_polys(polys, poly_lens, polys_per_mask): """Split the combined 1-D polys into masks. A mask is represented as a list of polys, and a po...
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import numpy as np import pycocotools.mask as mask_util import torch def split_combined_polys(polys, poly_lens, polys_per_mask): """Split the combined 1-D polys into masks. A mask is represented as a list of polys, and a poly is represented as a...
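The rows above pair two nearby revisions of the same Python source file (compare the version strings '0.12.4' and '0.12.3' in one pair, or the renamed SpladeLambdaSchedulerCallback import in another), which reads like a code-editing corpus. As a minimal sketch of consuming such pairs, assuming the table is published through the Hugging Face datasets library (the repository ID 'org/code-edit-pairs' below is a placeholder, not a real name):

import difflib

from datasets import load_dataset

# Placeholder repository ID; substitute the dataset's actual name.
ds = load_dataset("org/code-edit-pairs", split="train")

for row in ds.select(range(3)):
    # Each cell holds a whole source file of up to about 5k characters,
    # so a plain line diff exposes the edit the pair encodes.
    diff = difflib.unified_diff(
        row["input"].splitlines(),
        row["output"].splitlines(),
        fromfile="input",
        tofile="output",
        lineterm="",
    )
    for line in diff:
        print(line)

Because many pairs appear to differ by only a few lines, the diff view is far shorter than either cell and makes each transformation easy to inspect.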