| input (string, 33–5k chars) | output (string, 32–5k chars) |
|---|---|
"""
Prompts for implementing Chain of Abstraction.
While official prompts are not given (and the paper finetunes models for the task),
we can take inspiration and use few-shot prompting to generate a prompt for implementing
chain of abstraction in an LLM agent.
"""
REASONING_PROMPT_TEMPALTE = """Generate an abstract... | """Prompts for implementing Chain of Abstraction.
While official prompts are not given (and the paper finetunes models for the task),
we can take inspiration and use few-shot prompting to generate a prompt for implementing
chain of abstraction in an LLM agent.
"""
REASONING_PROMPT_TEMPALTE = """Generate an abstract ... |
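Since the docstring in this pair notes that the paper ships no official prompts, here is a minimal hedged sketch of what such a few-shot chain-of-abstraction template could look like; the placeholder syntax (`[FUNC ... = y1]` markers) and the worked example are illustrative assumptions, not the module's real contents:

```python
# Hypothetical chain-of-abstraction reasoning template; the [FUNC ...] marker
# format and the arithmetic example are illustrative assumptions.
REASONING_PROMPT_TEMPLATE = """Generate an abstract plan of reasoning for the question.
Replace every value that must come from a tool call with an inline marker such as
[FUNC add(3, 2) = y1], and reuse the labels y1, y2, ... in later steps.

Question: Sally has 3 apples and buys 2 more. How many apples does she have?
Abstract plan: Sally has [FUNC add(3, 2) = y1] apples, so the answer is y1.

Question: {question}
Abstract plan:"""

print(REASONING_PROMPT_TEMPLATE.format(question="What is 17 * 24 + 5?"))
```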
from typing import TYPE_CHECKING
import numpy as np
from docarray.dataclasses.enums import DocumentMetadata, ImageType
if TYPE_CHECKING: # pragma: no cover
from docarray import Document
def image_setter(value) -> 'Document':
from docarray import Document
doc = Document(modality='image')
if isins... | from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray import Document
def image_setter(value) -> 'Document':
from docarray import Document
doc = Document(modality='image')
if isinstance(value, str):
doc.uri = value
doc._metadata['im... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.storage import UpstashRedisByteStore, UpstashRedisStore
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.storage import UpstashRedisByteStore, UpstashRedisStore
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... |
import torch
from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat, is_rotated_bounding_format
from ._image import Image
from ._keypoints import KeyPoints
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._tv_tensor import TVTensor
from ._video import Video
# TODO: Fix this. ... | import torch
from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat, is_rotated_bounding_format
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._tv_tensor import TVTensor
from ._video import Video
# TODO: Fix this. We skip this method as it leads to... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class Tes... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class Tes... |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
HubSpotCredentials = APIKeyCredentials
HubSpotCredentialsInput = CredentialsMetaInput[
Literal["hubspot"],
Literal["api_key"],
]
def HubSpotCredentialsField() -... | from typing import Literal
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from backend.data.model import CredentialsField, CredentialsMetaInput
HubSpotCredentials = APIKeyCredentials
HubSpotCredentialsInput = CredentialsMetaInput[
Literal["h... |
import pathlib
from typing import Any, Callable, Optional, Tuple
from PIL import Image
from .utils import download_and_extract_archive, download_url, verify_str_arg
from .vision import VisionDataset
class StanfordCars(VisionDataset):
"""`Stanford Cars <https://ai.stanford.edu/~jkrause/cars/car_dataset.html>`_ D... | import pathlib
from typing import Any, Callable, Optional, Tuple
from PIL import Image
from .utils import download_and_extract_archive, download_url, verify_str_arg
from .vision import VisionDataset
class StanfordCars(VisionDataset):
"""`Stanford Cars <https://ai.stanford.edu/~jkrause/cars/car_dataset.html>`_ D... |
import os
import pytest
import respx
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank as Interface
from llama_index.core.schema import NodeWithScore, Document
from typing import Any
@pytest.fixture()
def mock_local_models(respx_mock: respx.MockRouter) -> None:
respx_mock.get("https://test_url/v1... | import os
import pytest
import respx
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank as Interface
from llama_index.core.schema import NodeWithScore, Document
from typing import Any
@pytest.fixture()
def mock_local_models(respx_mock: respx.MockRouter) -> None:
respx_mock.get("https://test_url/v1... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import VGG
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class NumClassCheckHook(Hook):
"""Check whether the `num_classes` in head matches the length of `CLASSES`
in `d... | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import VGG
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class NumClassCheckHook(Hook):
"""Check whether the `num_classes` in head matches the length of `CLASSES`
in `d... |
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: Senten... | from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: Senten... |
"""Base tool spec class."""
import asyncio
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import T... | """Base tool spec class."""
import asyncio
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import ... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.cifar100 import load_data as load_data
| """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.cifar100 import load_data
|
"""
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
... | """
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling op... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling op... |
"""Azure Cognitive Vision tool spec."""
from typing import List, Optional
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
CV_URL_TMPL = "https://{resource}.cognitiveservices.azure.com/computervision/imageanalysis:analyze"
class AzureCVToolSpec(BaseToolSpec):
"""Azure Cognitive Vi... | """Azure Cognitive Vision tool spec."""
from typing import List, Optional
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
CV_URL_TMPL = "https://{resource}.cognitiveservices.azure.com/computervision/imageanalysis:analyze"
class AzureCVToolSpec(BaseToolSpec):
"""Azure Cognitive Vi... |
import os
import pytest
from unittest import mock
from zhipuai.types.chat.chat_completion import (
Completion,
CompletionChoice,
CompletionMessage,
CompletionUsage,
)
from llama_index.core.base.llms.types import CompletionResponse
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms... | import os
import pytest
from unittest import mock
from zhipuai.types.chat.chat_completion import (
Completion,
CompletionChoice,
CompletionMessage,
CompletionUsage,
)
from llama_index.core.base.llms.types import CompletionResponse
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms... |
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_A... | import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_A... |
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIR... | import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIR... |
_base_ = './maskformer_r50_ms-16xb1-75e_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size=384... | _base_ = './maskformer_r50_mstrain_16x1_75e_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size... |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
import os
import sys
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
_LIB_DIR = Path(__file__).parent / "lib"
def _get_lib_path(lib: str):
suffix = "pyd" if os.name == "nt" else "so"
path = _LIB_DIR / f"{lib}.{suffix}"
retur... | import os
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
_LIB_DIR = Path(__file__).parent / "lib"
def _get_lib_path(lib: str):
suffix = "pyd" if os.name == "nt" else "so"
path = _LIB_DIR / f"{lib}.{suffix}"
return path
de... |
from __future__ import annotations
import json
from typing import TYPE_CHECKING
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.utilities.jira import JiraAPIWrapper
if TYPE_CHECKING:
from collections.abc import Iterator
@pytest.fixture
def mock_jira() -> Iterator[MagicMock]:
... | import json
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.utilities.jira import JiraAPIWrapper
@pytest.fixture
def mock_jira(): # type: ignore
with patch("atlassian.Jira") as mock_jira:
yield mock_jira
@pytest.mark.requires("atlassian")
class TestJiraAPIWrapper:
... |
import json
import logging
from typing import List, Optional
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
class UpstashRedisChatMessageHistory(BaseChatMessageH... | import json
import logging
from typing import List, Optional
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
class UpstashRedisChatMessageHistory(BaseChatMessageH... |
import binascii
import codecs
import marshal
import os
import types as python_types
def default(method):
"""Decorates a method to detect overrides in subclasses."""
method._is_default = True
return method
def is_default(method):
"""Check if a method is decorated with the `default` wrapper."""
re... | import binascii
import codecs
import marshal
import os
import types as python_types
def default(method):
"""Decorates a method to detect overrides in subclasses."""
method._is_default = True
return method
def is_default(method):
"""Check if a method is decorated with the `default` wrapper."""
re... |
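The `default`/`is_default` pair in this row is short enough to reconstruct fully; a self-contained usage sketch (the `Layer` classes are invented for illustration, and the truncated `is_default` body is assumed to read the `_is_default` marker):

```python
def default(method):
    """Decorates a method to detect overrides in subclasses."""
    method._is_default = True
    return method

def is_default(method):
    """Check if a method is decorated with the `default` wrapper."""
    return getattr(method, "_is_default", False)

class Layer:
    @default
    def get_config(self):
        return {}

class MyLayer(Layer):
    def get_config(self):  # an override carries no marker
        return {"units": 8}

# Bound methods forward attribute lookups to the underlying function,
# so the marker survives instantiation.
print(is_default(Layer().get_config))    # True
print(is_default(MyLayer().get_config))  # False
```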
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
... | _base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)... |
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typ... | from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typ... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric i... | # Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results im... |
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from typing... | """**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from typing... |
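The class hierarchy sketched in this row's docstring (`BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser`) is easiest to see from a toy subclass. A minimal sketch, assuming the usual `langchain_core` import path; `CommaListOutputParser` is a hypothetical name:

```python
from langchain_core.output_parsers import BaseOutputParser

class CommaListOutputParser(BaseOutputParser[list[str]]):
    """A toy <name>OutputParser: split an LLM completion on commas."""

    def parse(self, text: str) -> list[str]:
        return [part.strip() for part in text.split(",") if part.strip()]

parser = CommaListOutputParser()
print(parser.parse("red, green, blue"))  # ['red', 'green', 'blue']
```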
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.structures import InstanceData
from mmdet.registry import TASK_UTILS
from ..assigners import AssignResult
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@TASK_UTILS.register_module()
class PseudoSampler(Base... | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.data import InstanceData
from mmdet.registry import TASK_UTILS
from ..assigners import AssignResult
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@TASK_UTILS.register_module()
class PseudoSampler(BaseSample... |
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
... | _base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
... |
from langchain_core.documents import Document
from langchain_core.language_models import FakeListLLM
from langchain_core.prompts import PromptTemplate
from langchain.chains import create_history_aware_retriever
from tests.unit_tests.retrievers.parrot_retriever import FakeParrotRetriever
def test_create() -> None:
... | from langchain_core.documents import Document
from langchain_core.language_models import FakeListLLM
from langchain_core.prompts import PromptTemplate
from langchain.chains import create_history_aware_retriever
from tests.unit_tests.retrievers.parrot_retriever import FakeParrotRetriever
def test_create() -> None:
... |
# Copyright (c) OpenMMLab. All rights reserved.
"""This module defines the :class:`NiceRepr` mixin class, which defines a
``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__``
method, which you must define. This means you only have to overload one
function instead of two. Furthermore, if the ob... | """This module defines the :class:`NiceRepr` mixin class, which defines a
``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__``
method, which you must define. This means you only have to overload one
function instead of two. Furthermore, if the object defines a ``__len__``
method, then the ``__... |
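The `NiceRepr` docstring in this row states the whole contract: define `__nice__` (or just `__len__`) and both `__repr__` and `__str__` follow. A self-contained sketch of that behavior, independent of the OpenMMLab implementation:

```python
class NiceRepr:
    """Define __nice__ once (or just __len__) to get __repr__ and __str__."""

    def __nice__(self):
        # Fall back to the object's length when available, as the docstring describes.
        if hasattr(self, "__len__"):
            return str(len(self))
        raise NotImplementedError(f"Define the __nice__ method for {type(self)!r}")

    def __repr__(self):
        return f"<{self.__class__.__name__}({self.__nice__()}) at {hex(id(self))}>"

    def __str__(self):
        return f"<{self.__class__.__name__}({self.__nice__()})>"

class Group(NiceRepr):
    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

print(Group([1, 2, 3]))  # <Group(3)>
```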
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to... | # Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to... |
from keras.src.backend.numpy import core
from keras.src.backend.numpy import image
from keras.src.backend.numpy import linalg
from keras.src.backend.numpy import math
from keras.src.backend.numpy import nn
from keras.src.backend.numpy import numpy
from keras.src.backend.numpy import random
from keras.src.backend.numpy.... | from keras.src.backend.numpy import core
from keras.src.backend.numpy import image
from keras.src.backend.numpy import linalg
from keras.src.backend.numpy import math
from keras.src.backend.numpy import nn
from keras.src.backend.numpy import numpy
from keras.src.backend.numpy import random
from keras.src.backend.numpy.... |
_base_ = './faster-rcnn_r50-caffe_c4-1x_coco.py'
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=... | _base_ = './faster_rcnn_r50_caffe_c4_1x_coco.py'
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=... |
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.models import conv_tasnet_base, hdemucs_high
@dataclass
class SourceSeparationBundle:
"""Dataclass that bundles components for performing source separation.
Example
... | from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.models import conv_tasnet_base, hdemucs_high
@dataclass
class SourceSeparationBundle:
"""torchaudio.pipelines.SourceSeparationBundle()
Dataclass that bundles components... |
# coding: utf-8
"""Script for generating files with NuGet package metadata."""
import datetime
import sys
from pathlib import Path
from shutil import copyfile
if __name__ == "__main__":
source = Path(sys.argv[1])
current_dir = Path(__file__).absolute().parent
linux_folder_path = current_dir / "runtimes" / ... | # coding: utf-8
"""Script for generating files with NuGet package metadata."""
import datetime
import sys
from pathlib import Path
from shutil import copyfile
if __name__ == "__main__":
source = Path(sys.argv[1])
current_dir = Path(__file__).absolute().parent
linux_folder_path = current_dir / "runtimes" / ... |
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Union
import torch
from mmengine.device import is_cuda_available, is_musa_available
from mmengine.dist.utils import master_only
from mmengine.logging import MMLogger, print_log
class TimeCounter:
"""A tool that counts the a... | # Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Union
import torch
from mmengine.dist.utils import master_only
from mmengine.logging import MMLogger, print_log
class TimeCounter:
"""A tool that counts the average running time of a function or a method.
Users can use ... |
"""**Utility functions** for LangChain.
These functions do not depend on any other LangChain module.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# for type checking and IDE support, we include the imports here
# but we don't want to eagerly import them at runtim... | """**Utility functions** for LangChain.
These functions do not depend on any other LangChain module.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# for type checking and IDE support, we include the imports here
# but we don't want to eagerly import them at runtim... |
import glob
import os
import pytest
from jina import Document, Flow
from jina.constants import __uptime__, __windows__
from jina.enums import LogVerbosity
from jina.helper import colored
from jina.logging.logger import JinaLogger
cur_dir = os.path.dirname(os.path.abspath(__file__))
def log(logger: JinaLogger):
... | import glob
import os
import pytest
from jina import Document, Flow
from jina.constants import __uptime__, __windows__
from jina.enums import LogVerbosity
from jina.helper import colored
from jina.logging.logger import JinaLogger
cur_dir = os.path.dirname(os.path.abspath(__file__))
def log(logger: JinaLogger):
... |
from typing import List
import numpy as np
from torch.utils.data import Dataset
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from sentence_transformers.readers.InputExample import InputExample
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset... | from torch.utils.data import Dataset
from typing import List
from ..readers.InputExample import InputExample
import numpy as np
from transformers.utils.import_utils import is_nltk_available, NLTK_IMPORT_ERROR
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | class UnusableObjectError(NotImplementedError):
...
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.optimizers.schedules.learning_rate_schedule import (
CosineDecay as CosineDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
CosineDecayRestarts as C... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecay
from keras.src.optimizers.schedules.learning_rate_schedule import (
CosineDecayRestarts,
)
from keras.src.optimizers... |
import pytest
from llama_index.core import MockEmbedding
from llama_index.core.chat_engine.condense_plus_context import (
CondensePlusContextChatEngine,
)
from llama_index.core.indices import VectorStoreIndex
from llama_index.core.llms.mock import MockLLM
from llama_index.core.schema import Document
SYSTEM_PROMPT... | import pytest
from llama_index.core import MockEmbedding
from llama_index.core.chat_engine.condense_plus_context import (
CondensePlusContextChatEngine,
)
from llama_index.core.indices import VectorStoreIndex
from llama_index.core.llms.mock import MockLLM
from llama_index.core.schema import Document
SYSTEM_PROMPT... |
# Copyright (c) OpenMMLab. All rights reserved.
from ._deepspeed import DeepSpeedOptimWrapper
from .amp_optimizer_wrapper import AmpOptimWrapper
from .apex_optimizer_wrapper import ApexOptimWrapper
from .base import BaseOptimWrapper
from .builder import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
bui... | # Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils import is_installed
from .amp_optimizer_wrapper import AmpOptimWrapper
from .apex_optimizer_wrapper import ApexOptimWrapper
from .base import BaseOptimWrapper
from .builder import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
build_opt... |
from typing import Literal
from pydantic import SecretStr
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
OAuth2Credentials,
)
from backend.integrations.providers import ProviderName
from backend.util.settings import Secrets
secrets = Secrets()
GITHUB_OAUTH... | from typing import Literal
from pydantic import SecretStr
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
OAuth2Credentials,
)
from backend.integrations.providers import ProviderName
from backend.util.settings import Secrets
secrets = Secrets()
GITHUB_OAUTH... |
from typing import Any, Optional, Union
import torch
from torch import nn
def _replace_relu(module: nn.Module) -> None:
reassign = {}
for name, mod in module.named_children():
_replace_relu(mod)
# Checking for explicit type instead of instance
# as we only want to replace modules of t... | from typing import Any, List, Optional, Union
import torch
from torch import nn
def _replace_relu(module: nn.Module) -> None:
reassign = {}
for name, mod in module.named_children():
_replace_relu(mod)
# Checking for explicit type instead of instance
# as we only want to replace module... |
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
... | _base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
... |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
... | from dataclasses import dataclass
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
task: str = "language-modeling"
input_schema: ClassVar[Features] = Features({"text": Value("string")})
l... |
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any
from sentence_transformers.model_card import SentenceTransformerModelCardCallback, SentenceTransformerModelCardData
from sentence_transformers.util import is_datase... | from __future__ import annotations
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any
from sentence_transformers.model_card import SentenceTransformerModelCardCallback, SentenceTransformerModelCardData
from sentence_transformers.util import is_datase... |
# Copyright (c) Meta Platforms, Inc. and affiliates
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class LlamaLLM(OpenAILike):
"""
Llama LLM.
Examples:
`pip install llama-index-llms-meta`
```python
from llama_index.llms.meta import... | import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class LlamaLLM(OpenAILike):
"""
Llama LLM.
Examples:
`pip install llama-index-llms-meta`
```python
from llama_index.llms.meta import LlamaLLM
# set api key in env or in llm
... |
from typing import TYPE_CHECKING, Optional, Dict
if TYPE_CHECKING:
from ... import DocumentArray
class PostMixin:
"""Helper functions for posting DocumentArray to Jina Flow."""
def post(
self,
host: str,
show_progress: bool = False,
batch_size: Optional[int] = None,
... | from typing import TYPE_CHECKING, Optional, Dict
if TYPE_CHECKING:
from ... import DocumentArray
class PostMixin:
"""Helper functions for posting DocumentArray to Jina Flow."""
def post(
self,
host: str,
show_progress: bool = False,
batch_size: Optional[int] = None,
... |
import logging
import typing
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
import fastapi
import prisma
import backend.data.graph
import backend.integrations.creds_manager
import backend.integrations.webhooks.graph_lifecycle_hooks
import backend.server.v2.library.db
import backend.server.v2.lib... | import logging
import typing
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
import fastapi
import backend.data.graph
import backend.server.v2.library.db
import backend.server.v2.library.model
logger = logging.getLogger(__name__)
router = fastapi.APIRouter()
@router.get(
"/agents",
ta... |
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = Ty... | import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = Ty... |
from typing import Type, TYPE_CHECKING
from docarray import Document
if TYPE_CHECKING:
from docarray.typing import T
class EmptyMixin:
"""Helper functions for building arrays with empty Document."""
@classmethod
def empty(cls: Type['T'], size: int = 0, *args, **kwargs) -> 'T':
"""Create a :... | from typing import Type, TYPE_CHECKING
from ... import Document
if TYPE_CHECKING:
from ...typing import T
class EmptyMixin:
"""Helper functions for building arrays with empty Document."""
@classmethod
def empty(cls: Type['T'], size: int = 0, *args, **kwargs) -> 'T':
"""Create a :class:`Docu... |
# Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .lvis_metric import LVISMetric
from .openimages_metric import OpenImagesMetric
from .voc_metric import VOCMetric
__all__ = [
... | # Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .openimages_metric import OpenImagesMetric
from .voc_metric import VOCMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'C... |
from typing import List
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request impor... | from typing import List
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request impor... |
"""
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client):
# generate some random data for demonstration
m = 100000
... | """
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
import xgboost as xgb
from xgboost.dask import DaskDMatrix
def main(client):
# generate some random data for demonstration
m = 100000
n = 100
... |
"""Guideline evaluation."""
import asyncio
import logging
from typing import Any, Optional, Sequence, Union, cast
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.llms.llm import LLM
from llama_index.core.o... | """Guideline evaluation."""
import asyncio
import logging
from typing import Any, Optional, Sequence, Union, cast
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.llms.llm import LLM
from llama_index.core.o... |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# data
train_dataloader = dict(batch_size=8)
# model
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained'... | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# data
data = dict(samples_per_gpu=8)
# optimizer
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', c... |
# Owner(s): ["oncall: distributed"]
import json
import logging
import re
import sys
from functools import partial, wraps
import torch
import torch.distributed as dist
from torch.distributed.c10d_logger import _c10d_logger, _exception_logger
if not dist.is_available():
print("Distributed not available, skipping ... | # Owner(s): ["oncall: distributed"]
import json
import logging
import os
import re
import sys
from functools import partial, wraps
import torch
import torch.distributed as dist
from torch.distributed.c10d_logger import _c10d_logger, _exception_logger
if not dist.is_available():
print("Distributed not available,... |
__all__ = ['filter_docs']
import json
from typing import Dict, List, Union
from docarray.array.abstract_array import AnyDocArray
from docarray.array.array.array import DocArray
def filter_docs(
docs: AnyDocArray,
query: Union[str, Dict, List[Dict]],
) -> AnyDocArray:
"""
Filter the Documents in the ... | import json
from typing import Dict, List, Union
from docarray.array.abstract_array import AnyDocArray
from docarray.array.array.array import DocArray
def filter_docs(
docs: AnyDocArray,
query: Union[str, Dict, List[Dict]],
) -> AnyDocArray:
"""
Filter the Documents in the index according to the give... |
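`filter_docs` in this row filters an `AnyDocArray` with a MongoDB-style query. A usage sketch under the assumption that this docarray revision exposes `filter_docs` from `docarray.utils.filter` and supports the `$lte` operator; the `Item` schema and field names are invented:

```python
from docarray import BaseDoc, DocArray
from docarray.utils.filter import filter_docs  # assumed public path for this revision

class Item(BaseDoc):
    text: str
    price: int

docs = DocArray[Item]([Item(text='shoes', price=30), Item(text='hat', price=5)])
cheap = filter_docs(docs, {'price': {'$lte': 10}})  # MongoDB-style operator
print([d.text for d in cheap])  # ['hat']
```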
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import ImageBytes
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow ... | import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.exp... |
"""Math utils."""
import logging
from typing import List, Optional, Tuple, Union
import numpy as np
logger = logging.getLogger(__name__)
Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray]
def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
"""Row-wise cosine similarity between two equal-wi... | """Math utils."""
import logging
from typing import List, Optional, Tuple, Union
import numpy as np
logger = logging.getLogger(__name__)
Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray]
def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
"""Row-wise cosine similarity between two equal-wi... |
import logging
import sentry_sdk
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from backend.util.settings import Settings
def sentry_init():
sentry_dsn = Settings().secrets.sentry_dsn
sentry_sdk.init(
dsn=sentry_dsn,... | import logging
import sentry_sdk
from backend.util.settings import Settings
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.launchdarkly import LaunchDarklyIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
def sentry_init():
sentry_dsn = Se... |
import os
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, Flow
from jina_commons.indexers.dump import import_metas, import_vectors
from ..mongo_storage import doc_without_embedding
NUM_DOCS = 10
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose... | import os
from pathlib import Path
import pytest
import numpy as np
from jina import Document, DocumentArray, Flow, Executor
from jina_commons.indexers.dump import import_vectors, import_metas
from ..mongo_storage import doc_without_embedding
NUM_DOCS = 10
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but hope to see some on the hub soon
m... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but hope to see some on the hub soon
m... |
import os
import urllib
import numpy as np
import pytest
from PIL import Image
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CU... | import os
import urllib
import numpy as np
import PIL
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..'... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.7.1'
mmengi... | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.6.0'
mmengi... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
from mmengine.config import ConfigDict
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone,... |
r"""
PyTorch Profiler is a tool that allows the collection of performance metrics during training and inference.
Profiler's context manager API can be used to better understand what model operators are the most expensive,
examine their input shapes and stack traces, study device kernel activity and visualize the execut... | # mypy: allow-untyped-defs
r"""
PyTorch Profiler is a tool that allows the collection of performance metrics during training and inference.
Profiler's context manager API can be used to better understand what model operators are the most expensive,
examine their input shapes and stack traces, study device kernel activi... |
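This row's docstring describes the profiler's context-manager API, per-operator costs, and input-shape recording; a minimal CPU-only sketch exercising those pieces:

```python
import torch
from torch.profiler import ProfilerActivity, profile

model = torch.nn.Linear(64, 8)
x = torch.randn(32, 64)

# Profile one forward pass; record_shapes keeps per-operator input shapes.
with profile(activities=[ProfilerActivity.CPU], record_shapes=True) as prof:
    model(x)

# Rank operators by total CPU time to find the most expensive ones.
print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=5))
```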
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It generates sentence embeddings
that can be compared using cosine-similarity to measure the similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_... | """
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch. It generates sentence embeddings
that can be compared using cosine-similarity to measure the similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
from .version import __version__, short_version
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_v... | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
from .version import __version__, short_version
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_v... |
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .make_divisible import make_divisible
from .misc import (... | # Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .ema import ExpMomentumEMA
from... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
register_all_modules()
clas... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
class TestSingleStageInstanceSegmentor(TestCa... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.jaxarray import JaxArray, metaJax
MAX_INT_16 = 2**15
@_register_proto(proto_type_name='image_jaxarray')
class ImageJaxArray(JaxArray, AbstractImage... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
"""Check invalid loss hook.
This hook will regularly check whether the loss is valid
during training.
Args:
... | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
"""Check invalid loss hook.
This hook will regularly check whether the loss is valid
during training.
Args:
interval (int): Checki... |
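The hook in this row "regularly check[s] whether the loss is valid during training"; a minimal mmcv-1.x-style sketch of the same idea. `FiniteLossCheckHook` is a hypothetical name, and reading the loss from `runner.outputs['loss']` is an assumption:

```python
import torch
from mmcv.runner.hooks import HOOKS, Hook

@HOOKS.register_module()
class FiniteLossCheckHook(Hook):  # hypothetical stand-in, not the real hook
    """Warn every `interval` training iterations if the loss is NaN or Inf."""

    def __init__(self, interval=50):
        self.interval = interval

    def after_train_iter(self, runner):
        if self.every_n_iters(runner, self.interval):
            if not torch.isfinite(runner.outputs['loss']):  # assumed key
                runner.logger.warning('loss becomes infinite or NaN!')
```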
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... |
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class GFL(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=No... | from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class GFL(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
... |
"""Xgboost training summary integration submodule."""
from dataclasses import dataclass, field
from typing import Dict, List
@dataclass
class XGBoostTrainingSummary:
"""
A class that holds the training and validation objective history
of an XGBoost model during its training process.
"""
train_ob... | """Xgboost training summary integration submodule."""
from dataclasses import dataclass, field
from typing import Dict, List
@dataclass
class XGBoostTrainingSummary:
"""
A class that holds the training and validation objective history
of an XGBoost model during its training process.
"""
train_ob... |
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | # flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... |
_base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
data_root = 'data/coco/'
model = dict(test_cfg=dict(
max_per_img=300,
chunked_size=40,
))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
ty... | _base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
data_root = 'data/lvis/'
model = dict(test_cfg=dict(
max_per_img=300,
chunked_size=40,
))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
ty... |
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualiz... | # Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualiz... |
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
extract_archive,
)
_RELEASE_CONFIGS = {
"release1": {
"folder_in_... | import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
extract_archive,
)
_RELEASE_CONFIGS = {
"release1": {
"folder_in_... |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init_... | from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init_... |
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.ndarray import NdArray
MAX_INT_16 = 2**15
@_register_proto(proto_type_name='image_ndarray')
class ImageNdArray(AbstractImageTensor, NdArray):
"... | from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.ndarray import NdArray
MAX_INT_16 = 2**15
@_register_proto(proto_type_name='image_ndarray')
class ImageNdArray(AbstractImageTensor, NdArray):
"... |
"""**Callback handlers** allow listening to events in LangChain.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core... | """**Callback handlers** allow listening to events in LangChain.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.callbacks.base im... |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
"""Fake LLM wrapper for testing purposes."""
from collections.abc import Mapping
from typing import Any, Optional, cast
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import model_validator
class FakeLLM(LLM):
"""Fake LLM w... | """Fake LLM wrapper for testing purposes."""
from collections.abc import Mapping
from typing import Any, Optional, cast
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import model_validator
class FakeLLM(LLM):
"""Fake LLM w... |
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFi... | _base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
... |
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
from pyda... | """Question-answering with sources over a vector database."""
import warnings
from typing import Any, Dict, List
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorSto... |
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],... | import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],... |
from __future__ import annotations
import csv
import gzip
import os
from collections.abc import Generator
import pytest
from torch.utils.data import DataLoader
from sentence_transformers import CrossEncoder, util
from sentence_transformers.cross_encoder.evaluation import CrossEncoderCorrelationEvaluator
from sentenc... | from __future__ import annotations
import csv
import gzip
import os
from collections.abc import Generator
import pytest
from torch.utils.data import DataLoader
from sentence_transformers import CrossEncoder, util
from sentence_transformers.cross_encoder.evaluation import CrossEncoderCorrelationEvaluator
from sentenc... |
"""Standard LangChain interface tests for Responses API"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_openai import ChatOpenAI
from tests.integration_tests.chat_models.test_base_standard import TestOpenAIStandard
class TestOpenAIResponses(TestOpenAIStandard):
@property... | """Standard LangChain interface tests for Responses API"""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_openai import ChatOpenAI
from tests.integration_tests.chat_models.test_base_standard import TestOpenAIStandard
class TestOpenAIResponses(TestOpenA... |
"""Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import platform
import warnings
from collections.abc import Sequence
import numpy as np
from ..exceptions import DataConversionWarning
from . import _joblib, metadata_routing
from ._bunch... | """Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import platform
import warnings
from collections.abc import Sequence
import numpy as np
from ..exceptions import DataConversionWarning
from . import _joblib, metadata_routing
from ._bunch... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides... | import mmcv
import torch
from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides
image into grids, and encodes bbox (x1, y1, ... |
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementati... | import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementati... |
import os
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('docker-compose-testing')
@pytest.fixture
def image_nam... | import os
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('docker-compose-testing')
@pytest.fixture
def image_nam... |