| input (string, 33–5k chars) | output (string, 32–5k chars) |
|---|---|
r'''
FX is a toolkit for developers to use to transform ``nn.Module``
instances. FX consists of three main components: a **symbolic tracer,**
an **intermediate representation**, and **Python code generation**. A
demonstration of these components in action:
::
import torch
# Simple module for demonstration
... | r'''
FX is a toolkit for developers to use to transform ``nn.Module``
instances. FX consists of three main components: a **symbolic tracer,**
an **intermediate representation**, and **Python code generation**. A
demonstration of these components in action:
::
import torch
# Simple module for demonstration
... |
_base_ = './mask-rcnn_hrnetv2p-w32-1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
b... | _base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
b... |
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__ = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all foun... | # coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all found library path... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import CentripetalHead
class TestCentripetalHead(TestCase):
def test_centripetal_head_loss(self):
"""Tests corner head loss when truth is... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet.models.dense_heads import CentripetalHead
class TestCentripetalHead(TestCase):
def test_centripetal_head_loss(self):
"""Tests corner head loss when truth is empty... |
from urllib.parse import urlparse
from backend.blocks.github._auth import (
GithubCredentials,
GithubFineGrainedAPICredentials,
)
from backend.util.request import Requests
def _convert_to_api_url(url: str) -> str:
"""
Converts a standard GitHub URL to the corresponding GitHub API URL.
Handles rep... | from urllib.parse import urlparse
from backend.blocks.github._auth import (
GithubCredentials,
GithubFineGrainedAPICredentials,
)
from backend.util.request import Requests
def _convert_to_api_url(url: str) -> str:
"""
Converts a standard GitHub URL to the corresponding GitHub API URL.
Handles rep... |
"""Tools for model selection, such as cross validation and hyper-parameter tuning."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import typing
from ._classification_threshold import (
FixedThresholdClassifier,
TunedThresholdClassifierCV,
)
from ._plot import LearningCurveD... | """Tools for model selection, such as cross validation and hyper-parameter tuning."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import typing
from ._classification_threshold import (
FixedThresholdClassifier,
TunedThresholdClassifierCV,
)
from ._plot import LearningCurveD... |
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
)
import numpy as np
from .... import Document, DocumentArray
from ....math import ndarray
from ....math.helper import EPSILON
from ....math.ndarray import to_numpy_array
from ....score import NamedScore
if TYPE_CHECKING:
import tensorf... | from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
)
import numpy as np
from .... import Document, DocumentArray
from ....math import ndarray
from ....math.helper import EPSILON
from ....math.ndarray import to_numpy_array
from ....score import NamedScore
if TYPE_CHECKING:
import tensorf... |
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update``
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codeca... | # THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update``
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codeca... |
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T =... | from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T =... |
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all found library path... | # coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from os import environ
from pathlib import Path
from platform import system
from typing import List
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of... |
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=1)))
classes = ('person', )
data = dict(
train=dict(classes=classes),
val=dict(classes=classes),
test=dict(classes=classes))
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/fa... | _base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=1)))
classes = ('person', )
data = dict(
train=dict(classes=classes),
val=dict(classes=classes),
test=dict(classes=classes))
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rc... |
import weakref
from keras.src.backend.common import global_state
def _clear_tensor_attr(tensor_id, attr):
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is not None and tensor_id in attr_dict:
del attr_dict[tensor_id]
def set_tensor_attr(tensor, attr, value):
try:
... | import weakref
from keras.src.backend.common import global_state
def set_tensor_attr(tensor, attr, value):
try:
setattr(tensor, attr, value)
except AttributeError:
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is None:
if value is None:
... |
import asyncio
from typing import AsyncIterator, Iterator, Optional, Union
from jina.helper import get_or_reuse_loop
class _RequestsCounter:
"""Class used to wrap a count integer so that it can be updated inside methods.
.. code-block:: python
def count_increment(i: int, rc: _RequestsCounter):
... | import asyncio
from typing import AsyncIterator, Iterator, Optional, Union
from jina.helper import get_or_reuse_loop
class RequestsCounter:
"""Class used to wrap a count integer so that it can be updated inside methods.
.. code-block:: python
def count_increment(i: int, rc: RequestCounter):
... |
from typing import Union
import torch
from PIL import Image
from torchvision import transforms as tfms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
... | from typing import Union
import torch
from PIL import Image
from torchvision import transforms as tfms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
... |
default_scope = 'mmdet'
default_hooks = dict(
optimizer=dict(type='OptimizerHook', grad_clip=None),
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=di... | default_scope = 'mmdet'
default_hooks = dict(
optimizer=dict(type='OptimizerHook', grad_clip=None),
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=di... |
from typing import Any
from unittest.mock import patch, MagicMock
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.llms import LLMMetadata, CompletionResponse, Completion... | from typing import Any
from unittest.mock import patch, MagicMock
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.llms import LLMMetadata, CompletionResponse, Completion... |
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_A... | import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_A... |
from typing import overload, TYPE_CHECKING, Union, Callable, Optional, Tuple
if TYPE_CHECKING:
from ... import DocumentArray
from ...typing import AnyDNN, T, ArrayType
import numpy as np
class SingletonSugarMixin:
"""Provide sugary syntax for :class:`Document` by inheriting methods from :class:`Docu... | from typing import overload, TYPE_CHECKING, Union, Callable, Optional, Tuple
if TYPE_CHECKING:
from ... import DocumentArray
from ...typing import AnyDNN, T, ArrayType
import numpy as np
class SingletonSugarMixin:
"""Provide sugary syntax for :class:`Document` by inheriting methods from :class:`Docu... |
import csv
import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information... | import torch
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers import SentenceTransformer, LoggingHandler, models, util, InputExample
from sentence_transformers import losses
import os
import gzip
import csv
from datetime import datetime
import logging
#### Just some ... |
_base_ = '../ssd/ssd512_coco.py'
model = dict(
bbox_head=dict(type='PISASSDHead'),
train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=35, norm_type=2)))
| _base_ = '../ssd/ssd512_coco.py'
model = dict(
bbox_head=dict(type='PISASSDHead'),
train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
from llama_index.core.graph_stores.types import GraphStore
from llama_index.graph_stores.neo4j import Neo4jGraphStore
def test_neo4j_graph_store():
names_of_bases = [b.__name__ for b in Neo4jGraphStore.__bases__]
assert GraphStore.__name__ in names_of_bases
| from unittest.mock import MagicMock, patch
from llama_index.core.graph_stores.types import GraphStore
from llama_index.graph_stores.neo4j import Neo4jGraphStore
@patch("llama_index.graph_stores.neo4j.Neo4jGraphStore")
def test_neo4j_graph_store(MockNeo4jGraphStore: MagicMock):
instance: Neo4jGraphStore = MockNeo... |
from __future__ import annotations
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
from torchvision import datapoints
from torchvision._utils import sequence_to_str
from torchvision.transforms.v2.functional import get_dimensions, get_size, is_simple_tensor
def query_bounding_boxes(flat_... | from __future__ import annotations
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
from torchvision import datapoints
from torchvision._utils import sequence_to_str
from torchvision.transforms.v2.functional import get_dimensions, get_spatial_size, is_simple_tensor
def query_bounding_box... |
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
m... | from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
m... |
"""Question-answering with sources over an index."""
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import Field
from l... | """Question-answering with sources over an index."""
from typing import Any, Dict, List
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import F... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.openapi.base import create_openapi_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.openapi.base import create_openapi_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling... |
from .CEBinaryAccuracyEvaluator import CEBinaryAccuracyEvaluator
from .CEBinaryClassificationEvaluator import CEBinaryClassificationEvaluator
from .CECorrelationEvaluator import CECorrelationEvaluator
from .CEF1Evaluator import CEF1Evaluator
from .CERerankingEvaluator import CERerankingEvaluator
from .CESoftmaxAccuracy... | from .CEBinaryAccuracyEvaluator import CEBinaryAccuracyEvaluator
from .CEBinaryClassificationEvaluator import CEBinaryClassificationEvaluator
from .CEF1Evaluator import CEF1Evaluator
from .CECorrelationEvaluator import CECorrelationEvaluator
from .CESoftmaxAccuracyEvaluator import CESoftmaxAccuracyEvaluator
from .CERer... |
import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.data import InstanceData, PixelData
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, c... | import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.data import InstanceData
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx ... |
from langchain_core.agents import AgentAction
def format_xml(
intermediate_steps: list[tuple[AgentAction, str]],
) -> str:
"""Format the intermediate steps as XML.
Args:
intermediate_steps: The intermediate steps.
Returns:
The intermediate steps as XML.
"""
log = ""
for a... | from typing import List, Tuple
from langchain_core.agents import AgentAction
def format_xml(
intermediate_steps: List[Tuple[AgentAction, str]],
) -> str:
"""Format the intermediate steps as XML.
Args:
intermediate_steps: The intermediate steps.
Returns:
The intermediate steps as XML... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .data... | # Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .dataset_wrappers import MultiImageMixDataset
f... |
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
class CrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fn: nn.Module = nn.Identity(), **kwargs) -> None:
"""
Computes the Cross... | from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
class CrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
"""
Computes the Cros... |
from jina.clients.base.grpc import GRPCBaseClient
from jina.clients.mixin import (
AsyncHealthCheckMixin,
AsyncPostMixin,
HealthCheckMixin,
PostMixin,
ProfileMixin,
)
class GRPCClient(GRPCBaseClient, PostMixin, HealthCheckMixin, ProfileMixin):
"""A client connecting to a Gateway using gRPC pro... | from jina.clients.base.grpc import GRPCBaseClient
from jina.clients.mixin import (
AsyncHealthCheckMixin,
AsyncPostMixin,
HealthCheckMixin,
PostMixin,
)
class GRPCClient(GRPCBaseClient, PostMixin, HealthCheckMixin):
"""A client connecting to a Gateway using gRPC protocol.
Instantiate this cla... |
# Copyright (c) OpenMMLab. All rights reserved.
from .amp import autocast
from .base_loop import BaseLoop
from .checkpoint import (CheckpointLoader, find_latest_checkpoint,
get_deprecated_model_names, get_external_models,
get_mmcls_models, get_state_dict,
... | # Copyright (c) OpenMMLab. All rights reserved.
from .base_loop import BaseLoop
from .checkpoint import (CheckpointLoader, find_latest_checkpoint,
get_deprecated_model_names, get_external_models,
get_mmcls_models, get_state_dict,
get_torchvision... |
import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_extmem_qdm, check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_... | import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_singl... |
from typing import TYPE_CHECKING, Optional, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic.networks import Parts
from docarray.proto import NodeProto
T = TypeVar('T', bo... | from typing import TYPE_CHECKING, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic.networks import Parts
from docarray.proto import NodeProto
T = TypeVar('T', bound='AnyUr... |
"""
Use scikit-learn regressor interface with GPU histogram tree method
===================================================================
"""
import dask
from dask import array as da
from dask.distributed import Client
# It's recommended to use dask_cuda for GPU assignment
from dask_cuda import LocalCUDACluster
fr... | """
Use scikit-learn regressor interface with GPU histogram tree method
===================================================================
"""
from dask import array as da
from dask.distributed import Client
# It's recommended to use dask_cuda for GPU assignment
from dask_cuda import LocalCUDACluster
from xgboost i... |
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutMix, RandomMixUp, SimpleCopyPaste
from ._geometry import FixedSizeCrop
from ._misc import PermuteDimensions, TransposeDimensions
from ._type_conversion import LabelToOneHot
| from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomMixup, SimpleCopyPaste
from ._geometry import FixedSizeCrop
from ._misc import PermuteDimensions, TransposeDimensions
from ._type_conversion import LabelToOneHot
|
import importlib
import pytest
from dirty_equals import IsDict
from fastapi.testclient import TestClient
from ...utils import needs_py39, needs_py310
@pytest.fixture(
name="client",
params=[
"tutorial003",
pytest.param("tutorial003_py310", marks=needs_py310),
"tutorial003_an",
... | import pytest
from dirty_equals import IsDict
from fastapi.testclient import TestClient
from docs_src.header_params.tutorial003 import app
client = TestClient(app)
@pytest.mark.parametrize(
"path,headers,expected_status,expected_response",
[
("/items", None, 200, {"X-Token values": None}),
(... |
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from ...flair_text import FlairTextEncoder
_EMBEDDING_DIM = 100
@pytest.fixture(scope='session')
def basic_encoder() -> FlairTextEncoder:
return FlairTextEncoder()
def test_config():
ex = Exe... | from pathlib import Path
import numpy as np
import pytest
from jina import DocumentArray, Document, Executor
from ...flair_text import FlairTextEncoder
@pytest.fixture()
def docs_generator():
return DocumentArray((Document(text='random text') for _ in range(30)))
def test_config():
ex = Executor.load_conf... |
from __future__ import annotations
import os
import tempfile
def is_ci() -> bool:
"""
Check if the code is running in a Continuous Integration (CI) environment.
This is determined by checking for the presence of certain environment variables.
"""
return "GITHUB_ACTIONS" in os.environ
class Safe... | from __future__ import annotations
import tempfile
class SafeTemporaryDirectory(tempfile.TemporaryDirectory):
"""
The GitHub Actions CI on Windows sometimes raises a NotADirectoryError when cleaning up the temporary directory.
This class is a workaround to avoid the error.
Unlike tempfile.TemporaryD... |
PREFIX = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text base... | # flake8: noqa
PREFIX = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from mmengine.data import BaseDataSample
from mmengine.hooks import NaiveVisualizationHook
class TestNaiveVisualizationHook:
def test_after_train_iter(self):
naive_visualization_hook = NaiveVisualizationHook()
... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from mmengine.data import BaseDataSample
from mmengine.hooks import NaiveVisualizationHook
class TestNaiveVisualizationHook:
def test_after_train_iter(self):
naive_visualization_hook = NaiveVisualizationHook()
... |
from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
from backend.integrations.providers import ProviderName
FalCredentials = APIKeyCredentials
FalCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.FAL],
... | from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
FalCredentials = APIKeyCredentials
FalCredentialsInput = CredentialsMetaInput[
Literal["fal"],
Literal["api_key"],
]
TEST_CREDENTIALS = APIKeyCredentials(
id... |
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
import subprocess
def is_installed(package: str) -> bool:
"""Check package whether installed.
Args:
package (str): Name of package to be checked.
"""
# When executing `import mmengine.runner`,
# pkg_res... | # Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
import subprocess
import pkg_resources
from pkg_resources import get_distribution
def is_installed(package: str) -> bool:
"""Check package whether installed.
Args:
package (str): Name of package to be checked.
... |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
... | # Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
... |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... |
from typing import Any
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import Runnable, RunnableLambda
from .parsers import RoleMap
from .utils import load, prepare
def create_chat_prompt(
path: str,
input_name_agent_scratchpad: str = "agent_scratchpa... | from typing import Any, Dict
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import Runnable, RunnableLambda
from .parsers import RoleMap
from .utils import load, prepare
def create_chat_prompt(
path: str,
input_name_agent_scratchpad: str = "agent_scr... |
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incom... | from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_la... |
from .audioclip_text import AudioCLIPTextEncoder
| from .audioclip_text import AudioCLIPTextEncoder |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | from docarray.documents.legacy.legacy_document import LegacyDocument
__all__ = ['LegacyDocument']
|
from ._source_separation_pipeline import (
CONVTASNET_BASE_LIBRI2MIX,
HDEMUCS_HIGH_MUSDB,
HDEMUCS_HIGH_MUSDB_PLUS,
SourceSeparationBundle,
)
from ._tts import (
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHO... | from ._source_separation_pipeline import (
CONVTASNET_BASE_LIBRI2MIX,
HDEMUCS_HIGH_MUSDB,
HDEMUCS_HIGH_MUSDB_PLUS,
SourceSeparationBundle,
)
from ._tts import (
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHO... |
from typing import Any, Literal, Optional
import pytest
import re
import respx
import json
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank
from llama_index.core.schema import NodeWithScore, Document
@pytest.fixture()
def mock_v1_models(respx_mock: respx.MockRouter) -> None:
respx_mock.get("https... | from typing import Any, Literal, Optional
import pytest
import re
from requests_mock import Mocker
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank
from llama_index.core.schema import NodeWithScore, Document
@pytest.fixture()
def mock_v1_models(requests_mock: Mocker) -> None:
requests_mock.get(
... |
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_4.0gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_gr... | _base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_4.0gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_gr... |
"""Patentsview reader that reads patent abstract."""
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
BASE_URL = "https://api.patentsview.org/patents/query"
class PatentsviewReader(BaseReader):
"""
Patentsview reader.
... | """Patentsview reader that reads patent abstract."""
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
BASE_URL = "https://api.patentsview.org/patents/query"
class PatentsviewReader(BaseReader):
"""
Patentsview reader.
... |
from typing import Optional
import torch
from ..modeling_flash_attention_utils import _flash_attention_forward, flash_attn_supports_top_left_mask
from ..utils import logging
logger = logging.get_logger(__name__)
_use_top_left_mask = flash_attn_supports_top_left_mask()
def flash_attention_forward(
module: tor... | from typing import Optional, Tuple
import torch
from ..modeling_flash_attention_utils import _flash_attention_forward, flash_attn_supports_top_left_mask
from ..utils import logging
logger = logging.get_logger(__name__)
_use_top_left_mask = flash_attn_supports_top_left_mask()
def flash_attention_forward(
modu... |
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `Senten... | from __future__ import annotations
import os
from . import InputExample
class LabelSentenceReader:
"""Reads in a file that has at least two columns: a label and a sentence.
This reader can for example be used with the BatchHardTripletLoss.
Maps labels automatically to integers
"""
def __init__(... |
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCasca... | # Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCasca... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .masktrack_rcnn_tracker import MaskTrackRCNNTracker
from .quasi_dense_tracker import QuasiDenseTracker
from .sort_tracker import SORTTracker
__all__ = [
'BaseTracker', 'ByteTracker', 'Qu... | # Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .quasi_dense_tracker import QuasiDenseTracker
from .sort_tracker import SORTTracker
__all__ = ['BaseTracker', 'ByteTracker', 'QuasiDenseTracker', 'SORTTracker']
|
"""Evaluation metrics for cluster analysis results.
- Supervised evaluation uses a ground truth class values for each sample.
- Unsupervised evaluation does not use ground truths and measures the "quality" of the
model itself.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ... | """Evaluation metrics for cluster analysis results.
- Supervised evaluation uses a ground truth class values for each sample.
- Unsupervised evaluation does use ground truths and measures the "quality" of the
model itself.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._bi... |
import multiprocessing
import pytest
from jina import DocumentArray, Executor, requests
from jina.parsers import set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from jina.serve.streamer import GatewayStreamer
class StreamerTestExecutor(... | import multiprocessing
import pytest
from jina import DocumentArray, Executor, requests
from jina.parsers import set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from jina.serve.streamer import GatewayStreamer
class StreamerTestExecutor(... |
from typing import Union, Optional, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def _insert_doc_at_idx(self, doc, idx: Optional[int] = None):
if idx ... | from typing import Union, Optional, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def _insert_doc_at_idx(self, doc, idx: Optional[int] = None):
if idx ... |
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=601)))
# Using 32 GPUS while training
optimizer = dict(type='SGD', lr=0.08, momen... | _base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=601)))
# Using 32 GPUS while training
optimizer = dict(type='SGD', lr=0.08, momen... |
import numpy as np
import scipy.signal
from keras.src import backend
from keras.src import initializers
from keras.src import testing
class ConstantInitializersTest(testing.TestCase):
def test_zeros_initializer(self):
shape = (3, 3)
initializer = initializers.Zeros()
values = initializer... | import numpy as np
from keras.src import backend
from keras.src import initializers
from keras.src import testing
class ConstantInitializersTest(testing.TestCase):
def test_zeros_initializer(self):
shape = (3, 3)
initializer = initializers.Zeros()
values = initializer(shape=shape)
... |
from abc import ABC
class BaseStandardTests(ABC):
"""
:private:
"""
def test_no_overrides_DO_NOT_OVERRIDE(self) -> None:
"""
Test that no standard tests are overridden.
:private:
"""
# find path to standard test implementations
comparison_class = None
... | from abc import ABC
from typing import Type
class BaseStandardTests(ABC):
"""
:private:
"""
def test_no_overrides_DO_NOT_OVERRIDE(self) -> None:
"""
Test that no standard tests are overridden.
:private:
"""
# find path to standard test implementations
... |
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torch.nn.functional import one_hot
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import functional as F, Transform
from torchvision.prototype.transforms.utils import is_simple_ten... | from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torch.nn.functional import one_hot
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import functional as F, Transform
from torchvision.prototype.transforms.utils import is_simple_ten... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FCOS',
prepr... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FCOS',
prepr... |
"""
Utility Tools for the Portkey Class.
This file module contains a collection of utility functions designed to enhance
the functionality and usability of the Portkey class
"""
from typing import TYPE_CHECKING, List
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.llms.anthropic import Anth... | """
Utility Tools for the Portkey Class.
This file module contains a collection of utility functions designed to enhance
the functionality and usability of the Portkey class
"""
from typing import TYPE_CHECKING, List
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.llms.anthropic import Anth... |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=601))
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config =... | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=601))
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config =... |
from unittest import TestCase
import numpy as np
from mmengine.registry import init_default_scope
from mmdet.registry import TASK_UTILS
class TestKalmanFilter(TestCase):
@classmethod
def setUpClass(cls):
init_default_scope('mmdet')
motion = dict(type='KalmanFilter', )
cls.kf = TASK_... | from unittest import TestCase
import numpy as np
from mmdet.registry import TASK_UTILS
from mmdet.utils import register_all_modules
class TestKalmanFilter(TestCase):
@classmethod
def setUpClass(cls):
register_all_modules()
motion = dict(type='KalmanFilter', )
cls.kf = TASK_UTILS.bui... |
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_head import BBoxHead
from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
Shared4Conv1FCBBoxHead)
from .dii_head import DIIHead
from .double_bbox_head import DoubleConvFCBBoxHead
from .sabl_head import SABLHead
from .... | from .bbox_head import BBoxHead
from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
Shared4Conv1FCBBoxHead)
from .dii_head import DIIHead
from .double_bbox_head import DoubleConvFCBBoxHead
from .sabl_head import SABLHead
from .scnet_bbox_head import SCNetBBoxHead
__all__ = ... |
from .paddle_image import ImagePaddlehubEncoder
| from .paddle_image import ImagePaddlehubEncoder |
"""Run smoke tests"""
import os
import sys
from pathlib import Path
import torch
import torch.nn as nn
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is t... | """Run smoke tests"""
import os
from pathlib import Path
from sys import platform
import torch
import torch.nn as nn
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(... |
import argparse
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
... | import argparse
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
... |
from jina import DocumentArray, Executor, Flow, requests
def test_gateway_metric_labels(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
class FirstExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
... | from jina import DocumentArray, Executor, Flow, requests
def test_gateway_metric_labels(monkeypatch_metric_exporter):
collect_metrics, read_metrics = monkeypatch_metric_exporter
class FirstExec(Executor):
@requests()
def meow(self, docs, **kwargs):
return DocumentArray.empty(3)
... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'le... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'le... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import OpenImagesChallengeDataset, OpenImagesDataset
class TestOpenImagesDataset(unittest.TestCase):
def test_init(self):
dataset = OpenImagesDataset(
data_root='tests/data/OpenImages/',
ann_file=... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import OpenImagesChallengeDataset, OpenImagesDataset
class TestOpenImagesDataset(unittest.TestCase):
def test_init(self):
dataset = OpenImagesDataset(
data_root='tests/data/OpenImages/',
ann_file=... |
from backend.data.credit import UsageTransactionMetadata, get_user_credit_model
from backend.data.execution import (
GraphExecutionMeta,
NodeExecutionResult,
RedisExecutionEventBus,
create_graph_execution,
get_incomplete_node_executions,
get_latest_node_execution,
get_node_execution_results,... | from backend.data.credit import UsageTransactionMetadata, get_user_credit_model
from backend.data.execution import (
ExecutionResult,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_ex... |
"""
Python polyfills for operator
"""
from __future__ import annotations
import operator
from typing import Any, Callable, overload, TYPE_CHECKING, TypeVar
from typing_extensions import TypeVarTuple, Unpack
from ..decorators import substitute_in_graph
if TYPE_CHECKING:
from collections.abc import Iterable
# ... | """
Python polyfills for operator
"""
from __future__ import annotations
import operator
from typing import Any, Callable, overload, TypeVar
from typing_extensions import TypeVarTuple, Unpack
from ..decorators import substitute_in_graph
# Most unary and binary operators are handled by BuiltinVariable (e.g., `pos`,... |
from enum import Enum
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_dista... | from enum import Enum
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""
The metric for the contrastive loss
"""
EUCLIDEAN = lambda x, y: F.pair... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Qdrant
from langchain_community.vectorstores.qdrant import QdrantException
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for ... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Qdrant
from langchain_community.vectorstores.qdrant import QdrantException
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for ... |
_base_ = './htc_hrnetv2p-w40_20e_coco.py'
# learning policy
max_epochs = 28
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epo... | _base_ = './htc_hrnetv2p_w40_20e_coco.py'
# learning policy
max_epochs = 28
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epo... |
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeS... | _base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeS... |
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formating import (Collect, DefaultFormatBu... | from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formating import (Collect, DefaultFormatBundle, ImageToTensor,
ToD... |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... |
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
class CosineSimilarityLoss(nn.Module):
def __init__(self, model: SentenceTransformer, loss_fct=nn.MSELoss(), cos_score_transformation=nn.Identity()):
"""
CosineSimilari... | import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
class CosineSimilarityLoss(nn.Module):
def __init__(self, model: SentenceTransformer, loss_fct=nn.MSELoss(), cos_score_transformation=nn.Identity()):
"""
CosineSimilari... |
"""Helper script for triggering Read the docs build.
See `doc/contrib/docs.rst <https://xgboost.readthedocs.io/en/stable/contrib/docs.html>`__
for more info.
"""
import json
import os
import pprint
from http.client import responses as http_responses
import requests # type: ignore
def trigger_build(token: str) ->... | """Helper script for triggering Read the docs build.
See `doc/contrib/docs.rst <https://xgboost.readthedocs.io/en/stable/contrib/docs.html>`__
for more info.
"""
import json
import os
import pprint
from http.client import responses as http_responses
import requests # type: ignore
def trigger_build(token: str) ->... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
import torch
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptMultiConfig
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class SingleRoIExtractor(Base... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
import torch
from torch import Tensor
from mmdet.core.utils.typing import ConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class SingleRoIEx... |
import csv
import logging
import os
from typing import List
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
It is designed for CrossEncoders with 2 or mo... | import logging
import os
import csv
from typing import List
from ... import InputExample
import numpy as np
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
It is designed for CrossEncoders with 2 or more outputs. It measu... |
# Owner(s): ["module: dynamo"]
import unittest
from torch._dynamo import config
from torch._dynamo.testing import make_test_cls_with_patches
try:
from . import test_export
except ImportError:
import test_export
test_classes = {}
def make_dynamic_cls(cls):
suffix = "_inline_and_install"
cls_prefi... | # Owner(s): ["module: dynamo"]
import unittest
from torch._dynamo import config
from torch._dynamo.testing import make_test_cls_with_patches
try:
from . import test_export
except ImportError:
import test_export
test_classes = {}
def make_dynamic_cls(cls):
suffix = "_inline_and_install"
cls_prefi... |
"""Retrieve query."""
import logging
from typing import Any, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.indices.query.schema import QueryBundle
from llama_index.core.indices.tree.base import TreeIndex
... | """Retrieve query."""
import logging
from typing import Any, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.indices.query.schema import QueryBundle
from llama_index.core.indices.tree.base import TreeIndex
f... |
"""
This is a more complex example on performing clustering on large scale dataset.
This examples find in a large set of sentences local communities, i.e., groups of sentences that are highly
similar. You can freely configure the threshold what is considered as similar. A high threshold will
only find extremely simila... | """
This is a more complex example on performing clustering on large scale dataset.
This examples find in a large set of sentences local communities, i.e., groups of sentences that are highly
similar. You can freely configure the threshold what is considered as similar. A high threshold will
only find extremely simila... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
tea... | import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseMSEEvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("nav... |
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssign... | # Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssign... |
from typing import Any, Optional
from typing_extensions import override
from langchain_core.caches import RETURN_VAL_TYPE, BaseCache
from langchain_core.globals import set_llm_cache
from langchain_core.language_models import FakeListLLM
class InMemoryCache(BaseCache):
"""In-memory cache used for testing purpose... | from typing import Any, Optional
from typing_extensions import override
from langchain_core.caches import RETURN_VAL_TYPE, BaseCache
from langchain_core.globals import set_llm_cache
from langchain_core.language_models import FakeListLLM
class InMemoryCache(BaseCache):
"""In-memory cache used for testing purpose... |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.core import url_to_fs
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, extract_path_from_uri, is_remote_filesystem
from .utils import requir... | import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def tes... |
from typing import Any
def get_prompt_input_key(inputs: dict[str, Any], memory_variables: list[str]) -> str:
"""
Get the prompt input key.
Args:
inputs: Dict[str, Any]
memory_variables: List[str]
Returns:
A prompt input key.
"""
# "stop" is a special key that can be p... | from typing import Any
def get_prompt_input_key(inputs: dict[str, Any], memory_variables: list[str]) -> str:
"""
Get the prompt input key.
Args:
inputs: Dict[str, Any]
memory_variables: List[str]
Returns:
A prompt input key.
"""
# "stop" is a special key that can be p... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import IterTimerHook
class TestIterTimerHook:
def test_before_epoch(self):
Hook = IterTimerHook()
Runner = Mock()
Hook._before_epoch(Runner)
assert isinstance(Hook.t, float)
de... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import IterTimerHook
class TestIterTimerHook:
def test_before_epoch(self):
Hook = IterTimerHook()
Runner = Mock()
Hook.before_epoch(Runner)
assert isinstance(Hook.t, float)
def... |
from datetime import datetime
from typing import Any, List
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request im... | from datetime import datetime
from typing import Any, List
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request im... |
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmde... | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.registry import DATASETS
from mmengine.runner import Runner
from tor... |
import json
import multiprocessing
import os
import time
import pytest
from jina.helper import random_port
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import (
_validate_cu... | import json
import multiprocessing
import os
import time
import pytest
from jina.helper import random_port
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import (
ProcessExecu... |
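
The rows above pair an original snippet (`input`) with an edited counterpart (`output`). Below is a minimal sketch of loading and inspecting such a paired dataset with the `datasets` library, assuming it is published with `input`/`output` string columns; the repository id `user/code-edit-pairs` is hypothetical and should be replaced with the real one.

```python
# Minimal sketch, assuming the table above is served as a Hugging Face dataset
# with "input" and "output" string columns.
from datasets import load_dataset

# Hypothetical repository id; substitute the actual dataset name.
ds = load_dataset("user/code-edit-pairs", split="train")

print(ds.column_names)  # expected: ['input', 'output']

row = ds[0]
print(row["input"][:200])   # the original snippet
print(row["output"][:200])  # its edited counterpart
```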