| input (string, length 33–5k) | output (string, length 32–5k) |
|---|---|
import multiprocessing
from copy import deepcopy
from functools import partial
from typing import TYPE_CHECKING
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import GatewayProtocolType, PodRoleType
from jina.parsers.helper import _set_gateway_uses
if TYPE_... | import multiprocessing
from copy import deepcopy
from functools import partial
from typing import TYPE_CHECKING
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import GatewayProtocolType, PodRoleType
if TYPE_CHECKING:
from argparse import Namespace
def... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | from docarray.base_doc.mixins.io import IOMixin
from docarray.base_doc.mixins.update import UpdateMixin
__all__ = ['IOMixin', 'UpdateMixin']
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.4"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from... | """FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.3"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from... |
from typing_extensions import TYPE_CHECKING
from docarray.typing.bytes import AudioBytes, ImageBytes, VideoBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import An... | from typing_extensions import TYPE_CHECKING
from docarray.typing.bytes import AudioBytes, ImageBytes, VideoBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import An... |
import socket
import sys
from threading import Thread
import numpy as np
import pytest
from loky import get_reusable_executor
import xgboost as xgb
from xgboost import RabitTracker, build_info, federated
from xgboost import testing as tm
def run_rabit_worker(rabit_env: dict, world_size: int) -> int:
with xgb.co... | import multiprocessing
import socket
import sys
from threading import Thread
import numpy as np
import pytest
import xgboost as xgb
from xgboost import RabitTracker, build_info, federated
from xgboost import testing as tm
def run_rabit_worker(rabit_env, world_size):
with xgb.collective.CommunicatorContext(**rab... |
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from rich.console import Console
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode
from docarray.base_document.mixins import IOMixin,... | import os
from typing import Type, Optional, TypeVar
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from rich.console import Console
import pickle
import base64
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode
fro... |
import logging
from backend.util.settings import AppEnvironment, BehaveAs, Settings
settings = Settings()
def configure_logging():
import autogpt_libs.logging.config
if (
settings.config.behave_as == BehaveAs.LOCAL
or settings.config.app_env == AppEnvironment.LOCAL
):
autogpt_li... | from logging import Logger
from backend.util.settings import AppEnvironment, BehaveAs, Settings
settings = Settings()
def configure_logging():
import logging
import autogpt_libs.logging.config
if (
settings.config.behave_as == BehaveAs.LOCAL
or settings.config.app_env == AppEnvironment... |
from pydantic import BaseModel
from typing import Dict
def _to_camel_case(snake_str: str) -> str:
components = snake_str.split('_')
# We capitalize the first letter of each component except the first one
# with the 'title' method and join them together.
return components[0] + ''.join(x.title() for x i... | from pydantic import BaseModel
class JinaHealthModel(BaseModel):
"""Pydantic BaseModel for Jina health check, used as the response model in REST app."""
...
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, levels_to_images, mask2nda... | # Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, levels_to_images, mask2nda... |
"""Loading a pickled model generated by test_pickling.py, only used by
`test_gpu_with_dask.py`"""
import json
import os
import numpy as np
import pytest
from test_gpu_pickling import build_dataset, load_pickle, model_path
import xgboost as xgb
from xgboost import testing as tm
class TestLoadPickle:
def test_lo... | """Loading a pickled model generated by test_pickling.py, only used by
`test_gpu_with_dask.py`"""
import json
import os
import numpy as np
import pytest
from test_gpu_pickling import build_dataset, load_pickle, model_path
import xgboost as xgb
from xgboost import testing as tm
class TestLoadPickle:
def test_loa... |
from typing import Union, Optional, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def _insert_doc_at_idx(self, doc, idx: Optional[int] = None):
if idx ... | from typing import Union, Optional, Iterable
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def _insert_doc_at_idx(self, doc, idx: Optional[int] = None):
if idx is None:
idx ... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.html.bs4 import BS4HTMLParser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling ... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.html.bs4 import BS4HTMLParser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling ... |
from __future__ import annotations
import logging
import torch
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
logger = logging.getLogger(__name__)
class WordWeights(Module):
"""This model can weight word embeddings, for example, with idf-values."""
config_keys: list[s... | from __future__ import annotations
import json
import logging
import os
import torch
from torch import Tensor, nn
logger = logging.getLogger(__name__)
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: list[str], word_weights:... |
import urllib.request
from typing import List
from defusedxml.ElementTree import fromstring
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.web import AsyncWebPageReader
XML_SITEMAP_SCHEMA = "http://www.sitemaps.org/schemas/sitemap/0.9"
STRIPE... | import urllib.request
import xml.etree.ElementTree as ET
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.web import AsyncWebPageReader
XML_SITEMAP_SCHEMA = "http://www.sitemaps.org/schemas/sitemap/0.9"
STRIPE_SITEMAP_UR... |
"""Multion tool spec."""
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class MultionToolSpec(BaseToolSpec):
"""Multion tool spec."""
spec_functions = ["browse"]
def __init__(self, api_key: str) -> None:
"""Initialize with parameters."""
from multion.client import MultiO... | """Multion tool spec."""
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class MultionToolSpec(BaseToolSpec):
"""Multion tool spec."""
spec_functions = ["browse"]
def __init__(self, api_key: str) -> None:
"""Initialize with parameters."""
from multion.client import MultiOn... |
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import BinaryClassificationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseE... | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import BinaryClassificationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseE... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.config import backend
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from kera... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.config import backend
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from kera... |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import subprocess
def is_installed(package: str) -> bool:
"""Check package whether installed.
Args:
package (str): Name of package to be checked.
"""
# When executing `import mmengine.runner`,
# pkg_resources will be im... | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import subprocess
def is_installed(package: str) -> bool:
"""Check package whether installed.
Args:
package (str): Name of package to be checked.
"""
# When executing `import mmengine.runner`,
# pkg_resources will be im... |
_base_ = './sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals)))
# augmentation strategy originates from DETR.
train_pipeline = [
dict(type='LoadImageFr... | _base_ = './sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals)))
# augmentation strategy originates from DETR.
train_pipeline = [
dict(
type='Lo... |
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and i... | from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and i... |
from __future__ import annotations
import logging
import time
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_commu... | from __future__ import annotations
import logging
import time
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_commu... |
from pathlib import Path
from typing import List, Tuple, Union
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class WSJ0Mix(Dataset):
"""Create a Dataset for wsj0-mix.
Args:
root (str or Path): Path to the directory wher... | from pathlib import Path
from typing import Union, Tuple, List
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class WSJ0Mix(Dataset):
"""Create a Dataset for wsj0-mix.
Args:
root (str or Path): Path to the directory wher... |
from docarray.array.documentarray import DocumentArray
| from .documentarray import DocumentArray
|
"""
This examples measures the inference speed of a certain model
Usage:
python evaluation_inference_speed.py
OR
python evaluation_inference_speed.py model_name
"""
import sys
import time
import torch
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
# Limit torch to 4 threads... | """
This examples measures the inference speed of a certain model
Usage:
python evaluation_inference_speed.py
OR
python evaluation_inference_speed.py model_name
"""
import sys
import time
import torch
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
# Limit torch to 4 threads... |
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
ty... | _base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
ty... |
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
# MMEngine support the following two ways, users can choose
# according to convenience
# optim_wrapper = dict(type='AmpOptimWrapper')
_base_.optim_wrapper.type = 'AmpOptimWrapper'
| _base_ = './faster-rcnn_r50_fpn_1x_coco.py'
# fp16 settings
fp16 = dict(loss_scale=512.)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, Flow
from torch_object_detection_segmenter import TorchObjectDetectionSegmenter
def test_exec():
f = Flow().add(uses=TorchObjectDetectionSegmenter... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, Flow
from ...torch_object_detection_segmenter import TorchObjectDetectionSegmenter
def test_exec():
f = Flow().add(uses=TorchObjectDetectionSegme... |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
"""Pydantic v1 compatibility shim."""
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.main import * # noqa: F403
except ImportError:
from pydantic.main import * # type: ignore # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
... | from langchain_core._api import warn_deprecated
try:
from pydantic.v1.main import * # noqa: F403
except ImportError:
from pydantic.main import * # type: ignore # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-co... |
"""
This script contains an example how to perform semantic search with Qdrant.
You need Qdrant up and running locally:
https://qdrant.tech/documentation/quickstart/
Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.:
```
pip install qdrant-client
```
This script was create... | """
This script contains an example how to perform semantic search with Qdrant.
You need Qdrant up and running locally:
https://qdrant.tech/documentation/quickstart/
Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.:
```
pip install qdrant-client
```
This script was create... |
_base_ = './fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py'
model = dict(
data_preprocessor=dict(
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnot... | _base_ = './fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py'
model = dict(
data_preprocessor=dict(
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),... |
import os
import re
from dataclasses import fields
from pathlib import Path
from docarray.document.data import DocumentData
with open('../docarray/document/mixins/_property.py', 'w') as fp:
fp.write(
f'''# auto-generated from {os.path.relpath(__file__, start=Path(__file__).parent.parent.parent)}
from typi... | import re
from dataclasses import fields
from docarray.document.data import DocumentData
with open('../docarray/document/mixins/_property.py', 'w') as fp:
fp.write(
f'''# auto-generated from {__file__}
from typing import TYPE_CHECKING, Dict, List, Optional
if TYPE_CHECKING:
from ...score import NamedS... |
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_400mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_gr... | _base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_400mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_gr... |
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AmplitudeToDB,
ComputeDeltas,
Fade,
FrequencyMasking,
GriffinLim,
InverseMelScale,
InverseSpectrogram,
LFCC,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncoding,
PitchSh... | from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
Spectrogram,
InverseSpectrogram,
GriffinLim,
AmplitudeToDB,
MelScale,
InverseMelScale,
MelSpectrogram,
MFCC,
LFCC,
MuLawEncoding,
MuLawDecoding,
Resample,
TimeStretch,
Fade,
... |
# model settings
model = dict(
type='RetinaNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num... | # model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
type='RetinaNet',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_ind... |
# Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import numpy as np
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
class MMdetHandler(BaseHandler):
threshold = 0.5
def initialize(self, context):
... | # Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import numpy as np
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
from mmdet.utils import register_all_modules
register_all_modules(True)
class MMdetHandl... |
from docarray.typing.tensor.embedding import Embedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import Tensor
__all__ = [
'NdArray',
'Tensor',
'Embedding',
'NdArrayEmbedding',
]
try:
import torch # noqa: F401
except ImportError:
p... | from docarray.typing.tensor.embedding import Embedding, NdArrayEmbedding, TorchEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import Tensor
from docarray.typing.tensor.torch_tensor import TorchTensor
__all__ = [
'NdArray',
'TorchTensor',
'Tensor',
'Embed... |
import os
from typing import BinaryIO, Optional, Union
import pyarrow as pa
import pyarrow.parquet as pq
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..uti... | import os
from typing import BinaryIO, Optional, Union
import pyarrow as pa
import pyarrow.parquet as pq
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..uti... |
from typing import Any, Optional, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_f... | from typing import Any, Optional, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_f... |
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import torch
from torchvision.transforms import InterpolationMode
from ._feature import _Feature, FillTypeJIT
class Mask(_Feature):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_sub... | from __future__ import annotations
from typing import Any, cast, List, Optional, Tuple, Union
import torch
from torchvision.transforms import InterpolationMode
from ._feature import _Feature, FillTypeJIT
class Mask(_Feature):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.... |
import logging
import os
from abc import abstractmethod
from typing import TYPE_CHECKING, Optional
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
if TYPE_CHECKING:
from fastapi import FastAPI
class FastAPIBaseGateway(BaseGateway):
"""Base FastAPI gateway. Implement thi... | import logging
import os
from abc import abstractmethod
from typing import TYPE_CHECKING, Optional
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
if TYPE_CHECKING:
from fastapi import FastAPI
class FastAPIBaseGateway(BaseGateway):
"""Base FastAPI gateway. Implement thi... |
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean)
from .misc import (center_of_mass, flip_tensor, generate_coordinate,
mask2ndarray, multi_apply, unmap)
__all__ = [
'allreduce_grads'... | # Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean)
from .misc import flip_tensor, mask2ndarray, multi_apply, unmap
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'... |
from typing import List, _LiteralGenericAlias, get_args, Tuple
import kuzu
Triple = Tuple[str, str, str]
def create_fresh_database(db: str) -> None:
"""
Create a new Kùzu database by removing existing database directory and its contents.
"""
import shutil
shutil.rmtree(db, ignore_errors=True)
... | from typing import List, _LiteralGenericAlias, get_args, Tuple
import kuzu
Triple = Tuple[str, str, str]
def create_fresh_database(db: str) -> None:
"""
Create a new Kùzu database by removing existing database directory and its contents.
"""
import shutil
shutil.rmtree(db, ignore_errors=True)
... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import bias_init_with_prob, normal_init
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
from .anchor_head ... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model.utils import bias_init_with_prob, normal_init
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
from .anchor... |
import numpy as np
from docarray import BaseDocument
from docarray.typing import AnyEmbedding
def test_set_embedding():
class MyDocument(BaseDocument):
embedding: AnyEmbedding
d = MyDocument(embedding=np.zeros((3, 224, 224)))
assert isinstance(d.embedding, np.ndarray)
assert (d.embedding ==... | import numpy as np
from docarray import BaseDocument
from docarray.typing import Embedding
def test_set_embedding():
class MyDocument(BaseDocument):
embedding: Embedding
d = MyDocument(embedding=np.zeros((3, 224, 224)))
assert isinstance(d.embedding, np.ndarray)
assert (d.embedding == np.ze... |
"""Test the standard tests on the custom chat model in the docs."""
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_tests.unit_tests import ChatModelUnitTests
from .custom_chat_model import ChatParrotLink
class TestChatParrotLinkUnit(ChatModelUnitTests):
@property
def ... | """
Test the standard tests on the custom chat model in the docs
"""
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_tests.unit_tests import ChatModelUnitTests
from .custom_chat_model import ChatParrotLink
class TestChatParrotLinkUnit(ChatModelUnitTests):
@property
def... |
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .memory_profiler_hook import MemoryProfilerHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOX... | # Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook
from .memory_profiler_hook import MemoryProfilerHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .sync_random_si... |
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...tfidf_text_executor import TFIDFTextEncoder
_EMBEDDING_DIM = 130107
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some ... | import os
from jina import Flow, Document, DocumentArray
from ...tfidf_text_executor import TFIDFTextEncoder # is implicitly required
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_generates_embedding():
doc = DocumentArray([Document(text='Han likes eating pizza')])
with Flow.load_conf... |
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writ... | # Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writ... |
import random
import time
import pytest
from jina import Client, Document, DocumentArray, Executor, Flow, requests
@pytest.mark.parametrize('protocol', ['grpc'])
def test_return_order_in_client(protocol):
class ExecutorRandomSleepExecutor(Executor):
@requests
def foo(self, *args, **kwargs):
... | from jina import Flow, Executor, requests, Document, DocumentArray, Client
import random
import time
import pytest
@pytest.mark.parametrize('protocol', ['grpc'])
def test_return_order_in_client(protocol):
class ExecutorRandomSleepExecutor(Executor):
@requests
def foo(self, *args, **kwargs):
... |
import csv
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code... | from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers import SentenceTransformer, LoggingHandler, models, util, InputExample
from sentence_transformers import losses
import os
import gzip
import csv
from datetime import datetime
import logging
from torch.utils.data import ... |
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import os
import platform
import warnings
import cv2
import torch.multiprocessing as mp
from mmengine import DefaultScope
def setup_multi_processes(cfg):
"""Setup multi-processing environment variables."""
# set multi-process start method as `fo... | # Copyright (c) OpenMMLab. All rights reserved.
import datetime
import os
import platform
import warnings
import cv2
import torch.multiprocessing as mp
from mmengine import DefaultScope
def setup_multi_processes(cfg):
"""Setup multi-processing environment variables."""
# set multi-process start method as `fo... |
"""
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric... | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric... |
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
from .misc import find_latest_checkpoint
__all__ = [
'get_root_logger',
'collect_env',
'find_latest_checkpoint',
]
| # Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
__all__ = ['get_root_logger', 'collect_env']
|
import re
import pytest
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowValidationError
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Workflow
def test_decorated_config(workflow):
... | import re
import pytest
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowValidationError
from llama_index.core.workflow.events import Event
from llama_index.core.workflow.workflow import Workflow
def test_decorated_config(workflow):
def f(self, ev: Event... |
_base_ = './sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| _base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
"""Create Package variants for PyPI distribution."""
import argparse
import os
from test_utils import PY_PACKAGE
IN_PATH = os.path.join(PY_PACKAGE, "pyproject.toml.in")
OUT_PATH = os.path.join(PY_PACKAGE, "pyproject.toml")
CHOICES = ["default", "cpu", "manylinux2014"]
NCCL_WHL = """ \"nvidia-nccl-cu12 ; platfo... | """Create Package variants for PyPI distribution."""
import argparse
import os
from test_utils import PY_PACKAGE, ROOT
IN_PATH = os.path.join(PY_PACKAGE, "pyproject.toml.in")
OUT_PATH = os.path.join(PY_PACKAGE, "pyproject.toml")
WHL_CPU = """
[tool.hatch.build.targets.wheel]
packages = ["xgboost/"]
"""
CHOICES = [... |
import json
from typing import Any, Dict, List, Optional, Tuple
import pytest
from jina import Executor, Flow, requests
from jina.clients.base.grpc import client_grpc_options
from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet
from jina.clients.request.helper import _new_data_request
from jina.exce... | import pytest
from jina import Executor, Flow, requests
from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet
from jina.clients.request.helper import _new_data_request
from jina.excepts import BadServer
from jina.logging.logger import JinaLogger
from jina.types.request.data import DataRequest
logger ... |
import pytest
from langchain_core.agents import (
AgentActionMessageLog,
AgentFinish,
)
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import AIMessage, SystemMessage
from langchain.agents.output_parsers.openai_functions import (
OpenAIFunctionsAgentOutputParser,
)... | import pytest
from langchain_core.agents import (
AgentActionMessageLog,
AgentFinish,
)
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import AIMessage, SystemMessage
from langchain.agents.output_parsers.openai_functions import (
OpenAIFunctionsAgentOutputParser,
)... |
"""Module for helper functions for parsing requirements file."""
import os
import re
from typing import Dict, Tuple, cast, List
from pkg_resources import Requirement
# Adopted from requirements-parser:
# https://github.com/madpah/requirements-parser
VCS = [
'git',
'hg',
'svn',
'bzr',
]
VCS_SCHEMES =... | """Module for helper functions for parsing requirements file."""
import os
import re
from typing import Dict, Tuple, cast
from pkg_resources import Requirement
# Adopted from requirements-parser:
# https://github.com/madpah/requirements-parser
VCS = [
'git',
'hg',
'svn',
'bzr',
]
VCS_SCHEMES = [
... |
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `Senten... | from __future__ import annotations
class InputExample:
"""Structure for one input example with texts, the label and a unique id"""
def __init__(self, guid: str = "", texts: list[str] = None, label: int | float = 0):
"""
Creates one InputExample with the given texts, guid and label
Ar... |
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
# 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
'../common/ssj_scp_270k_coco_instance.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after h... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
# 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
'../common/ssj_scp_270k_coco_instance.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after h... |
from typing import overload, Dict, Optional, List, TYPE_CHECKING, Sequence, Any
from .data import DocumentData
from .mixins import AllMixins
from ..base import BaseDCType
from ..math.ndarray import detach_tensor_if_present
if TYPE_CHECKING:
from ..typing import ArrayType, StructValueType, DocumentContentType
cl... | from typing import overload, Dict, Optional, List, TYPE_CHECKING, Sequence, Any
from .data import DocumentData
from .mixins import AllMixins
from ..base import BaseDCType
from ..math.ndarray import detach_tensor_if_present
if TYPE_CHECKING:
from ..typing import ArrayType, StructValueType, DocumentContentType
cl... |
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever, RetrieverLike
from pydantic import ConfigDict
from langchain.retrievers.docume... | from typing import Any, List
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever, RetrieverLike
from pydantic import ConfigDict
from langchain.retrievers.... |
"""Test BigdlLLM"""
import os
import pytest
from langchain_core.outputs import LLMResult
from langchain_community.llms.bigdl_llm import BigdlLLM
model_ids_to_test = os.getenv("TEST_BIGDLLLM_MODEL_IDS") or ""
skip_if_no_model_ids = pytest.mark.skipif(
not model_ids_to_test,
reason="TEST_BIGDLLLM_MODEL_IDS en... | """Test BigdlLLM"""
import os
import pytest
from langchain_core.outputs import LLMResult
from langchain_community.llms.bigdl_llm import BigdlLLM
model_ids_to_test = os.getenv("TEST_BIGDLLLM_MODEL_IDS") or ""
skip_if_no_model_ids = pytest.mark.skipif(
not model_ids_to_test,
reason="TEST_BIGDLLLM_MODEL_IDS en... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch.nn as nn
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig
@MODELS.register_module()
class FeatureRelayHead(BaseModule):
"""Feature Relay He... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch.nn as nn
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils.typing import MultiConfig
from mmdet.registry import MODELS
@MODELS.register_module()
class FeatureRelayHead(BaseModule):
"""Feat... |
from .objective import squim_objective_base, squim_objective_model, SquimObjective
__all__ = [
"squim_objective_base",
"squim_objective_model",
"SquimObjective",
]
| from .objective import SQUIM_OBJECTIVE, squim_objective_base, squim_objective_model
__all__ = [
"squim_objective_base",
"squim_objective_model",
"SQUIM_OBJECTIVE",
]
|
import numpy as np
import torch
from docarray import Document
from docarray.document import AnyDocument
from docarray.typing import AnyUrl, Embedding, ImageUrl, Tensor, TorchTensor
def test_proto_all_types():
class Mymmdoc(Document):
tensor: Tensor
torch_tensor: TorchTensor
embedding: Emb... | import numpy as np
from docarray import Document
from docarray.document import AnyDocument
from docarray.typing import AnyUrl, Embedding, ImageUrl, Tensor
def test_proto_all_types():
class Mymmdoc(Document):
tensor: Tensor
embedding: Embedding
any_url: AnyUrl
image_url: ImageUrl
... |
import logging
import traceback
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderModelCardData
from sentence_transformers.cross_encoder.evaluation import CrossEncoderNanoBEIREvaluator
from sentence_transformers.cross_encoder.losses import CachedMultipleNegati... | import logging
import traceback
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderModelCardData
from sentence_transformers.cross_encoder.evaluation import CENanoBEIREvaluator
from sentence_transformers.cross_encoder.losses import CachedMultipleNegativesRanking... |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
# Copyright (c) OpenMMLab. All rights reserved.
from ._fast_stop_training_hook import FastStopTrainingHook # noqa: F401,F403
from ._utils import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results, get_detector_cfg,
get_roi_head_cfg, replace_to_ceph)
__all__ = [
... | # Copyright (c) OpenMMLab. All rights reserved.
from ._fast_stop_training_hook import FastStopTrainingHook # noqa: F401,F403
from ._utils import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results, get_detector_cfg,
get_roi_head_cfg)
__all__ = [
'demo_mm_inputs',... |
import os
import pathlib
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class DTD(VisionDataset):
"""`Describable Textures Dataset (DTD) <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_.
Args... | import os
import pathlib
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class DTD(VisionDataset):
"""`Describable Textures Dataset (DTD) <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_.
Args... |
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import testing
class FlattenTest(testing.TestCase):
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse... | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import testing
class FlattenTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
[
{"testcase... |
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of N... | from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of N... |
from typing import List
import argparse
import json
parser = argparse.ArgumentParser(prog="Prepender docs/_versions.json")
parser.add_argument(
"--version",
type=str,
help="The version we wish to prepend (e.g. v0.18.0)",
required=True,
)
args = parser.parse_args()
with open("./docs/_versions.json", en... | from typing import List
import argparse
import json
parser = argparse.ArgumentParser(prog="Prepender docs/_versions.json")
parser.add_argument(
"--version",
type=str,
help="The version we wish to prepend (e.g. v0.18.0)",
required=True,
)
args = parser.parse_args()
with open("./docs/_versions.json") as... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
def palette_val(palette):
"""Convert palette to matplotlib palette.
Args:
palette List[tuple]: A list of color tuples.
Returns:
List[tuple[float]]: A list of RGB matplotlib color tuples.
"""
new_palett... | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import mmdet
def palette_val(palette):
"""Convert palette to matplotlib palette.
Args:
palette List[tuple]: A list of color tuples.
Returns:
List[tuple[float]]: A list of RGB matplotlib color tuples.
"""
... |
# coding=utf-8
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless requir... | # coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless requir... |
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvisi... | """Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvisi... |
from typing import Dict, List, Optional, Set, Tuple, Union
import pytest
from docarray.typing import NdArray, TorchTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal._typing import (
is_tensor_union,
is_type_tensor,
safe_issubclass,
)
from docarray.utils... | from typing import Dict, Optional, Union
import pytest
from docarray.typing import NdArray, TorchTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal._typing import is_tensor_union, is_type_tensor
from docarray.utils._internal.misc import is_tf_available
tf_available... |
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DMod... | from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DMod... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses import deserialize
from keras.src.losses import get
from keras.src.losses import serialize
from keras.src.losses.loss import Loss
from keras.src.losses.losses import CTC
from k... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses import deserialize
from keras.src.losses import get
from keras.src.losses import serialize
from keras.src.losses.loss import Loss
from keras.src.losses.losses import CTC
from k... |
import os
import pickle
from pathlib import Path
from typing import Optional, Tuple
from jina import DocumentArray, Executor, requests
from jina.excepts import PretrainedModelFileDoesNotExist
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-idf sparse embeddings
"""
def __init__(
se... | import os
import pickle
from pathlib import Path
from typing import Optional, Tuple
from jina import DocumentArray, Executor, requests
from jina.excepts import PretrainedModelFileDoesNotExist
from jina_commons.batching import get_docs_batch_generator
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-... |
import numpy as np
import torch
from docarray import Document, Image, Text
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
NdArray,
Tensor,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
clas... | import numpy as np
import torch
from docarray import Document, Image, Text
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
NdArray,
Tensor,
TextUrl,
TorchTensor,
)
def test_multi_modal_doc_proto():
class MyMultiModalDoc(Document):
image: Image
text: Text
... |
"""
Demo for using data iterator with Quantile DMatrix
==================================================
.. versionadded:: 1.2.0
The demo that defines a customized iterator for passing batches of data into
:py:class:`xgboost.QuantileDMatrix` and use this ``QuantileDMatrix`` for
training. The feature is used pri... | """
Demo for using data iterator with Quantile DMatrix
==================================================
.. versionadded:: 1.2.0
The demo that defines a customized iterator for passing batches of data into
:py:class:`xgboost.QuantileDMatrix` and use this ``QuantileDMatrix`` for
training. The feature is used pri... |
"""Generate migrations for partner packages."""
import importlib
from langchain_core.documents import BaseDocumentCompressor, BaseDocumentTransformer
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.retrievers import BaseRetriever
from l... | """Generate migrations for partner packages."""
import importlib
from typing import List, Tuple
from langchain_core.documents import BaseDocumentCompressor, BaseDocumentTransformer
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.retriev... |
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.sparse_encoder import evaluation, losses
from sentence_transformers.training_args import Batch... | from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.sparse_encoder import evaluation, losses
from sentence_transformers.training_args import Batch... |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... | """
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... |
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import (get_device, get_max_cuda_memory, is_cuda_available,
is_mlu_available, is_mps_available, is_npu_available)
__all__ = [
'get_max_cuda_memory', 'get_device', 'is_cuda_available',
'is_mlu_available', 'is_mps_available', 'is_npu... | # Copyright (c) OpenMMLab. All rights reserved.
from .utils import (get_device, get_max_cuda_memory, is_cuda_available,
is_mlu_available, is_mps_available)
__all__ = [
'get_max_cuda_memory', 'get_device', 'is_cuda_available',
'is_mlu_available', 'is_mps_available'
]
|
"""
Compute image embeddings
"""
import os
from PIL import Image
from sentence_transformers import SentenceTransformer, util
def test_simple_encode(clip_vit_b_32_model: SentenceTransformer) -> None:
model = clip_vit_b_32_model
# Encode an image:
image_filepath = os.path.join(
os.path.dirname(os... | """
Compute image embeddings
"""
import unittest
from sentence_transformers import SentenceTransformer, util
import numpy as np
from PIL import Image
import os
class ComputeEmbeddingsTest(unittest.TestCase):
def setUp(self):
self.model = SentenceTransformer('clip-ViT-B-32')
def test_simple_encode(sel... |
_base_ = [
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='GridRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_si... | _base_ = [
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='GridRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requir... |
"""
Computes embeddings
"""
from __future__ import annotations
import numpy as np
import pytest
from sentence_transformers import SentenceTransformer
@pytest.mark.parametrize("normalize_embeddings", (False, True))
@pytest.mark.parametrize("prompt_name", (None, "retrieval"))
def test_encode_multi_process(
stsb_... | """
Computes embeddings
"""
from typing import Optional
import numpy as np
import pytest
from sentence_transformers import SentenceTransformer
@pytest.mark.parametrize("normalize_embeddings", (False, True))
@pytest.mark.parametrize("prompt_name", (None, "retrieval"))
def test_encode_multi_process(
stsb_bert_ti... |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
image_size = (896, 896)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='BN', requires_grad=True)
checkp... | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
image_size = (896, 896)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='BN', requires_grad=True)
checkp... |
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ActivityRegularization")
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Args:
l1: L1 r... | from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ActivityRegularization")
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Args:
l1: L1 r... |
from typing import Dict, List
from llama_index.core.instrumentation.events.base import BaseEvent
from llama_index.core.bridge.pydantic import ConfigDict
class EmbeddingStartEvent(BaseEvent):
"""
EmbeddingStartEvent.
Args:
model_dict (dict): Model dictionary containing details about the embedding... | from typing import Dict, List
from llama_index.core.instrumentation.events.base import BaseEvent
from llama_index.core.bridge.pydantic import ConfigDict
class EmbeddingStartEvent(BaseEvent):
"""EmbeddingStartEvent.
Args:
model_dict (dict): Model dictionary containing details about the embedding mode... |
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussia... | # Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussia... |
from abc import abstractmethod
from typing import Any, List, Optional
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.llms.llm import LLM
from llama_index.core.schema import BaseComponent
from llama_index.core.storage.chat_store import BaseChatStore, SimpleChatStore
from llama_index.core... | from abc import abstractmethod
from typing import Any, List, Optional
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.llms.llm import LLM
from llama_index.core.schema import BaseComponent
from llama_index.core.storage.chat_store import BaseChatStore, SimpleChatStore
from llama_index.core... |
from typing import Union
from torch import nn
import transformers
import torch
from PIL import Image
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None):
super(CLIPModel, self).__init__()
if processor_name is None:
proc... | from typing import Union
from torch import nn
import transformers
import torch
from PIL import Image
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None):
super(CLIPModel, self).__init__()
if processor_name is None:
proc... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel
from torch.nn.parallel.distributed import DistributedDataParallel
from mmengine.model.wrappers import (MM... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from mmengine.model.wrappers import (MMDataParallel, MMDistributedDataParallel,
is_model_wrapper)
from mmengine... |
"""
This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file.
CT will be training using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_ct_from_file.py path/to/sentences.txt
"""
import gzip
import... | """
This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file.
CT will be training using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_ct_from_file.py path/to/sentences.txt
"""
import math
from s... |