input | output |
|---|---|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .cascade_rcnn import CascadeRCNN
@DETECTORS.register_module()
class SCNet(CascadeRCNN):
"""Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""
def __init__(self, **kwargs):
super(SCNet, self).__init__(**... | from ..builder import DETECTORS
from .cascade_rcnn import CascadeRCNN
@DETECTORS.register_module()
class SCNet(CascadeRCNN):
"""Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""
def __init__(self, **kwargs):
super(SCNet, self).__init__(**kwargs)
|
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.chat_summary_memory_buffer import ChatSummaryMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.memory.vector_memory import VectorMemory
from llama_index.core.memory.simple_composabl... | from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.chat_summary_memory_buffer import ChatSummaryMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.memory.vector_memory import VectorMemory
from llama_index.core.memory.simple_composabl... |
_CTC_DECODERS = [
"CTCHypothesis",
"CTCDecoder",
"CTCDecoderLM",
"CTCDecoderLMState",
"ctc_decoder",
"download_pretrained_files",
]
def __getattr__(name: str):
if name in _CTC_DECODERS:
try:
from . import _ctc_decoder
except Exception as err:
raise R... | _INITIALIZED = False
_LAZILY_IMPORTED = [
"CTCHypothesis",
"CTCDecoder",
"CTCDecoderLM",
"CTCDecoderLMState",
"ctc_decoder",
"download_pretrained_files",
]
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
try:
from . import _ctc_decoder
except AttributeE... |
from io import BytesIO
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.t... | from io import BytesIO
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.t... |
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.mimetypes import OBJ_MIMETYPE
from docarray.utils._internal.misc import import_library
if TYPE_CH... | from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import trimesh
T = TypeVar('T', bound='Url... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
fro... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
fro... |
"""
Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task.
Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings.
It then computes an a... | """
Given a tab separated file (.tsv) with parallel sentences, where the second column is the translation of the sentence in the first column, for example, in the format:
src1 trg1
src2 trg2
...
where trg_i is the translation of src_i.
Given src_i, the TranslationEvaluator checks which trg_j has the highest sim... |
"""String output parser."""
from typing import Optional as Optional
from langchain_core.output_parsers.transform import BaseTransformOutputParser
class StrOutputParser(BaseTransformOutputParser[str]):
"""OutputParser that parses LLMResult into the top likely string."""
@classmethod
def is_lc_serializab... | from typing import Optional as Optional
from langchain_core.output_parsers.transform import BaseTransformOutputParser
class StrOutputParser(BaseTransformOutputParser[str]):
"""OutputParser that parses LLMResult into the top likely string."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""R... |
from __future__ import annotations
import json
import logging
import time
from typing import List, Optional, Type
import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl, validator
from langchain_community.tools.edenai.edenai_base_tool import Eden... | from __future__ import annotations
import json
import logging
import time
from typing import List, Optional, Type
import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl, validator
from langchain_community.tools.edenai.edenai_base_tool import Eden... |
"""Select examples based on length."""
import re
from typing import Callable
from pydantic import BaseModel, Field, model_validator
from typing_extensions import Self
from langchain_core.example_selectors.base import BaseExampleSelector
from langchain_core.prompts.prompt import PromptTemplate
def _get_length_based... | """Select examples based on length."""
import re
from typing import Callable
from pydantic import BaseModel, Field, model_validator
from typing_extensions import Self
from langchain_core.example_selectors.base import BaseExampleSelector
from langchain_core.prompts.prompt import PromptTemplate
def _get_length_based... |
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
@pytest.mark.proto
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor.... | import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()... |
from llama_index.core.node_parser.file.markdown import MarkdownNodeParser
from llama_index.core.schema import Document
def test_header_splits() -> None:
markdown_parser = MarkdownNodeParser()
splits = markdown_parser.get_nodes_from_documents(
[
Document(
text="""# Main Hea... | from llama_index.core.node_parser.file.markdown import MarkdownNodeParser
from llama_index.core.schema import Document
def test_header_splits() -> None:
markdown_parser = MarkdownNodeParser()
splits = markdown_parser.get_nodes_from_documents(
[
Document(
text="""# Main Hea... |
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdAr... | from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdAr... |
"""Utilities for working with HTML."""
import logging
import re
from collections.abc import Sequence
from typing import Optional, Union
from urllib.parse import urljoin, urlparse
logger = logging.getLogger(__name__)
PREFIXES_TO_IGNORE = ("javascript:", "mailto:", "#")
SUFFIXES_TO_IGNORE = (
".css",
".js",
... | import logging
import re
from collections.abc import Sequence
from typing import Optional, Union
from urllib.parse import urljoin, urlparse
logger = logging.getLogger(__name__)
PREFIXES_TO_IGNORE = ("javascript:", "mailto:", "#")
SUFFIXES_TO_IGNORE = (
".css",
".js",
".ico",
".png",
".jpg",
".... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import bounding_boxes
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_ker... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.var... |
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .hooks import *
from .logging import *
from .registry import *
from .utils import *
| # Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .hooks import *
from .registry import *
from .utils import *
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
image_size = (896, 896)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='BN', requires_grad=True)
checkp... | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
image_size = (896, 896)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='BN', requires_grad=True)
checkp... |
_base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals)))
# augmentation strategy originates from DETR.
train_pipeline = [
dict(
typ... | _base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], t... |
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway
from jina.constants import __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyRe... | import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
... |
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.24.1'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version... | # Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.24.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version... |
from __future__ import annotations
import tempfile
class SafeTemporaryDirectory(tempfile.TemporaryDirectory):
"""
The GitHub Actions CI on Windows sometimes raises a NotADirectoryError when cleaning up the temporary directory.
This class is a workaround to avoid the error.
Unlike tempfile.TemporaryD... | from __future__ import annotations
import tempfile
class SafeTemporaryDirectory(tempfile.TemporaryDirectory):
"""
The GitHub Actions CI on Windows sometimes raises a NotADirectoryError when cleaning up the temporary directory.
This class is a workaround to avoid the error.
Unlike tempfile.TemporaryD... |
import subprocess
import pytest
from flair_text import FlairTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 100
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here... | import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...flair_text import FlairTextEncoder
_EMBEDDING_DIM = 100
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text ... |
from PIL import Image
from sentence_transformers import SentenceTransformer, models, util
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPModel, CLIPProcessor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip... | from sentence_transformers import SentenceTransformer, util, models
from PIL import Image
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPProcessor, CLIPModel
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.utils.sequence_utils import pad_sequences
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.le... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.utils.sequence_utils import pad_sequences
|
import importlib.util
import os
import warnings
from functools import wraps
from typing import Optional
def eval_env(var, default):
"""Check if environment varable has True-y value"""
if var not in os.environ:
return default
val = os.environ.get(var, "0")
trues = ["1", "true", "TRUE", "on", "... | import importlib.util
import os
import warnings
from functools import wraps
from typing import Optional
def eval_env(var, default):
"""Check if environment varable has True-y value"""
if var not in os.environ:
return default
val = os.environ.get(var, "0")
trues = ["1", "true", "TRUE", "on", "... |
"""DeepLake reader."""
from typing import List, Optional, Union
import numpy as np
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
distance_metric_map = {
"l2": lambda a, b: np.linalg.norm(a - b, axis=1, ord=2),
"l1": lambda a, b: np.linalg.norm(a - b, axis=1,... | """DeepLake reader."""
from typing import List, Optional, Union
import numpy as np
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
distance_metric_map = {
"l2": lambda a, b: np.linalg.norm(a - b, axis=1, ord=2),
"l1": lambda a, b: np.linalg.norm(a - b, axis=1,... |
"""
This script contains an example of how to perform semantic search with Qdrant.
You need Qdrant up and running locally:
https://qdrant.tech/documentation/quickstart/
Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.:
```
pip install qdrant-client
```
This script was create... | import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_qdrant
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train", trust_remote_code=True).map(
lambda batch: ... |
import pytest
from jina.importer import ImportExtensions
from jina.logging.predefined import default_logger
def test_bad_import():
from jina.logging.predefined import default_logger
with pytest.raises(ModuleNotFoundError):
with ImportExtensions(required=True, logger=default_logger):
impo... | import pytest
from jina.importer import ImportExtensions
from jina.logging.predefined import default_logger
def test_bad_import():
from jina.logging.predefined import default_logger
with pytest.raises(ModuleNotFoundError):
with ImportExtensions(required=True, logger=default_logger):
impo... |
import ipaddress
import socket
from typing import Callable
from urllib.parse import urlparse
import requests as req
from backend.util.settings import Config
# List of IP networks to block
BLOCKED_IP_NETWORKS = [
# --8<-- [start:BLOCKED_IP_NETWORKS]
ipaddress.ip_network("0.0.0.0/8"), # "This" Network
ipa... | import ipaddress
import socket
from typing import Callable
from urllib.parse import urlparse
import requests as req
from backend.util.settings import Config
# List of IP networks to block
BLOCKED_IP_NETWORKS = [
ipaddress.ip_network("0.0.0.0/8"), # "This" Network
ipaddress.ip_network("10.0.0.0/8"), # Priva... |
"""PDF Table reader."""
from pathlib import Path
from typing import Any, Dict, List, Optional
import pandas as pd
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PDFTableReader(BaseReader):
"""
PDF Table Reader. Reads table from PDF.
Args:
... | """PDF Table reader."""
from pathlib import Path
from typing import Any, Dict, List, Optional
import pandas as pd
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PDFTableReader(BaseReader):
"""PDF Table Reader. Reads table from PDF.
Args:
row_... |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# https://github.com/pytorch/fairseq/blob/265df7144c79446f5ea8d835bda6e727f54dad9d/LICENSE
"""
Data pre-processing: create tsv files for training (and validation).
"""
import logg... | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# https://github.com/pytorch/fairseq/blob/265df7144c79446f5ea8d835bda6e727f54dad9d/LICENSE
"""
Data pre-processing: create tsv files for training (and validation).
"""
import logg... |
from typing import List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.office365.base import O365BaseTool
class SendMessageSchema(BaseModel):
"""Input for SendMessageTool."""
body: str = Field(
...,... | from typing import List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.office365.base import O365BaseTool
class SendMessageSchema(BaseModel):
"""Input for SendMessageTool."""
body: str = Field(
...,... |
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
from pydantic import Field
from jina import Executor, requests
class TextDoc(BaseDoc):
text: str
class EmbeddingResponseModel(BaseDoc):
embeddings: NdArray = Field(description="The embedding of the texts", default=... | import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
from pydantic import Field
from jina import Executor, requests
class TextDoc(BaseDoc):
text: str
class EmbeddingResponseModel(BaseDoc):
embeddings: NdArray = Field(description="The embedding of the texts", default=... |
from functools import partial
from typing import Any, Optional
import torch
import torch.nn as nn
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_n... | from functools import partial
from typing import Any, Optional
import torch
import torch.nn as nn
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_n... |
from __future__ import annotations
from typing import Any, Optional
from langchain_core.outputs import LLMResult
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler... | from __future__ import annotations
from typing import Any, Optional
from langchain_core.outputs import LLMResult
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler... |
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
from typing import Dict, Optional
from mmengine.model import is_model_wrapper
from mmengine.registry import HOOKS, MODELS
from .hook import DATA_BATCH, Hook
@HOOKS.register_module()
class EMAHook(Hook):
"""A Hook to apply Exponential Moving Average... | # Copyright (c) OpenMMLab. All rights reserved.
import itertools
from typing import Optional
from mmengine.model import is_model_wrapper
from mmengine.registry import HOOKS, MODELS
from .hook import DATA_BATCH, Hook
@HOOKS.register_module()
class EMAHook(Hook):
"""A Hook to apply Exponential Moving Average (EMA)... |
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
class Dropout(Module):
"""Dropout layer.
Args:
dropout: Sets a dropout value for dense layer.
"""
config_keys: list[str] = ["dropout"]
def __init__(self, dropout: flo... | from __future__ import annotations
import json
import os
from torch import Tensor, nn
class Dropout(nn.Module):
"""Dropout layer.
Args:
dropout: Sets a dropout value for dense layer.
"""
def __init__(self, dropout: float = 0.2):
super().__init__()
self.dropout = dropout
... |
import json
import logging
import os
from typing import Dict, List, Literal
import torch
from torch import Tensor, nn
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting ca... | import json
import logging
import os
from typing import Dict, List
import torch
from torch import Tensor, nn
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be adde... |
from keras.src.export.onnx import export_onnx
from keras.src.export.saved_model import ExportArchive
from keras.src.export.saved_model import export_saved_model
from keras.src.export.tfsm_layer import TFSMLayer
| from keras.src.export.export_lib import ExportArchive
|
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Dict,
Optional,
)
import numpy as np
from .... import Document, DocumentArray
from ....math import ndarray
from ....math.helper import EPSILON
from ....math.ndarray import to_numpy_array
from ....score import NamedScore
if TYPE_CHEC... | from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Dict,
Optional,
)
import numpy as np
from .... import Document, DocumentArray
from ....math import ndarray
from ....math.helper import EPSILON
from ....math.ndarray import to_numpy_array
from ....score import NamedScore
if TYPE_CHEC... |
import pytest
from jina.excepts import NoAvailablePortError
| import pytest
from jina.excepts import NoAvailablePortError
@pytest.fixture(scope='function', autouse=True)
def patched_random_port(mocker):
print('using random port fixture...')
used_ports = set()
from jina.helper import random_port
def _random_port():
for i in range(10):
_port ... |
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvisi... | """Run smoke tests"""
import os
import sys
from pathlib import Path
import torch
import torch.nn as nn
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is t... |
# mypy: allow-untyped-defs
__all__ = ["hashable", "transitive_get", "raises", "reverse_dict", "xfail", "freeze"]
def hashable(x):
try:
hash(x)
return True
except TypeError:
return False
def transitive_get(key, d):
"""Transitive dict.get
>>> d = {1: 2, 2: 3, 3: 4}
>>> d.ge... | # mypy: allow-untyped-defs
__all__ = ["hashable", "transitive_get", "raises", "reverse_dict", "xfail", "freeze"]
def hashable(x):
try:
hash(x)
return True
except TypeError:
return False
def transitive_get(key, d):
"""Transitive dict.get
>>> d = {1: 2, 2: 3, 3: 4}
>>> d.ge... |
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..... | import os
import urllib
import pytest
from pydantic import parse_obj_as
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'penal_colony.txt')
@pytest.mark.parame... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def bui... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def bui... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from jina import Document, DocumentArray
from ...laser_encoder import LaserEncoder
@pytest.fixture()
def docs_generator():
return DocumentArray((Document(text='random text') for _ in range(30)... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from jina import Document, DocumentArray
from jinahub.encoder.laser_encoder import LaserEncoder
@pytest.fixture()
def docs_generator():
return DocumentArray((Document(text='random text') for _... |
# Copyright (c) OpenMMLab. All rights reserved.
import pickle
from collections import OrderedDict
import numpy as np
import pytest
import torch
from mmengine import MessageHub
class TestMessageHub:
def test_init(self):
message_hub = MessageHub('name')
assert message_hub.instance_name == 'name'
... | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmengine import MessageHub
class TestMessageHub:
def test_init(self):
message_hub = MessageHub('name')
assert message_hub.instance_name == 'name'
assert len(message_hub.log_buffers) == 0
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.pdf import (
AmazonTextractPDFParser,
DocumentIntelligenceParser,
PDFMinerParser,
PDFPlumberParser,
PyMuPDFParser,
Py... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.pdf import (
AmazonTextractPDFParser,
DocumentIntelligenceParser,
PDFMinerParser,
PDFPlumberParser,
PyMuPDFParser,
Py... |
"""
Slides parser.
Contains parsers for .pptx files.
"""
import os
import tempfile
from pathlib import Path
from typing import Dict, List, Optional
from fsspec import AbstractFileSystem
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.core.utils impo... | """Slides parser.
Contains parsers for .pptx files.
"""
import os
import tempfile
from pathlib import Path
from typing import Dict, List, Optional
from fsspec import AbstractFileSystem
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.core.utils impor... |
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codeca... | # THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codeca... |
import pytest
import os
from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface
from llama_index.embeddings.nvidia.base import DEFAULT_MODEL
from typing import Generator
# this fixture is used to mask the NVIDIA_API_KEY environment variable and restore it
# after the test. it also returns the value o... | import pytest
import os
from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface
from llama_index.embeddings.nvidia.base import DEFAULT_MODEL
from typing import Generator
# this fixture is used to mask the NVIDIA_API_KEY environment variable and restore it
# after the test. it also returns the value o... |
import glob
import os
import pytest
from jina import Document, Flow
from jina.constants import __uptime__, __windows__
from jina.enums import LogVerbosity
from jina.helper import colored
from jina.logging.logger import JinaLogger
cur_dir = os.path.dirname(os.path.abspath(__file__))
def log(logger: JinaLogger):
... | import glob
import os
import pytest
from jina import Document, Flow
from jina.constants import __uptime__, __windows__
from jina.enums import LogVerbosity
from jina.helper import colored
from jina.logging.logger import JinaLogger
cur_dir = os.path.dirname(os.path.abspath(__file__))
def log(logger: JinaLogger):
... |
import numpy as np
from mmdet.core.evaluation.mean_ap import (eval_map, tpfp_default,
tpfp_imagenet, tpfp_openimages)
det_bboxes = np.array([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
gt_bboxes = np.array([[0, 0, 10, 20], [0, 10, 10, 19], [10, 10, 20... | import numpy as np
from mmdet.core.evaluation.mean_ap import eval_map, tpfp_default, tpfp_imagenet
det_bboxes = np.array([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
gt_bboxes = np.array([[0, 0, 10, 20], [0, 10, 10, 19], [10, 10, 20, 20]])
gt_ignore = np.array([[5, 5, 10, 20], [6, 10, 10, 19]]... |
"""
This script runs the evaluation of an SBERT msmarco model on the
MS MARCO dev dataset and reports different performance metrics for cosine similarity & dot-product.
Usage:
python eval_msmarco.py model_name [max_corpus_size_in_thousands]
"""
import logging
import os
import sys
import tarfile
from sentence_tran... | """
This script runs the evaluation of an SBERT msmarco model on the
MS MARCO dev dataset and reports different performance metrics for cosine similarity & dot-product.
Usage:
python eval_msmarco.py model_name [max_corpus_size_in_thousands]
"""
from sentence_transformers import LoggingHandler, SentenceTransformer... |
from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
__all__ = ['NdArrayEmbedding', 'AnyEmbedding']
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_availa... | from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
__all__ = ['NdArrayEmbedding', 'AnyEmbedding']
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
f... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | from docarray.array.any_array import AnyDocArray
from docarray.array.doc_list.doc_list import DocList
from docarray.array.doc_vec.doc_vec import DocVec
__all__ = ['DocList', 'DocVec', 'AnyDocArray']
|
import pytest
from typing import Dict, List
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
class MySimpleDoc(BaseDoc):
title: str
class MyComplexDoc(BaseDoc):
... | import pytest
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzm... |
import os
import time
import uuid
from contextlib import contextmanager
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, RepositoryNotFoundError
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "hf_hZEmnoOEYISjraJt... | import time
import uuid
from contextlib import contextmanager
from pathlib import Path
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder, RepositoryNotFoundError
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOK... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List
import numpy as np
import pytest
from jina import Flow, Document, DocumentArray
from ...torch_encoder import ImageTorchEncoder
@pytest.mark.parametrize('arr_in', [
(np.ones((224, 224, ... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List
import numpy as np
import pytest
from jina import Flow, Document, DocumentArray
try:
from torch_encoder import ImageTorchEncoder
except:
from jinahub.image.encoder.torch_encoder impo... |
import csv
import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information... | import csv
import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information... |
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_method,
import_modules_from_strings, is_list_of,
... | # Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is... |
from typing import Iterable, Dict, Sequence
import math
import numpy as np
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for `... | from typing import Iterable, Dict
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ... |
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransfor... | from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransfor... |
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_exec... | from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_grap... |
"""Rayyan review reader."""
import logging
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class RayyanReader(BaseReader):
"""
Rayyan reader. Reads articles from a Rayyan review.
Args:
credentials_path (str): Rayyan creden... | """Rayyan review reader."""
import logging
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class RayyanReader(BaseReader):
"""Rayyan reader. Reads articles from a Rayyan review.
Args:
credentials_path (str): Rayyan credentials... |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""
Utility that checks that mo... | # coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""
Utility that checks that mo... |
"""
Compute image embeddings
"""
import os
from PIL import Image
from sentence_transformers import SentenceTransformer, util
def test_simple_encode(clip_vit_b_32_model: SentenceTransformer) -> None:
model = clip_vit_b_32_model
# Encode an image:
image_filepath = os.path.join(
os.path.dirname(os... | """
Compute image embeddings
"""
import os
from PIL import Image
from sentence_transformers import util, SentenceTransformer
def test_simple_encode(clip_vit_b_32_model: SentenceTransformer) -> None:
model = clip_vit_b_32_model
# Encode an image:
image_filepath = os.path.join(
os.path.dirname(os... |
import sys
import pytest
from hypothesis import given, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_extmem_qdm, check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_d... | import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_extmem_qdm, check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_... |
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pydantic import BaseModel, ConfigDict
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import BlockSecret, SchemaField, SecretField
class EmailCredentials(Base... | import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pydantic import BaseModel, ConfigDict
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import BlockSecret, SchemaField, SecretField
class EmailCredentials(Base... |
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain.chains.openai_functions.base import (
create_openai_fn_chain,
create_structured_output_chain,
)
from langchain.chains.openai_functions.citation_fuzzy_match import (
create_citation_fuzzy_match_chain,
create_cita... | from langchain_core.utils.function_calling import convert_to_openai_function
from langchain.chains.openai_functions.base import (
create_openai_fn_chain,
create_structured_output_chain,
)
from langchain.chains.openai_functions.citation_fuzzy_match import (
create_citation_fuzzy_match_chain,
create_cita... |
from llama_index.vector_stores.couchbase.base import (
CouchbaseVectorStore,
CouchbaseSearchVectorStore,
)
__all__ = ["CouchbaseVectorStore", "CouchbaseSearchVectorStore"]
| from llama_index.vector_stores.couchbase.base import CouchbaseVectorStore, CouchbaseSearchVectorStore
__all__ = ["CouchbaseVectorStore", "CouchbaseSearchVectorStore"]
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class CanaryLayer(layers.Layer):
def __init__(self):
super().__init__()
self.training = None
self.received_mask = False
def... | import numpy as np
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class CanaryLayer(layers.Layer):
def __init__(self):
super().__init__()
self.training = None
self.received_mask = False
def call(self, x,... |
import pytest # type: ignore[import-not-found, import-not-found]
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
| import pytest # type: ignore[import-not-found, import-not-found]
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... | import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmengine.config import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
d... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_ar... |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_dataset_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
de... | from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def te... |
# Copyright (c) OpenMMLab. All rights reserved.
from .dropblock import DropBlock
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
__all__ = ['DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder']
| # Copyright (c) OpenMMLab. All rights reserved.
from .dropblock import DropBlock
__all__ = ['DropBlock']
|
def split_package(package: str) -> tuple[str, str]:
"""Split a package name into the containing package and the final name."""
parts = package.split(".")
return ".".join(parts[:-1]), parts[-1]
def dump_migrations_as_grit(name: str, migration_pairs: list[tuple[str, str]]) -> str:
"""Dump the migration ... | def split_package(package: str) -> tuple[str, str]:
"""Split a package name into the containing package and the final name."""
parts = package.split(".")
return ".".join(parts[:-1]), parts[-1]
def dump_migrations_as_grit(name: str, migration_pairs: list[tuple[str, str]]) -> str:
"""Dump the migration ... |
from torchaudio._internal.module_utils import dropping_support
from ._alignment import forced_align as _forced_align, merge_tokens, TokenSpan
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biqu... | from torchaudio._internal.module_utils import dropping_support
from ._alignment import forced_align as _forced_align, merge_tokens, TokenSpan
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biqu... |
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import ... | # Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import ... |
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio... | import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from mmengine.hooks import IterTimerHook
from mmengine.logging import MessageHub
def time_patch():
if not hasattr(time_patch, 'time'):
time_patch.time = 0
else:
time_... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import IterTimerHook
class TestIterTimerHook:
def test_before_epoch(self):
hook = IterTimerHook()
runner = Mock()
hook._before_epoch(runner)
assert isinstance(hook.t, float)
de... |
from llama_index.llms.huggingface.base import (
HuggingFaceLLM,
)
__all__ = ["HuggingFaceLLM"]
| from llama_index.llms.huggingface.base import (
HuggingFaceInferenceAPI,
HuggingFaceLLM,
TextGenerationInference,
)
__all__ = ["HuggingFaceLLM", "HuggingFaceInferenceAPI", "TextGenerationInference"]
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import List, Optional, Sequence
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_modu... | # Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import Any, List, Optional, Sequence, Tuple
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_B... |
import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
__all__ = ['WebSocketGatewayRuntime']
class WebSocketGatewayRuntime(Gatewa... | import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
__all__ = ['WebSocketGatewayRuntime']
class WebSocketGatewayRuntime(Gatewa... |
from __future__ import annotations
import json
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_t... | from __future__ import annotations
import json
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_t... |
import numpy as np
from docarray import Image
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
def test_image():
image = Image(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, n... | import numpy as np
from docarray import Image
def test_image():
image = Image(url='http://jina.ai')
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
import torch
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import distributed as dist
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
"""Change and synchronize the random image size across ranks, c... | import random
import torch
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import distributed as dist
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
"""Change and synchronize the random image size across ranks, currently
used in YOLOX.
Args:
r... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.llmonitor_callback import (
LLMonitorCallbackHandler,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation w... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.llmonitor_callback import (
LLMonitorCallbackHandler,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation w... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
# student
bac... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
# student
bac... |
"""Loading a pickled model generated by test_pickling.py, only used by
`test_gpu_with_dask.py`"""
import json
import os
import numpy as np
import pytest
from test_gpu_pickling import build_dataset, load_pickle, model_path
import xgboost as xgb
class TestLoadPickle:
def test_load_pkl(self) -> None:
"""T... | """Loading a pickled model generated by test_pickling.py, only used by
`test_gpu_with_dask.py`"""
import json
import os
import numpy as np
import pytest
from test_gpu_pickling import build_dataset, load_pickle, model_path
import xgboost as xgb
from xgboost import testing as tm
class TestLoadPickle:
def test_lo... |
_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 4),
stages=(False, True, True, True),
... | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 4),
stages=(False, True, True, True),
... |
from typing import cast
import prisma.enums
import prisma.types
from backend.blocks.io import IO_BLOCK_IDs
from backend.util.type import typed_cast
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.... | import prisma.enums
import prisma.types
from backend.blocks.io import IO_BLOCK_IDs
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NO... |
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.layers.GaussianNoise")
class GaussianNoise(layers.Layer):
"""Apply additive zero-centered Gaussian noise.
This is useful to mitigate overfitting
(you cou... | from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.layers.GaussianNoise")
class GaussianNoise(layers.Layer):
"""Apply additive zero-centered Gaussian noise.
This is useful to mitigate overfitting
(you cou... |
_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py']
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
... | _base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py']
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
img_norm_cfg = dict(
... |
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (does not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/... | _base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
fil... |
"""This module contains the core type definitions and protocols used throughout Dynamo.
The types defined here fall into several categories:
- Guard related types (GuardFn, GuardFail, GuardedCode): Used for tracking and managing guards that protect compiled code
- Frame and cache types (FrameState, CacheEntry): Used f... | """This module contains the core type definitions and protocols used throughout Dynamo.
The types defined here fall into several categories:
- Guard related types (GuardFn, GuardFail, GuardedCode): Used for tracking and managing guards that protect compiled code
- Frame and cache types (FrameState, CacheEntry): Used f... |
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | # flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... |
_base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
ga... | _base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
ga... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
preprocess_cfg = dict(
mean=[123.675, 116.28,... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
# student
ba... |