| input | output |
|---|---|
"""
==================================
Getting started with transforms v2
==================================
Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports
images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding b... | """
==================================
Getting started with transforms v2
==================================
Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports
images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding b... |
import logging
import pathlib
from argparse import ArgumentParser
import torch
import torchaudio
from lightning import ConformerRNNTModule
from transforms import get_data_module
logger = logging.getLogger()
def compute_word_level_distance(seq1, seq2):
return torchaudio.functional.edit_distance(seq1.lower().spl... | import logging
import pathlib
from argparse import ArgumentParser
import torch
import torchaudio
from lightning import ConformerRNNTModule, get_data_module
logger = logging.getLogger()
def compute_word_level_distance(seq1, seq2):
return torchaudio.functional.edit_distance(seq1.lower().split(), seq2.lower().spl... |
_base_ = [
'../common/ms-poly_3x_coco-instance.py',
'../_base_/models/mask-rcnn_r50_fpn.py'
]
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 5... | _base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.1... |
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
# 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
'../common/ssj_270k_coco_instance.py',
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncB... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
# 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
'../common/ssj_270k_coco_instance.py',
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after http... |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.core import url_to_fs
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, is_remote_filesystem
from .utils import require_lz4, require_zstandar... | import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.core import url_to_fs
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, extract_path_from_uri, is_remote_filesystem
from .utils import requir... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.language.cobol import (
CobolSegmenter,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation ... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.language.cobol import (
CobolSegmenter,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation ... |
"""HTML node parser."""
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Union
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils impor... | """HTML node parser."""
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Union
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import... |
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
... | _base_ = './mask_rcnn_r50_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
styl... |
"""Langchain Embedding Wrapper Module."""
from typing import TYPE_CHECKING, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks import CallbackManager
if TYPE_CHE... | """Langchain Embedding Wrapper Module."""
from typing import TYPE_CHECKING, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks import CallbackManager
if TYPE_CHE... |
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model impo... | __copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model impo... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from mmengine.hooks import IterTimerHook
from mmengine.logging import MessageHub
def time_patch():
if not hasattr(time_patch, 'time'):
time_patch.time = 0
else:
time_... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from mmengine.hooks import IterTimerHook
from mmengine.logging import MessageHub
def time_patch():
if not hasattr(time_patch, 'time'):
time_patch.time = 0
else:
time_... |
# Copyright (c) OpenMMLab. All rights reserved.
from ._fast_stop_training_hook import FastStopTrainingHook # noqa: F401,F403
from ._utils import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results, demo_track_inputs,
get_detector_cfg, get_roi_head_cfg, replace_to_ceph... | # Copyright (c) OpenMMLab. All rights reserved.
from ._fast_stop_training_hook import FastStopTrainingHook # noqa: F401,F403
from ._utils import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results, get_detector_cfg,
get_roi_head_cfg, replace_to_ceph)
__all__ = [
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOO... | # Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, get_root_logger, log_img_scale
from .memory i... |
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.o... | import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.o... |
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
neck=dict(
type='F... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
neck=dict(
type='F... |
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_bounding_box_format,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
... | from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_bounding_box_format,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
... |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import List, Optional
from mmengine.dataset import BaseDataset
from mmengine.fileio import load
from mmengine.utils import is_abs
from ..registry import DATASETS
@DATASETS.register_module()
class BaseDetDataset(BaseDataset):
"""Ba... | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import List, Optional
from mmengine.dataset import BaseDataset
from mmengine.fileio import load
from mmengine.utils import is_abs
from ..registry import DATASETS
@DATASETS.register_module()
class BaseDetDataset(BaseDataset):
"""Ba... |
"""Callback Handler that writes to a file."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
if TYPE_CHECKING:
from langchain_core... | """Callback Handler that writes to a file."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
if TYPE_CHECKING:
from langchain_core... |
import asyncio
import time
import pytest
from jina import Document
from jina.clients.request import request_generator
from jina.serve.stream.helper import AsyncRequestsIterator, RequestsCounter
def slow_blocking_generator():
for i in range(2):
yield Document(id=str(i))
time.sleep(2)
@pytest.ma... | import time
import asyncio
from jina import Document
from jina.clients.request import request_generator
from jina.serve.stream.helper import AsyncRequestsIterator
import pytest
def slow_blocking_generator():
for i in range(2):
yield Document(id=i)
time.sleep(2)
@pytest.mark.asyncio
async def t... |
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
from pyda... | """Question-answering with sources over a vector database."""
import warnings
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
from pyda... |
# Copyright (c) OpenMMLab. All rights reserved.
from .registry import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIM_WRAPPER_CONSTRUCTORS,
OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS,
RUNNERS, TASK_UTILS, TRANSFOR... | # Copyright (c) OpenMMLab. All rights reserved.
from .registry import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS,
OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS,
RUNNERS, TASK_UTILS, TRANSFORMS, ... |
import warnings
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import AUDIO_FILE_... | import warnings
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import AUDIO_FILE_... |
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils.typing import ConfigDict, MultiConfig, OptConfigType
from ... | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import build_loss
from mmdet.registry import MODELS
@MODELS.register_module()
clas... |
import torch
from parameterized import parameterized
from torchaudio.prototype.models import conformer_wav2vec2_base, emformer_hubert_base
from torchaudio_unittest.common_utils import nested_params, skipIfNoCuda, torch_script, TorchaudioTestCase
class TestSSLModel(TorchaudioTestCase):
def _smoke_test(self, model,... | import torch
from parameterized import parameterized
from torchaudio.prototype.models import conformer_wav2vec2_base, emformer_hubert_base
from torchaudio_unittest.common_utils import skipIfNoCuda, torch_script, TorchaudioTestCase
class TestSSLModel(TorchaudioTestCase):
def _smoke_test(self, model, feature_dim, d... |
from dataclasses import dataclass, field
from typing import Union
from transformers import TrainingArguments as TransformersTrainingArguments
from transformers.utils import ExplicitEnum
class BatchSamplers(ExplicitEnum):
"""
Stores the acceptable string identifiers for batch samplers.
The batch sampler i... | from dataclasses import dataclass, field
from typing import Union
from transformers import TrainingArguments as TransformersTrainingArguments
from transformers.utils import ExplicitEnum
class BatchSamplers(ExplicitEnum):
"""
Stores the acceptable string identifiers for batch samplers.
"""
BATCH_SAMPL... |
"""
RAKE keyword-table based index.
Similar to KeywordTableIndex, but uses RAKE instead of GPT.
"""
from typing import Any, Set, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.indices.keyword_table.base import (
BaseKeywordTableIndex,
KeywordTableRetrieverMode,
)
... | """RAKE keyword-table based index.
Similar to KeywordTableIndex, but uses RAKE instead of GPT.
"""
from typing import Any, Set, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.indices.keyword_table.base import (
BaseKeywordTableIndex,
KeywordTableRetrieverMode,
)
f... |
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
"""Tool for Steam Web API"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.steam import SteamWebAPIWrapper
class SteamWebAPIQueryRun(BaseTool):
"""Tool that searches the Steam Web API."""
... | """Tool for Steam Web API"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.steam import SteamWebAPIWrapper
class SteamWebAPIQueryRun(BaseTool): # type: ignore[override]
"""Tool that search... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import (
GoogleCloudEnterpriseSearchRetriever,
GoogleVertexAIMultiTurnSearchRetriever,
GoogleVertexAISearchRetriever,
)
# Create a way to dynamically ... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import (
GoogleCloudEnterpriseSearchRetriever,
GoogleVertexAIMultiTurnSearchRetriever,
GoogleVertexAISearchRetriever,
)
# Create a way to dynamically ... |
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
... | from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
m... |
"""
Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task.
Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings.
It then computes an a... | """
Given a tab separated file (.tsv) with parallel sentences, where the second column is the translation of the sentence in the first column, for example, in the format:
src1 trg1
src2 trg2
...
where trg_i is the translation of src_i.
Given src_i, the TranslationEvaluator checks which trg_j has the highest sim... |
"""Helper functions for clients in Jina."""
from functools import wraps
from typing import Callable
from jina.excepts import BadClientCallback, BadServer
from jina.helper import get_rich_console
from jina.logging.logger import JinaLogger
from jina.proto import jina_pb2
from jina.types.request.data import Response
d... | """Helper functions for clients in Jina."""
from functools import wraps
from typing import Callable, Optional
from jina.excepts import BadClientCallback
from jina.helper import get_rich_console
from jina.logging.logger import JinaLogger
from jina.proto import jina_pb2
from jina.types.request.data import Response
de... |
"""Mongo client."""
from collections.abc import Callable
from typing import Dict, Iterable, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SimpleMongoReader(BaseReader):
"""Simple mongo reader.
Concatenates each Mongo doc into Document... | """Mongo client."""
from typing import Dict, Iterable, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SimpleMongoReader(BaseReader):
"""Simple mongo reader.
Concatenates each Mongo doc into Document used by LlamaIndex.
Args... |
import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
... | import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
imag... |
import torch
from torchvision.transforms.functional import InterpolationMode
def get_module(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.transforms.v2
return torchvision.transforms.v2
else:
import torchvision.t... | import torch
from torchvision.transforms import autoaugment, transforms
from torchvision.transforms.functional import InterpolationMode
class ClassificationPresetTrain:
def __init__(
self,
*,
crop_size,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpol... |
import os
import fsspec
import pytest
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
... | import os
import fsspec
import pytest
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from datasets.utils._hf_hub_fixes import dataset_info as hf_api_dataset_info
from .utils import require_lz4, require_zstandard
def test_extract_path_from_uri():
... |
"""**Chat Sessions** are a collection of messages and function calls."""
from collections.abc import Sequence
from typing import TypedDict
from langchain_core.messages import BaseMessage
class ChatSession(TypedDict, total=False):
"""Chat Session.
Chat Session represents a single conversation, channel, or o... | """**Chat Sessions** are a collection of messages and function calls."""
from collections.abc import Sequence
from typing import TypedDict
from langchain_core.messages import BaseMessage
class ChatSession(TypedDict, total=False):
"""Chat Session represents a single
conversation, channel, or other group of m... |
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...clip_text import CLIPTextEncoder
_EMBEDDING_DIM = 512
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text he... | import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...clip_text import CLIPTextEncoder
_EMBEDDING_DIM = 512
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text he... |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import Bit... | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.structures import InstanceData, PixelData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
cl... |
import requests
from yarl import URL
from typing import Dict, List
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
JINA_SEARCH_URL_ENDPOINT = "https://s.jina.ai/"
class JinaToolSpec(BaseToolSpec):
"""
Jina tool spec.
"""
spec_functions = ["... | import requests
from yarl import URL
from typing import Dict, List
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
JINA_SEARCH_URL_ENDPOINT = "https://s.jina.ai/"
class JinaToolSpec(BaseToolSpec):
"""
Jina tool spec.
"""
spec_functions = ["... |
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import tree
from keras.src.utils.module_utils import tensorflow as tf
def get_input_signature(model):
if not isinstance(model, models.Model):
raise TypeError(
"The m... | from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import tree
from keras.src.utils.module_utils import tensorflow as tf
def get_input_signature(model):
if not isinstance(model, models.Model):
raise TypeError(
"The m... |
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
... | import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric i... | # Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric i... |
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl... | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl... |
"""
This module provides dynamic access to deprecated Jira tools.
When attributes like `JiraAction` are accessed, they are redirected to their new
locations in `langchain_community.tools`. This ensures backward compatibility
while warning developers about deprecation.
Attributes:
JiraAction (deprecated): Dynamica... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import JiraAction
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LO... |
from pathlib import Path
from typing import List
import pytest
import torch
from jina import Document, DocumentArray, Executor
from ...dpr_text import DPRTextEncoder
@pytest.fixture(scope='session')
def basic_encoder() -> DPRTextEncoder:
return DPRTextEncoder()
@pytest.fixture(scope='session')
def basic_encod... | from typing import List
import pytest
import torch
from jina import Document, DocumentArray
from jina.executors import BaseExecutor
from ...dpr_text import DPRTextEncoder
@pytest.fixture(scope='session')
def basic_encoder() -> DPRTextEncoder:
return DPRTextEncoder()
@pytest.fixture(scope='session')
def basic_... |
import numpy as np
import pytest
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
@pytest.mark.parametrize... | import pytest
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
@pytest.mark.parametrize(
'array,result... |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
| import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass
|
from backend.blocks.smartlead.models import (
AddLeadsRequest,
AddLeadsToCampaignResponse,
CreateCampaignRequest,
CreateCampaignResponse,
SaveSequencesRequest,
SaveSequencesResponse,
)
from backend.util.request import Requests
class SmartLeadClient:
"""Client for the SmartLead API"""
... | from backend.blocks.smartlead.models import (
AddLeadsRequest,
AddLeadsToCampaignResponse,
CreateCampaignRequest,
CreateCampaignResponse,
SaveSequencesRequest,
SaveSequencesResponse,
)
from backend.util.request import Requests
class SmartLeadClient:
"""Client for the SmartLead API"""
... |
_base_ = './faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
... | _base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
... |
import time
from jina import Flow
from tests.integration.instrumentation import ExecutorTestWithTracing, get_traces
def test_span_order(jaeger_port, otlp_collector, otlp_receiver_port):
f = Flow(
tracing=True,
traces_exporter_host='http://localhost',
traces_exporter_port=otlp_receiver_por... | import time
from jina import Flow
from tests.integration.instrumentation import ExecutorTestWithTracing, get_traces
def test_span_order(jaeger_port, otlp_collector, otlp_receiver_port):
f = Flow(
tracing=True,
traces_exporter_host='localhost',
traces_exporter_port=otlp_receiver_port,
... |
# Copyright 2024 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
... | # Copyright 2024 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
... |
#!/usr/bin/env python3
"""Generate the conf JSONs from fairseq pretrained weight file, consumed by unit tests
Note:
The current configuration files were generated on fairseq e47a4c84
Usage:
1. Download pretrained parameters from https://github.com/pytorch/fairseq/tree/main/examples/hubert
2. Run this script and s... | #!/usr/bin/env python3
"""Generate the conf JSONs from fairseq pretrained weight file, consumed by unit tests
Note:
The current configuration files were generated on fairseq e47a4c84
Usage:
1. Download pretrained parameters from https://github.com/pytorch/fairseq/tree/main/examples/hubert
2. Run this script and s... |
import sys
from jina.serve.runtimes.head import HeadRuntime
from jina.parsers import set_pod_parser
def run(*args, **kwargs):
runtime_args = set_pod_parser().parse_args(args)
runtime_args.host = runtime_args.host[0]
runtime_args.port = runtime_args.port[0]
with HeadRuntime(runtime_args) as runtime:
... | import sys
from jina.serve.runtimes.head import HeadRuntime
from jina.parsers import set_pod_parser
def run(*args, **kwargs):
runtime_args = set_pod_parser().parse_args(args)
with HeadRuntime(runtime_args) as runtime:
runtime.run_forever()
if __name__ == '__main__':
run(*sys.argv[1:])
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting imp... | # Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting imp... |
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, PseudoBBoxCoder,
... | from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, PseudoBBoxCoder,
TBLRBBoxCoder)
from .iou_calc... |
"""Filter that uses an LLM to drop documents that aren't relevant to the query."""
from collections.abc import Sequence
from typing import Any, Callable, Optional
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.language_models im... | """Filter that uses an LLM to drop documents that aren't relevant to the query."""
from collections.abc import Sequence
from typing import Any, Callable, Optional
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.language_models im... |
from ._hubert_loss import hubert_loss
__all__ = [
"hubert_loss",
"wav2vec2_loss",
]
| from ._hubert_loss import hubert_loss
__all__ = [
"hubert_loss",
]
|
import os
import subprocess
import pytest
from xgboost import testing as tm
pytestmark = [
pytest.mark.skipif(**tm.no_dask()),
pytest.mark.skipif(**tm.no_dask_cuda()),
tm.timeout(60),
]
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.mgpu
def test_dask_training() -> None:
script = os.path.join(tm.... | import os
import subprocess
import pytest
from xgboost import testing as tm
pytestmark = [
pytest.mark.skipif(**tm.no_dask()),
pytest.mark.skipif(**tm.no_dask_cuda()),
tm.timeout(60),
]
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.mgpu
def test_dask_training():
script = os.path.join(tm.demo_dir... |
from dask.array.fft import * # noqa: F403
# dask.array.fft doesn't have __all__. If it is added, replace this with
#
# from dask.array.fft import __all__ as linalg_all
_n = {}
exec('from dask.array.fft import *', _n)
for k in ("__builtins__", "Sequence", "annotations", "warnings"):
_n.pop(k, None)
fft_all = list(_n... | from dask.array.fft import * # noqa: F403
# dask.array.fft doesn't have __all__. If it is added, replace this with
#
# from dask.array.fft import __all__ as linalg_all
_n = {}
exec('from dask.array.fft import *', _n)
del _n['__builtins__']
fft_all = list(_n)
del _n
from ...common import _fft
from ..._internal import g... |
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests.chat_models import (
ChatModelUnitTests,
)
from langchain_groq import ChatGroq
class TestGroqStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseCh... | """Standard LangChain interface tests"""
from typing import Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests.chat_models import (
ChatModelUnitTests,
)
from langchain_groq import ChatGroq
class TestGroqStandard(ChatModelUnitTests):
@property
def chat_model_c... |
from datetime import datetime
import pytest
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.ut... | from datetime import datetime
import pytest
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import UserCredit
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.util.t... |
from google.protobuf import __version__ as __pb__version__
from jina._docarray import docarray_v2 as is_docarray_v2
if __pb__version__.startswith('4'):
if is_docarray_v2:
from .docarray_v2.pb.jina_pb2_grpc import *
else:
from .docarray_v1.pb.jina_pb2_grpc import *
else:
if is_docarray_v2:... | from google.protobuf import __version__ as __pb__version__
if __pb__version__.startswith('4'):
from .pb.jina_pb2_grpc import *
else:
from .pb2.jina_pb2_grpc import *
|
from typing import Iterable, Dict, TYPE_CHECKING
import numpy as np
from docarray import DocumentArray
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.storage.milvus.backend import (
_always_true_expr,
_ids_to_mi... | from typing import Iterable, Dict, TYPE_CHECKING
import numpy as np
from docarray import DocumentArray
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.storage.milvus.backend import (
_always_true_expr,
_ids_to_mi... |
"""Generation output schema."""
from __future__ import annotations
from typing import Any, Literal, Optional
from pydantic import computed_field
from langchain_core.load import Serializable
from langchain_core.utils._merge import merge_dicts
class Generation(Serializable):
"""A single text generation output.
... | """Generation output schema."""
from __future__ import annotations
from typing import Any, Literal, Optional
from langchain_core.load import Serializable
from langchain_core.utils._merge import merge_dicts
class Generation(Serializable):
"""A single text generation output.
Generation represents the respon... |
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_r... | # Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_r... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_batch_norm, has_method,
import_modules_from_strings, is_... | # Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_batch_norm, has_method,
import_modules_from_strings, is_... |
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.0.1'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.0.1'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versio... | # Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.0.1'
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
... |
from keras.src import backend
from keras.src import ops
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.ops.operation_utils import compute_pooling_output_shape
from keras.src.utils import argument_validation
class BasePooling(Layer):
"""Base pooling layer.... | from keras.src import backend
from keras.src import ops
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.ops.operation_utils import compute_pooling_output_shape
from keras.src.utils import argument_validation
class BasePooling(Layer):
"""Base pooling layer.... |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
from docarray import BaseDocument
from docarray.typing import Mesh3DUrl
def test_set_mesh_url():
class MyDocument(BaseDocument):
mesh_url: Mesh3DUrl
d = MyDocument(mesh_url="https://jina.ai/mesh.obj")
assert isinstance(d.mesh_url, Mesh3DUrl)
assert d.mesh_url == "https://jina.ai/mesh.obj"
| from docarray import Document
from docarray.typing import Mesh3DUrl
def test_set_mesh_url():
class MyDocument(Document):
mesh_url: Mesh3DUrl
d = MyDocument(mesh_url="https://jina.ai/mesh.obj")
assert isinstance(d.mesh_url, Mesh3DUrl)
assert d.mesh_url == "https://jina.ai/mesh.obj"
|
from collections.abc import Sequence
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
class SelfAskOutputParser(AgentOutputParser):
"""Parses self-ask style LLM cal... | from collections.abc import Sequence
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
class SelfAskOutputParser(AgentOutputParser):
"""Parses self-ask style LLM cal... |
# Whether to disable showing progress on compilation passes
# Need to add a new config otherwise will get a circular import if dynamo config is imported here
disable_progress = True
# If True this also shows the node names in each pass, for small models this is great but larger models it's quite noisy
verbose_progress... | # Whether to disable showing progress on compilation passes
# Need to add a new config otherwise will get a circular import if dynamo config is imported here
disable_progress = True
# If True this also shows the node names in each pass, for small models this is great but larger models it's quite noisy
verbose_progress ... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from... |
import logging
from typing import Optional, cast
from autogpt_libs.supabase_integration_credentials_store.types import (
UserIntegrations,
UserMetadata,
UserMetadataRaw,
)
from fastapi import HTTPException
from prisma import Json
from prisma.models import User
from backend.data.db import prisma
from backe... | from typing import Optional
from autogpt_libs.supabase_integration_credentials_store.types import UserMetadataRaw
from fastapi import HTTPException
from prisma import Json
from prisma.models import User
from backend.data.db import prisma
DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
DEFAULT_EMAIL = "defau... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.data_elements import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestCornerNet(... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestCornerNet(TestCase)... |
from __future__ import annotations
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.utils.json import parse_json_markdown
from langchain.agents import AgentOutputParser
from langchain.agents.conversatio... | from __future__ import annotations
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.utils.json import parse_json_markdown
from langchain.agents import AgentOutputParser
from langchain.agents.conversatio... |
"""Test EdenAi's invoice parser Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and wi... | """Test EdenAi's invoice parser Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and wi... |
from __future__ import annotations
from typing import Any, Dict, List, Optional, Type
from langchain_core.tools import BaseTool, BaseToolkit
from langchain_core.utils.pydantic import get_fields
from pydantic import model_validator
from langchain_community.tools.file_management.copy import CopyFileTool
from langchain... | from __future__ import annotations
from typing import Any, Dict, List, Optional, Type
from langchain_core.tools import BaseTool, BaseToolkit
from langchain_core.utils.pydantic import get_fields
from pydantic import model_validator
from langchain_community.tools.file_management.copy import CopyFileTool
from langchain... |
#!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re
import numpy as np
url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/3.x/'
files = sorted(glob.glob('../configs/*/README.md'))
stats = []
titles = []
num_ckpts = 0
for f in files:
url = osp.dirname(f.replace('... | #!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re
import numpy as np
url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/master/'
files = sorted(glob.glob('../configs/*/README.md'))
stats = []
titles = []
num_ckpts = 0
for f in files:
url = osp.dirname(f.replac... |
# Optional list of dependencies required by the package
dependencies = ["torch"]
from torchvision.models import get_model_weights, get_weight
from torchvision.models.alexnet import alexnet
from torchvision.models.convnext import convnext_base, convnext_large, convnext_small, convnext_tiny
from torchvision.models.dense... | # Optional list of dependencies required by the package
dependencies = ["torch"]
from torchvision.models import get_model_weights, get_weight
from torchvision.models.alexnet import alexnet
from torchvision.models.convnext import convnext_base, convnext_large, convnext_small, convnext_tiny
from torchvision.models.dense... |
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.45"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_in... | """Top-level imports for LlamaIndex."""
__version__ = "0.12.44"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_in... |
from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .squim_pipeline import SQUIM_OBJECTIVE, SquimObjectiveBundle
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_... | from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
"HiFiGANVocoderBundle",
]
|
from typing import Any
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
def arize_phoenix_callback_handler(**kwargs: Any) -> BaseCallbackHandler:
# newer versions of arize, v2.x
try:
from openinference.instrumentation.llama_index import LlamaIndexInstrumentor
from opent... | from typing import Any
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
def arize_phoenix_callback_handler(**kwargs: Any) -> BaseCallbackHandler:
# newer versions of arize, v2.x
try:
from openinference.instrumentation.llama_index import LlamaIndexInstrumentor
from opent... |
from typing import Any, List, Optional, Union
from pathlib import Path
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import ... | from typing import Any, List, Optional, Union
from pathlib import Path
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import ... |
# Copyright (c) OpenMMLab. All rights reserved.
import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose:
"""Compose multiple transforms sequentially.
Args:
transforms (Sequence[dict | callable]): Sequence of transform objec... | # Copyright (c) OpenMMLab. All rights reserved.
import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose:
"""Compose multiple transforms sequentially.
Args:
transforms (Sequence[dict | callable]): Sequence of transform objec... |
# Copyright (c) OpenMMLab. All rights reserved.
from .log_buffer import LogBuffer
from .logger import MMLogger, print_log
from .message_hub import MessageHub
__all__ = ['LogBuffer', 'MessageHub', 'MMLogger', 'print_log']
| # Copyright (c) OpenMMLab. All rights reserved.
from .base_global_accsessible import BaseGlobalAccessible, MetaGlobalAccessible
from .log_buffer import LogBuffer
from .logger import MMLogger, print_log
from .message_hub import MessageHub
__all__ = [
'LogBuffer', 'MessageHub', 'MetaGlobalAccessible', 'BaseGlobalAcc... |
"""Airtable reader."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from pyairtable import Table
class AirtableReader(BaseReader):
"""
Airtable reader. Reads data from a table in a base.
Args:
api_key (str): Airtable A... | """Airtable reader."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from pyairtable import Table
class AirtableReader(BaseReader):
"""
Airtable reader. Reads data from a table in a base.
Args:
api_key (str): Airtable AP... |
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
input_size = 300
model = dict(
bbox_head=dict(
type='SSDHead',
anchor_generator=dict(
type='LegacySSDAnchorGene... | _base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
input_size = 300
model = dict(
bbox_head=dict(
type='SSDHead',
anchor_generator=dict(
type='LegacySSDAnchorGene... |
"""**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import Lite... | """**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import Lite... |
import prisma
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
EXECUTION_RESULT_INCLUDE: prisma.types.... | import prisma
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInc... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from ...faiss_searcher import FaissSearcher
def _get_d... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_stream... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result, bbox_mapping_back
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class CornerNet(SingleStageDetector):
"""CornerNet.
This detector is the implementatio... | import torch
from mmdet.core import bbox2result, bbox_mapping_back
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class CornerNet(SingleStageDetector):
"""CornerNet.
This detector is the implementation of the paper `CornerNet: Detecting
Objects... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.mnist import load_data as load_data
| """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.mnist import load_data
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.california_housing import load_data as load_data
| """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.california_housing import load_data
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from spacy_text_encoder import SpacyTextEncoder
_EMBEDDING_DIM = 96
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
de... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from spacy_text_encoder import SpacyTextEncoder
_EMBEDDING_DIM = 96
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
de... |
import numpy as np
import pytest
import torch
from docarray import BaseDoc
from docarray.base_doc import AnyDoc
from docarray.typing import (
AnyEmbedding,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchTensor,
)
@pytest.mark.proto
def test_proto_all_types():
... | import numpy as np
import pytest
import torch
from docarray import BaseDoc
from docarray.base_doc import AnyDoc
from docarray.typing import (
AnyEmbedding,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchTensor,
)
@pytest.mark.proto
def test_proto_all_types():
... |
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval... | import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval... |