| input (string, lengths 33–5k) | output (string, lengths 32–5k) |
|---|---|
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
from tests.index.elastic.fixture import start_storage_v8 # noqa: F401
file_to_skip = ['fastAPI', 'jina', 'index', 'first_steps.md']
def check_raw_file_full(raw, lang="python", keyword_... | import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
from tests.index.elastic.fixture import start_storage_v8 # noqa: F401
file_to_skip = ['fastAPI', 'jina', 'index', 'first_steps.md']
def check_raw_file_full(raw, lang="python", keyword_... |
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmdet.core.mask import BitmapMasks, PolygonMasks
def _check_fields(results, pipeline_results, keys):
"""Check data in fields from two results are same."""
for key in keys:
if isinstance(results[key], (BitmapMasks, PolygonMasks)):... | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmdet.core.mask import BitmapMasks, PolygonMasks
def _check_fields(results, pipeline_results, keys):
"""Check data in fields from two results are same."""
for key in keys:
if isinstance(results[key], (BitmapMasks, PolygonMasks)):... |
import math
import sys
import time
import torch
import torchvision.models.detection.mask_rcnn
import utils
from coco_eval import CocoEvaluator
from coco_utils import get_coco_api_from_dataset
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq, scaler=None):
model.train()
metric_logg... | import math
import sys
import time
import torch
import torchvision.models.detection.mask_rcnn
import utils
from coco_eval import CocoEvaluator
from coco_utils import get_coco_api_from_dataset
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq, scaler=None):
model.train()
metric_logg... |
"""Correctness evaluation."""
import asyncio
from typing import Any, Callable, Optional, Sequence, Tuple, Union
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.evaluation.eval_utils import default_parser
from llama_index.core.llms.llm import LLM
from llama_index.core... | """Correctness evaluation."""
import asyncio
from typing import Any, Callable, Optional, Sequence, Tuple, Union
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.evaluation.eval_utils import default_parser
from llama_index.core.llms.llm import LLM
from llama_index.core... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
InfoPowerBITool,
ListPowerBITool,
QueryPowerBITool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for ra... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
InfoPowerBITool,
ListPowerBITool,
QueryPowerBITool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for ra... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but hope to see some on the hub soon
m... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but hope to see some on the hub soon
m... |
import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.utils._internal.pydantic import is_pydantic_v2
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Option... | import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
... |
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .sparse_rcnn import SparseRCNN
@DETECTORS.register_module()
class QueryInst(SparseRCNN):
r"""Implementation of
`Instances as Queries <http://arxiv.org/abs/2105.01928>`_"""
def __init__(self,
backbone,
... | from ..builder import DETECTORS
from .sparse_rcnn import SparseRCNN
@DETECTORS.register_module()
class QueryInst(SparseRCNN):
r"""Implementation of
`Instances as Queries <http://arxiv.org/abs/2105.01928>`_"""
def __init__(self,
backbone,
rpn_head,
roi_he... |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
"""Return the number of possible shards according to the input gen_kwargs"""
# Having lists of different sizes makes sharding ambiguous, raise an error in this case
# until we decide how to define sha... | from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
"""Return the number of possible shards according to the input gen_kwargs"""
# Having lists of different sizes makes sharding ambiguous, raise an error in this case
# until we decide how to define sha... |
import pytest
from jina.jaml.parsers.executor.legacy import ExecutorLegacyParser
class A00:
def __init__(self, a00):
self.a00 = a00
class A0(A00):
def __init__(self, a0):
self.a0 = a0
class A(A0):
def __init__(self, a):
self.a = a
class B:
def __init__(self, b):
... | import pytest
from jina.jaml.parsers.executor.legacy import LegacyParser
class A00:
def __init__(self, a00):
self.a00 = a00
class A0(A00):
def __init__(self, a0):
self.a0 = a0
class A(A0):
def __init__(self, a):
self.a = a
class B:
def __init__(self, b):
self.b =... |
# Optional list of dependencies required by the package
dependencies = ["torch"]
from torchvision.models import get_model_weights, get_weight
from torchvision.models.alexnet import alexnet
from torchvision.models.convnext import convnext_base, convnext_large, convnext_small, convnext_tiny
from torchvision.models.dense... | # Optional list of dependencies required by the package
dependencies = ["torch"]
from torchvision.models import get_model_weights, get_weight
from torchvision.models.alexnet import alexnet
from torchvision.models.convnext import convnext_base, convnext_large, convnext_small, convnext_tiny
from torchvision.models.dense... |
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from ...audioclip_text import AudioCLIPTextEncoder
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='module')
def basic_encoder() -> AudioCLIPTextEncoder:
return AudioCLIPTextEncoder()
def test_config... | from pathlib import Path
from typing import List
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...audioclip_text import AudioCLIPTextEncoder
_EMBEDDING_DIM = 1024
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex... |
import os
from functools import lru_cache
from typing import Optional, Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_L... | import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SA... |
"""
This script contains an example of how to perform semantic search with Elasticsearch.
As a dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions:
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
Questions are indexed to Elasticsearch together with their ... | """
This script contains an example of how to perform semantic search with ElasticSearch.
As a dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions:
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
Questions are indexed to ElasticSearch together with their ... |
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i i... | import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i i... |
import pytest
from backend.data import db
from backend.executor import ExecutionScheduler
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
from backend.util.test import SpinTestServer
@pytest.mark.... | import pytest
from backend.data import db
from backend.executor import ExecutionScheduler
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
from backend.util.test import SpinTestServer
@pytest.mark.... |
from llama_index.core.instrumentation.events.base import BaseEvent
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.schema import QueryType
class QueryStartEvent(BaseEvent):
"""
QueryStartEvent.
Args:
query (QueryType): Query as a string or query bundle.
... | from llama_index.core.instrumentation.events.base import BaseEvent
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.schema import QueryType
class QueryStartEvent(BaseEvent):
"""QueryStartEvent.
Args:
query (QueryType): Query as a string or query bundle.
"""
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .amp import autocast
from .base_loop import BaseLoop
from .checkpoint import (CheckpointLoader, find_latest_checkpoint,
get_deprecated_model_names, get_external_models,
get_mmcls_models, get_state_dict,
... | # Copyright (c) OpenMMLab. All rights reserved.
from .amp import autocast
from .base_loop import BaseLoop
from .checkpoint import (CheckpointLoader, find_latest_checkpoint,
get_deprecated_model_names, get_external_models,
get_mmcls_models, get_state_dict,
... |
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
... | from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
... |
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import bbox2roi
from .base_sampler import BaseSampler
@TASK_UTILS.register_module()
class OHEMSampler(BaseSampler):
r"""Online Hard Example Mining Sampler described in `Training Region-ba... | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.data_elements.bbox import bbox2roi
from mmdet.registry import TASK_UTILS
from .base_sampler import BaseSampler
@TASK_UTILS.register_module()
class OHEMSampler(BaseSampler):
r"""Online Hard Example Mining Sampler described in `Training Region... |
"""Function calling agent."""
import deprecated
from typing import Any, List, Optional
from llama_index.core.agent.runner.base import AgentRunner, AgentState
from llama_index.core.agent.function_calling.step import (
FunctionCallingAgentWorker,
DEFAULT_MAX_FUNCTION_CALLS,
)
from llama_index.core.base.llms.typ... | """Function calling agent."""
from typing import Any, List, Optional
from llama_index.core.agent.runner.base import AgentRunner, AgentState
from llama_index.core.agent.function_calling.step import (
FunctionCallingAgentWorker,
DEFAULT_MAX_FUNCTION_CALLS,
)
from llama_index.core.base.llms.types import ChatMess... |
import math
from typing import List, Optional
from llama_index.core.agent.react.types import (
BaseReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.bridge.pydantic import Field, BaseModel
from llama_index.core.prompts import PromptTemplate
# taken from the paper
DEFAULT_REFLECTION_PROMPT_STR = ""... | import math
from typing import List, Optional
from llama_index.core.agent.react.types import (
BaseReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.bridge.pydantic import Field, BaseModel
from llama_index.core.prompts import PromptTemplate
# taken from the paper
DEFAULT_REFLECTION_PROMPT_STR = ""... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | __version__ = '0.40.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()... |
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
... | # model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .... | # Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .d2_wrapper import Detectron2Wrapper
from .ddod import DDOD
from .deformab... |
import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
... | import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
... |
"""Chat generation output classes."""
from __future__ import annotations
from typing import Literal, Union
from pydantic import model_validator
from typing_extensions import Self
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
from langchain... | """Chat generation output classes."""
from __future__ import annotations
from typing import Literal, Union
from pydantic import computed_field
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
from langchain_core.utils._merge import merge_dict... |
from pathlib import Path
from typing import Tuple
import librosa
import pytest
from executor.vggish import vggish_input
from executor.vggish_audio_encoder import VggishAudioEncoder
from jina import Document, DocumentArray, Executor
from tensorflow.python.framework import ops
@pytest.fixture(scope="module")
def encod... | from pathlib import Path
import librosa
import pytest
from executor.vggish import vggish_input
from executor.vggish_audio_encoder import VggishAudioEncoder
from jina import Document, DocumentArray, Executor
from tensorflow.python.framework import ops
def test_config():
ex = Executor.load_config(str(Path(__file__... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
from mmengine.dataset import DefaultSampler
from mmengine.hooks import EMAHook
from mmengine.model import MomentumAnnealingEMA
from mmengine.runner import FlexibleRunner
from mmengine.testing.runner_test_case import ToyDataset, ToyMet... | # Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
from mmengine.dataset import DefaultSampler
from mmengine.hooks import EMAHook
from mmengine.model import MomentumAnnealingEMA
from mmengine.testing.runner_test_case import ToyDataset, ToyMetric
with read_base():
from ._base_.bas... |
import os
from typing import Type, Optional, TypeVar
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from rich.console import Console
import pickle
import base64
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode
fro... | import os
from typing import Type
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from rich.console import Console
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode
from docarray.base_document.mixins import ProtoMix... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def p... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def p... |
from __future__ import annotations
from sentence_transformers.sparse_encoder.losses.CSRLoss import CSRLoss
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import (
CSRReconstructionLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
Sparse... | from __future__ import annotations
from sentence_transformers.sparse_encoder.losses.CSRLoss import CSRLoss
from sentence_transformers.sparse_encoder.losses.ReconstructionLoss import (
ReconstructionLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultip... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from executor.audioclip_text import AudioCLIPTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 1024
@pytest.mark.parametrize('request_size', [1, 10, 5... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from executor.audioclip_text import AudioCLIPTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 1024
@pytest.mark.parametrize('request_size', [1, 10, 5... |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... |
"""
This script showcases a recommended approach to perform semantic search using quantized embeddings with FAISS and usearch.
In particular, it uses binary search with int8 rescoring. The binary search is highly efficient, and its index can be kept
in memory even for massive datasets: it takes (num_dimensions * num_do... | """
This script showcases a recommended approach to perform semantic search using quantized embeddings with FAISS and usearch.
In particular, it uses binary search with int8 rescoring. The binary search is highly efficient, and its index can be kept
in memory even for massive datasets: it takes (num_dimensions * num_do... |
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/youtube_vis.py', '../_base_/default_runtime.py'
]
detector = _base_.model
detector.pop('data_preprocessor')
detector.roi_head.bbox_head.update(dict(num_classes=40))
detector.roi_head.mask_head.update(dict(num_classes=40))
detector.train_cf... | _base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/youtube_vis.py', '../_base_/default_runtime.py'
]
detector = _base_.model
detector.pop('data_preprocessor')
detector.roi_head.bbox_head.update(dict(num_classes=40))
detector.roi_head.mask_head.update(dict(num_classes=40))
detector.train_cf... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def p... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentPars... |
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"LargeList",
"List",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
"Video",
"Pdf",
]
from .audio import Audio
from .features import Ar... | __all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"LargeList",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
"Video",
"Pdf",
]
from .audio import Audio
from .features import Array2D, Array... |
import os
from pathlib import Path
import pytest
from jina import Flow
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.deployments import Deployment
from jina.parsers import set_deployment_parser
from jina.serve.executors import BaseExecutor
cur_dir = os.path.dirname(os.path.abspath(__file__))
de... | import os
from pathlib import Path
import pytest
from jina import Flow
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.deployments import Deployment
from jina.parsers import set_deployment_parser
from jina.serve.executors import BaseExecutor
cur_dir = os.path.dirname(os.path.abspath(__file__))
de... |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
features: Optional... | from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
features: Optional... |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... | """
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... |
import pytest
import test_models as TM
import torch
from common_utils import cpu_and_cuda, set_rng_seed
from torchvision.prototype import models
@pytest.mark.parametrize("model_fn", (models.depth.stereo.raft_stereo_base,))
@pytest.mark.parametrize("model_mode", ("standard", "scripted"))
@pytest.mark.parametrize("dev"... | import pytest
import test_models as TM
import torch
from common_utils import cpu_and_gpu, set_rng_seed
from torchvision.prototype import models
@pytest.mark.parametrize("model_fn", (models.depth.stereo.raft_stereo_base,))
@pytest.mark.parametrize("model_mode", ("standard", "scripted"))
@pytest.mark.parametrize("dev",... |
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.19.5"
SCIPY_MIN_VERSION = "1.6.0"
JOBLIB_MIN_VERSION = "1... | """All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.19.5"
SCIPY_MIN_VERSION = "1.6.0"
JOBLIB_MIN_VERSION = "1... |
_base_ = './solo_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 768), (1333, 736)... | _base_ = './solo_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
# TODO: Update after mmcv.RandomChoiceResize finish refactor
type='Rando... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import bbox_overlaps, get_box_tensor
def cast_tensor_type(x, scale=1., dtype=None):
if dtype == 'fp16':
# scale is for preventing overflows
x = (x / scale).half()
retu... | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import bbox_overlaps, get_box_tensor
def cast_tensor_type(x, scale=1., dtype=None):
if dtype == 'fp16':
# scale is for preventing overflows
x = (x / scale).half()
retu... |
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
vis_backends = [dict(type='LocalVisBackend'), dict(type='WandBVisBackend')]
visualizer = dict(vis_backends=vis_backends)
# MMEngine supports the ... | # TODO: Awaiting refactoring
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# Set evaluation interval
evaluation = dict(interval=2)
# Set checkpoint interval
checkpoint_config = dict(interval=... |
import json
from jina.logging.logger import JinaLogger
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
JINA_LOGO_URL = 'https://api.jina.ai/logo/logo-product/jina-core/horizontal-layout/colored/Product%20logo_Core_vertical_colorful%402x-margin.png'
GATEWAY_... | import json
from jina.logging.logger import JinaLogger
from jina.parsers import set_gateway_parser
from jina.serve.networking import GrpcConnectionPool
from jina.serve.runtimes.gateway.graph.topology_graph import TopologyGraph
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
JINA_LOGO_URL = 'https://a... |
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, mask2ndarray, multi_apply,... | # Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, mask2ndarray, multi_apply,... |
from __future__ import annotations
import logging
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.utils.json import parse_json_markdown
from langchain.agents.agent import AgentOutputParser
logger = lo... | from __future__ import annotations
import logging
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.utils.json import parse_json_markdown
from langchain.agents.agent import AgentOutputParser
logger = lo... |
from sentence_transformers import models
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
print("# ------------------------------------------example with v2 distill-----------------------------------------")
doc_en... | from sentence_transformers import models
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
print("# ------------------------------------------example with v2 distill-----------------------------------------")
doc_en... |
import os
import time
import pytest
import requests
from docarray import Document
from jina import Client, Flow
from jina.helper import random_port
from jina.serve.runtimes.servers import BaseServer
from tests.integration.multiple_protocol_gateway.gateway.multiprotocol_gateway import (
MultiProtocolGateway,
)
cu... | import os
import time
import pytest
import requests
from docarray import Document
from jina import Client, Flow
from jina.helper import random_port
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from tests.integration.multiple_protocol_gateway.gateway.multiprotocol_gateway import (
MultiProtocolGatew... |
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "prithivida/S... | import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "prithivida/S... |
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# TODO(1.7): remove this file
import warnings as _warnings
with _warnings.catch_warnings():
_warnings.simplefilter("ignore")
# joblib imports may raise DeprecationWarning on certain Python
# versions
import joblib
from... | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# TODO(1.7): remove this file
import warnings as _warnings
with _warnings.catch_warnings():
_warnings.simplefilter("ignore")
# joblib imports may raise DeprecationWarning on certain Python
# versions
import joblib
from... |
import pytest
from jina import Client, Deployment, Executor, requests
from jina._docarray import Document, DocumentArray
from jina.excepts import BadServer
from jina.helper import random_port
class MyExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in ra... | import pytest
from jina import Client, Executor, requests
from jina._docarray import Document, DocumentArray
from jina.helper import random_port
class MyExecutor(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(100):
yield Document(text=f'{do... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.preprocessing.sequence import TimeseriesGenerator
from keras.src.legacy.preprocessing.sequence import make_sampling_table
from keras.src.legacy.preprocessing.sequence import sk... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.utils.sequence_utils import pad_sequences
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.le... |
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.layers.GaussianNoise")
class GaussianNoise(layers.Layer):
"""Apply additive zero-centered Gaussian noise.
This is useful to mitigate overfitting
(you cou... | from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.layers.GaussianNoise")
class GaussianNoise(layers.Layer):
"""Apply additive zero-centered Gaussian noise.
This is useful to mitigate overfitting
(you cou... |
import numpy as np
import pytest
from docarray import Document
@pytest.fixture
def doc():
d = Document(
text='test',
embedding=np.random.random(10),
tags={
'v': np.zeros(3),
'w': 0,
'x': 0.1,
'y': 1.5,
'z': 1,
'name':... | import pytest
from docarray import Document
import numpy as np
@pytest.fixture
def doc():
d = Document(
text='test',
embedding=np.random.random(10),
tags={
'x': 0.1,
'y': 1.5,
'z': 1,
'name': 'test',
'bar': '',
'labels... |
import csv
import pathlib
from typing import Any, Callable, Optional, Tuple
import PIL
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class GTSRB(VisionDataset):
"""`German Traffic Sign Recognition Benchmark (GTSRB) <https://ben... | import csv
import pathlib
from typing import Any, Callable, Optional, Tuple
import PIL
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class GTSRB(VisionDataset):
"""`German Traffic Sign Recognition Benchmark (GTSRB) <https://ben... |
from __future__ import annotations
from enum import Enum
from typing import Callable
from numpy import ndarray
from torch import Tensor
from .util import (
cos_sim,
dot_score,
euclidean_sim,
manhattan_sim,
pairwise_cos_sim,
pairwise_dot_score,
pairwise_euclidean_sim,
pairwise_manhatta... | from __future__ import annotations
from enum import Enum
from typing import Callable
from numpy import ndarray
from torch import Tensor
from .util import (
cos_sim,
dot_score,
euclidean_sim,
manhattan_sim,
pairwise_cos_sim,
pairwise_dot_score,
pairwise_euclidean_sim,
pairwise_manhatta... |
"""Document loaders."""
from langchain_core.document_loaders.base import BaseBlobParser, BaseLoader
from langchain_core.document_loaders.blob_loaders import Blob, BlobLoader, PathLike
from langchain_core.document_loaders.langsmith import LangSmithLoader
__all__ = [
"BaseBlobParser",
"BaseLoader",
"Blob",
... | from langchain_core.document_loaders.base import BaseBlobParser, BaseLoader
from langchain_core.document_loaders.blob_loaders import Blob, BlobLoader, PathLike
from langchain_core.document_loaders.langsmith import LangSmithLoader
__all__ = [
"BaseBlobParser",
"BaseLoader",
"Blob",
"BlobLoader",
"Pa... |
AMI_ID = {
# Managed by XGBoost team
"linux-amd64-gpu": {
"us-west-2": "ami-070080d04e81c5e39",
},
"linux-amd64-mgpu": {
"us-west-2": "ami-070080d04e81c5e39",
},
"windows-gpu": {
"us-west-2": "ami-07c14abcf529d816a",
},
"windows-cpu": {
"us-west-2": "ami-0... | AMI_ID = {
# Managed by XGBoost team
"linux-amd64-gpu": {
"us-west-2": "ami-08c3bc1dd5ec8bc5c",
},
"linux-amd64-mgpu": {
"us-west-2": "ami-08c3bc1dd5ec8bc5c",
},
"windows-gpu": {
"us-west-2": "ami-03c7f2156f93b22a7",
},
"windows-cpu": {
"us-west-2": "ami-0... |
# coding: utf-8
from pathlib import Path
import pandas as pd
import lightgbm as lgb
if lgb.compat.MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError("You need to install matplotlib and restart your session for plot_example.py.")
print("Loading data...")
# load or create your datas... | # coding: utf-8
from pathlib import Path
import pandas as pd
import lightgbm as lgb
if lgb.compat.MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError("You need to install matplotlib and restart your session for plot_example.py.")
print("Loading data...")
# load or create your datas... |
from __future__ import annotations
from copy import deepcopy
import pytest
from sentence_transformers import CrossEncoder
@pytest.fixture()
def distilroberta_base_ce_model() -> CrossEncoder:
return CrossEncoder("distilroberta-base", num_labels=1)
@pytest.fixture(scope="session")
def _reranker_bert_tiny_model... | from __future__ import annotations
import pytest
from sentence_transformers import CrossEncoder
@pytest.fixture()
def distilroberta_base_ce_model() -> CrossEncoder:
return CrossEncoder("distilroberta-base", num_labels=1)
@pytest.fixture()
def reranker_bert_tiny_model() -> CrossEncoder:
return CrossEncoder... |
import logging
import os
import zlib
from contextlib import asynccontextmanager
from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse
from uuid import uuid4
from dotenv import load_dotenv
from prisma import Prisma
from pydantic import BaseModel, Field, field_validator
from backend.util.retry import conn... | import logging
import os
import zlib
from contextlib import asynccontextmanager
from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse
from uuid import uuid4
from dotenv import load_dotenv
from prisma import Prisma
from pydantic import BaseModel, Field, field_validator
from backend.util.retry import conn... |
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _FillType, ... | from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _FillType, ... |
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.AlphaDropout")
class AlphaDropout(Layer):
"""Applies Alpha Dropout to the input.
Alpha Dropout is a `Dropout` that keeps mean and variance... | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.AlphaDropout")
class AlphaDropout(Layer):
"""Applies Alpha Dropout to the input.
Alpha Dropout is a `Dropout` that keeps mean and variance... |
import builtins
import json
from typing import Optional, Type
from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType
class RuleSchema(BaseModel):
"""Schema for owner operations."""
... | import builtins
import json
from typing import Optional, Type
from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType
class RuleSchema(BaseModel):
"""Schema for owner operations."""
... |
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"LargeList",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
"Video",
"Pdf",
]
from .audio import Audio
from .features import Array2D, Array... | __all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"LargeList",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
"Video",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D... |
"""langchain-core version information and utilities."""
VERSION = "0.3.55"
| """langchain-core version information and utilities."""
VERSION = "0.3.54"
|
import random
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.utils.data import Dataset
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.testing_utils import (
TestCasePlus,
backend_device_c... | import random
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.utils.data import Dataset
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.testing_utils import (
TestCasePlus,
backend_device_c... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Union
from mmcv.cnn import ConvModule
from torch import Tensor
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class HTCMaskHead(FCNMaskHead):
"""Mask head for HTC.
Args:
... | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class HTCMaskHead(FCNMaskHead):
def __init__(self, with_conv_res=True, *args, **kwargs):
super(HTCMaskHead, self).__init__(*a... |
from __future__ import annotations
__version__ = "3.3.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import export_dynamic_quantized_onnx_model, export_optimized_onnx_model
from sentence_transformers.cross_encoder.CrossEncoder import CrossEn... | from __future__ import annotations
__version__ = "3.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import export_dynamic_quantized_onnx_model, export_optimized_onnx_model
from sentence_transformers.cross_encoder.CrossEncoder import CrossEn... |
from pathlib import Path
import h5py
import numpy as np
import pandas as pd
import lightgbm as lgb
class HDFSequence(lgb.Sequence):
def __init__(self, hdf_dataset, batch_size):
"""
Construct a sequence object from HDF5 with required interface.
Parameters
----------
hdf_d... | from pathlib import Path
import h5py
import numpy as np
import pandas as pd
import lightgbm as lgb
class HDFSequence(lgb.Sequence):
def __init__(self, hdf_dataset, batch_size):
"""
Construct a sequence object from HDF5 with required interface.
Parameters
----------
hdf_d... |
from typing import Optional
import aiohttp
import numpy as np
import pytest
from docarray import DocumentArray
from docarray.document.generators import from_ndarray
from jina import Client, Flow
from jina.excepts import BadClientCallback
def validate(x):
raise NotImplementedError
@pytest.mark.skip(
reason... | from typing import Optional
import aiohttp
import numpy as np
import pytest
from docarray.document.generators import from_ndarray
from docarray import DocumentArray
from jina import Client, Flow
from jina.excepts import BadClientCallback
def validate(x):
raise NotImplementedError
@pytest.mark.skip(
reason... |
from collections import Counter
from typing import Tuple, Dict, Union, Optional, TYPE_CHECKING
import numpy as np
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri
if TYPE_CHECKING:
from docarray.typing import T
class TextDataMixin:
"""Provide helper functions for :class:`Document` to s... | from collections import Counter
from typing import Tuple, Dict, Union, Optional, TYPE_CHECKING
import numpy as np
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri
if TYPE_CHECKING:
from docarray.typing import T
class TextDataMixin:
"""Provide helper functions for :class:`Document` to s... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import librosa
from jina import Flow, Document, DocumentArray
from ...vggish import vggish_input
from ...vggish_audio_encoder import VggishAudioEncoder
cur_dir = os.path.dirname(os.path.abspath(__fil... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import librosa
from jina import Flow, Document, DocumentArray
from vggish import vggish_input
try:
from vggish_audio_encoder import VggishAudioEncoder
except:
from jinahub.encoders.audio.vggis... |
import random
import numpy as np
import pytest
from catboost_ranker import CatboostRanker
from jina import Document, DocumentArray
NUM_DOCS = 1000
NUM_MATCHES = 5
@pytest.fixture
def ranker():
return CatboostRanker(
query_features=['brand', 'price'],
match_features=['brand', 'price'],
re... | import random
import numpy as np
import pytest
from jina import Document, DocumentArray
from ..catboost_ranker import CatboostRanker
NUM_DOCS = 1000
NUM_MATCHES = 5
@pytest.fixture
def ranker():
return CatboostRanker(
query_features=['brand', 'price'],
match_features=['brand', 'price'],
... |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... | """
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from typing import Tuple
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUA... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from typing import Tuple
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUA... |
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.t... | from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
try:
imp... |
"""RTF (Rich Text Format) reader."""
from pathlib import Path
from typing import List, Union, Any, Dict, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class RTFReader(BaseReader):
"""RTF (Rich Text Format) Reader. Reads rtf file and convert to Documen... | """RTF (Rich Text Format) reader."""
from pathlib import Path
from typing import List, Union, Any, Dict, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class RTFReader(BaseReader):
"""RTF (Rich Text Format) Reader. Reads rtf file and convert to Document... |
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransfor... | from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from .ContrastiveLoss import SiameseDistanceMetric
from sentence_transformers.SentenceTransformer import SentenceTransformer
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer... |
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
f... | from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
f... |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
from datasets.utils._hf_hub_fixes import create_repo, delete_repo
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_T... | import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
from datasets.utils._hf_hub_fixes import create_repo, delete_repo
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_T... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.tavily_search import TavilySearchAPIWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling o... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.tavily_search import TavilySearchAPIWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling o... |
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.base_doc.doc import BaseDoc
from docarray.documents import Mesh3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
pytestmark = ... | import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.base_doc.doc import BaseDoc
from docarray.documents import Mesh3D
from docarray.utils._internal.pydantic import is_pydantic_v2
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https:/... |
from setuptools import find_packages
import setuptools
setuptools.setup(
name="jina-executors",
version="0.0.1",
author='Jina Dev Team',
author_email='dev-team@jina.ai',
description="A selection of Executors for Jina",
url="https://github.com/jina-ai/executors",
classifiers=[
"Progr... | from setuptools import find_packages
import setuptools
setuptools.setup(
name="jinahub-indexer",
version="0.0.1",
author='Jina Dev Team',
author_email='dev-team@jina.ai',
description="A set of indexers for Jina",
url="https://github.com/jina-ai/indexers",
classifiers=[
"Programming ... |
from .hubert_dataset import BucketizeBatchSampler, CollateFnHubert, HuBERTDataSet
__all__ = [
"BucketizeBatchSampler",
"CollateFnHubert",
"HuBERTDataSet",
]
| from .hubert_dataset import (
BucketizeBatchSampler,
CollateFnHubert,
HuBERTDataSet,
)
__all__ = [
"BucketizeBatchSampler",
"CollateFnHubert",
"HuBERTDataSet",
]
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `Senten... | from __future__ import annotations
import csv
import gzip
import os
from . import InputExample
class STSDataReader:
"""Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
Default values expects a tab separated file with the first & second column... |
from keras.src.layers.layer import Layer
from keras.src.metrics.metric import Metric
from keras.src.optimizers.optimizer import Optimizer
from keras.src.saving import saving_lib
from keras.src.saving.keras_saveable import KerasSaveable
def map_saveable_variables(saveable, store, visited_saveables):
# If the savea... | from keras.src.layers.layer import Layer
from keras.src.metrics.metric import Metric
from keras.src.optimizers.optimizer import Optimizer
from keras.src.saving import saving_lib
def map_trackable_variables(trackable, store, visited_trackables):
# If the trackable has already been saved, skip it.
if id(trackab... |
"""
This script trains sentence transformers with a triplet loss function.
As a corpus, we use the wikipedia sections dataset that was described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
import logging
import traceback
from datetime import datetime
from datasets import load_da... | """
This script trains sentence transformers with a triplet loss function.
As a corpus, we use the wikipedia sections dataset that was described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
from sentence_transformers import SentenceTransformer, InputExample, LoggingHandler, losses... |
# Copyright (c) OpenMMLab. All rights reserved.
import contextlib
import sys
import time
import torch
if sys.version_info >= (3, 7):
@contextlib.contextmanager
def profile_time(trace_name,
name,
enabled=True,
stream=None,
end... | import contextlib
import sys
import time
import torch
if sys.version_info >= (3, 7):
@contextlib.contextmanager
def profile_time(trace_name,
name,
enabled=True,
stream=None,
end_stream=None):
"""Print time spent by CP... |
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
... | from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .openimages_metric import OpenImagesMetric
from .voc_metric import VOCMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'C... | # Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .openimages_metric import OpenImagesMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMet... |
# Copyright 2025 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in... | # Copyright 2024 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in... |
"""
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW:
https://www.kaggle.com/datasets/jessicali9530/lfw-datas... | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233M... |
from docarray.base_document.mixins.proto import ProtoMixin
from docarray.base_document.mixins.update import UpdateMixin
__all__ = ['ProtoMixin', 'UpdateMixin']
| from docarray.base_document.mixins.plot import PlotMixin
from docarray.base_document.mixins.proto import ProtoMixin
__all__ = ['PlotMixin', 'ProtoMixin']
|