| input (string, 33–5k chars) | output (string, 32–5k chars) |
|---|---|
"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, a... | """LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, a... |
from pathlib import Path
from typing import List
import numpy as np
import pytest
import scipy
from jina import Document, DocumentArray, Executor
from jina.excepts import ExecutorFailToLoad
from tfidf_text_executor import TFIDFTextEncoder
_EMBEDDING_DIM = 130107
@pytest.fixture(scope='session')
def basic_encoder() ... | from pathlib import Path
from typing import List
import numpy as np
import pytest
import scipy
from jina import Document, DocumentArray, Executor
from jina.excepts import PretrainedModelFileDoesNotExist
from tfidf_text_executor import TFIDFTextEncoder
_EMBEDDING_DIM = 130107
@pytest.fixture(scope='session')
def bas... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth L1 loss.
Args:
pred (torch.Tensor)... | import mmcv
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor):... |
import copy as cp
from dataclasses import fields
from functools import lru_cache
from typing import TYPE_CHECKING, Optional, Tuple, Dict
from docarray.dataclasses import is_multimodal
from docarray.helper import typename
if TYPE_CHECKING:
from docarray.typing import T
@lru_cache()
def _get_fields(dc):
retur... | import copy as cp
from dataclasses import fields
from functools import lru_cache
from typing import TYPE_CHECKING, Optional, Tuple, Dict
from .dataclasses import is_multimodal
from .helper import typename
if TYPE_CHECKING:
from .typing import T
@lru_cache()
def _get_fields(dc):
return [f.name for f in field... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import load_checkpoint
from mmdet.registry import MODELS
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Impleme... | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import load_checkpoint
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@DETECTORS.register_module()
class LAD(KnowledgeDistill... |
_base_ = [
'../common/ms-poly_3x_coco-instance.py',
'../_base_/models/mask-rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| _base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
# f... | _base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
tra... |
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _s... | from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _s... |
_base_ = [
'mmdet::_base_/models/mask-rcnn_r50_fpn.py',
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
# please install the mmclassification dev-1.x branch
# import mmcls.models to trigger register_module in mmcls
custom_imports... | _base_ = [
'mmdet::_base_/models/mask-rcnn_r50_fpn.py',
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
# please install the mmclassification dev-1.x branch
# import mmcls.models to trigger register_module in mmcls
custom_imports... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.optimizers import legacy
from keras.api.optimizers import schedules
from keras.src.optimizers import deserialize
from keras.src.optimizers import get
from keras.src.optimizers import ... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.optimizers import legacy
from keras.api.optimizers import schedules
from keras.src.optimizers import deserialize
from keras.src.optimizers import get
from keras.src.optimizers import ... |
"""Tool for the Google Trends"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper
class GoogleTrendsQueryRun(BaseTool):
"""Tool that queries the Goo... | """Tool for the Google Trends"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper
class GoogleTrendsQueryRun(BaseTool): # type: ignore[override]
""... |
# Copyright 2025 Custom Diffusion authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl... | # Copyright 2024 Custom Diffusion authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl... |
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import ... | # Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import ADE20KDataset, ADE20KPanopticDataset
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
f... |
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any
from sentence_transformers.model_card import SentenceTransformerModelCardCallback, SentenceTransformerModelCardData
from sentence_transformers.util import is_datase... | from __future__ import annotations
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any
from sentence_transformers.model_card import (
SentenceTransformerModelCardCallback,
SentenceTransformerModelCardData,
)
from sentence_transformers.util imp... |
import pytest
from backend.data import db
from backend.executor import ExecutionScheduler
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
from backend.util.test import SpinTestServer
@pytest.mark.... | import pytest
from backend.data import db
from backend.executor import ExecutionScheduler
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
from backend.util.test import SpinTestServer
@pytest.mark.... |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable... | # coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable... |
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ... | # Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ... |
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .au... | from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import Autoe... |
from typing import Any, Literal
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField
from b... | from typing import Any, Literal
import requests
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, CredentialsMetaInput, Sc... |
import asyncio
from itertools import cycle
from typing import Any, Optional, Union
from uuid import UUID
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from typing_extensions import override
from langchain_core.callbacks.base import AsyncCallbackHandler
from langch... | import asyncio
from itertools import cycle
from typing import Any, Optional, Union
from uuid import UUID
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from typing_extensions import override
from langchain_core.callbacks.base import AsyncCallbackHandler
from langch... |
import re
import torch
from torch.utils.hipify.hipify_python import PYTORCH_MAP, PYTORCH_TRIE
# It is not a good idea to directly apply hipify_torch to codegen, which will be vulnerable to cases like:
# "...
# from ..codecache import CudaKernelParamCache
# ..."
# In such cases, we do not need to hipify_torch ... | import re
import torch
from torch.utils.hipify.hipify_python import PYTORCH_MAP, PYTORCH_TRIE
# It is not a good idea to directly apply hipify_torch to codegen, which will be vulnerable to cases like:
# "...
# from ..codecache import CudaKernelParamCache
# ..."
# In such cases, we do not need to hipify_torch ... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmengine
from mmengine import Config, DictAction
from mmengine.evaluator import Evaluator
from mmdet.registry import DATASETS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Evalua... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from mmengine.config import Config, DictAction
from mmengine.fileio import load
from mmdet.datasets import build_dataset
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Eval... |
"""
Test the standard tests on the custom chat model in the docs
"""
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_tests.unit_tests import ChatModelUnitTests
from .custom_chat_model import ChatParrotLink
class TestChatParrotLinkUnit(ChatModelUnitTests):
@property
def... | """
Test the standard tests on the custom chat model in the docs
"""
from typing import Type
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_tests.unit_tests import ChatModelUnitTests
from .custom_chat_model import ChatParrotLink
class TestChatParrotLinkUnit(ChatModelUnitTest... |
import pytest
from langchain_tests.integration_tests.base_store import (
BaseStoreAsyncTests,
BaseStoreSyncTests,
)
from langchain_core.stores import InMemoryStore
# Check against standard tests
class TestSyncInMemoryStore(BaseStoreSyncTests):
@pytest.fixture
def kv_store(self) -> InMemoryStore:
... | import pytest
from langchain_tests.integration_tests.base_store import (
BaseStoreAsyncTests,
BaseStoreSyncTests,
)
from langchain_core.stores import InMemoryStore
# Check against standard tests
class TestSyncInMemoryStore(BaseStoreSyncTests):
@pytest.fixture
def kv_store(self) -> InMemoryStore:
... |
# coding: utf-8
from pathlib import Path
import pandas as pd
import lightgbm as lgb
if lgb.compat.MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError("You need to install matplotlib and restart your session for plot_example.py.")
print("Loading data...")
# load or create your datas... | # coding: utf-8
from pathlib import Path
import pandas as pd
import lightgbm as lgb
if lgb.compat.MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You need to install matplotlib and restart your session for plot_example.py.')
print('Loading data...')
# load or create your datas... |
from typing import Union, Sequence, List, Tuple
from ai21.models import ChatMessage as J2ChatMessage, RoleType
from ai21.models.chat import (
ChatMessage as AI21ChatMessage,
AssistantMessage,
ToolMessage as AI21ToolMessage,
UserMessage,
SystemMessage,
)
from llama_index.core.base.llms.types import ... | from typing import Union, Sequence, List, Tuple
from ai21.models import ChatMessage as J2ChatMessage, RoleType
from ai21.models.chat import (
ChatMessage as AI21ChatMessage,
AssistantMessage,
ToolMessage as AI21ToolMessage,
UserMessage,
SystemMessage,
)
from llama_index.core.base.llms.types import ... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | epsilla_config = {
"protocol": 'http',
"host": 'localhost',
"port": 8888,
"is_self_hosted": True,
"db_path": "/epsilla",
"db_name": "tony_doc_array_test",
}
def index_len(index, max_len=20):
return len(index.filter("", limit=max_len))
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import extract_patches
from keras.src.ops.image import hsv_to_rgb
from keras.src... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import extract_patches
from keras.src.ops.image import map_coordinates
from kera... |
"""
This module provides backward-compatible exports of core language model classes.
These classes are re-exported for compatibility with older versions of LangChain
and allow users to import language model interfaces from a stable path.
Exports:
- LLM: Abstract base class for all LLMs
- BaseLLM: Deprecated o... | # Backwards compatibility.
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import (
LLM,
BaseLLM,
)
__all__ = [
"LLM",
"BaseLLM",
"BaseLanguageModel",
]
|
"""LLM Prompt Program."""
from abc import abstractmethod
from typing import Any, Generic, Optional, Type, TypeVar
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.types import BasePydanticProgram, Model
LM = TypeVar("LM")
class Bas... | """LLM Prompt Program."""
from abc import abstractmethod
from typing import Any, Generic, Optional, Type, TypeVar
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.types import BasePydanticProgram, Model
LM = TypeVar("LM")
class Bas... |
from .document import DocumentArray
from .storage.sqlite import StorageMixins, SqliteConfig
__all__ = ['SqliteConfig', 'DocumentArraySqlite']
class DocumentArraySqlite(StorageMixins, DocumentArray):
"""
DocumentArray that stores Documents in a `SQLite database <https://www.sqlite.org/index.html>`_.
This... | from .document import DocumentArray
from .storage.sqlite import StorageMixins, SqliteConfig
__all__ = ['SqliteConfig', 'DocumentArraySqlite']
class DocumentArraySqlite(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
|
"""Argparser module for Pod runtimes"""
import argparse
from jina import helper
from jina.enums import PodRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_pod_parser(parser):
"""Mixing in arguments required by :class:`Pod` into the given parser.
:param parser: ... | """Argparser module for Pod runtimes"""
import argparse
from jina import helper
from jina.enums import PodRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_pod_parser(parser, port_monitoring=True):
"""Mixing in arguments required by :class:`Pod` into the given parse... |
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, ... | from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments, losses
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.models import Pooling, Transfor... |
import importlib
from types import ModuleType
import pytest
from dirty_equals import IsDict
from fastapi.testclient import TestClient
from ...utils import needs_py39, needs_py310
@pytest.fixture(
name="mod",
params=[
"tutorial001",
pytest.param("tutorial001_py310", marks=needs_py310),
... | import pytest
from dirty_equals import IsDict
from fastapi.testclient import TestClient
from docs_src.cookie_params.tutorial001 import app
@pytest.mark.parametrize(
"path,cookies,expected_status,expected_response",
[
("/items", None, 200, {"ads_id": None}),
("/items", {"ads_id": "ads_track"},... |
import pytest
from docarray import Document
from docarray.array.memory import DocumentArrayInMemory
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import Docum... | import pytest
from docarray import Document
from docarray.array.memory import DocumentArrayInMemory
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import Docum... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
tea... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
tea... |
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .dynamic_soft_label_assigner imp... | # Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .dynamic_soft_label_assigner imp... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List, Tuple
from jina import DocumentArray, Executor, requests
class MatchMerger(Executor):
"""
The MatchMerger merges the results of shards by appending all matches..
"""
def __... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List, Tuple
from jina import DocumentArray, Executor, requests
class MatchMerger(Executor):
"""
The MatchMerger merges the results of shards by appending all matches..
:param default... |
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
class WeightedLayerPooling(Module):
"""Token embeddings are weighted mean of their diff... | from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class WeightedLayerPooling(nn.Module):
"""Token embeddings are weighted mean of... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.registry import TASK_UTILS
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder div... | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divide... |
import functorch._C
import torch
from functorch._C import dim as _C
from .tree_map import tree_flatten, tree_map
from .wrap_type import wrap_type
_C._patch_tensor_class()
dims, DimList, dimlists = _C.dims, _C.DimList, _C.dimlists
class DimensionMismatchError(Exception):
pass
class DimensionBindError(Exceptio... | import functorch._C
import torch
from functorch._C import dim as _C
from .tree_map import tree_flatten, tree_map
from .wrap_type import wrap_type
_C._patch_tensor_class()
dims, DimList, dimlists = _C.dims, _C.DimList, _C.dimlists
class DimensionMismatchError(Exception):
pass
class DimensionBindError(Exceptio... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='CenterNet',
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='Pretra... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='CenterNet',
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='Pretra... |
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
It is designed for CrossEncoders ... | from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
It is designed for CrossEncoders ... |
import pytest
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.parametrize(
"text,answer,sources",
[
(
"This Agreement is governed by English law.\nSOURCES: 28-pl",
"This Agreement is governed ... | import pytest
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.parametrize(
"text,answer,sources",
[
(
"This Agreement is governed by English law.\nSOURCES: 28-pl",
"This Agreement is governed ... |
from torchaudio._internal.module_utils import dropping_support
# Initialize extension and backend first
from . import _extension # noqa # usort: skip
from ._backend import ( # noqa # usort: skip
AudioMetaData,
get_audio_backend,
info,
list_audio_backends,
load as _load,
save as _save,
s... | # Initialize extension and backend first
from . import _extension # noqa # usort: skip
from ._backend import ( # noqa # usort: skip
AudioMetaData,
get_audio_backend,
info,
list_audio_backends,
load,
save,
set_audio_backend,
)
from . import ( # noqa: F401
compliance,
datasets,
... |
_base_ = './faster-rcnn_hrnetv2p-w40-1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
... | _base_ = './faster_rcnn_hrnetv2p_w40_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
... |
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from docarray.proto import NodeProto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.helper import _uri_to_blob
class TextUrl(AnyUrl):
"""
URL to a text file.
Cane be remote (web) URL, or a local file path.
"""
... | from typing import Optional
from docarray.proto import NodeProto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.helper import _uri_to_blob
class TextUrl(AnyUrl):
"""
URL to a text file.
Cane be remote (web) URL, or a local file path.
"""
def _to_node_protobuf(self) -> No... |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... |
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import ... | import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import ... |
from .store import SupabaseIntegrationCredentialsStore
from .types import APIKeyCredentials, Credentials, OAuth2Credentials
__all__ = [
"SupabaseIntegrationCredentialsStore",
"Credentials",
"APIKeyCredentials",
"OAuth2Credentials",
]
| from .store import SupabaseIntegrationCredentialsStore
from .types import Credentials, APIKeyCredentials, OAuth2Credentials
__all__ = [
"SupabaseIntegrationCredentialsStore",
"Credentials",
"APIKeyCredentials",
"OAuth2Credentials",
]
|
import logging
from typing import Any
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import Sc... | import logging
from typing import Any
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import Sc... |
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .boxinst import BoxInst
from .base_detr import DetectionTransformer
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .co... | # Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .boxinst import BoxInst
from .base_detr import DetectionTransformer
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .co... |
"""Agent components."""
from typing import Any, Callable, Dict, Optional, Set
from llama_index.core.base.query_pipeline.query import (
QueryComponent,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.core.query_pipeline.components.function import (
FnComponent,
get_parameters,
)
# fr... | """Agent components."""
from typing import Any, Callable, Dict, Optional, Set
from llama_index.core.base.query_pipeline.query import (
QueryComponent,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.core.query_pipeline.components.function import (
FnComponent,
get_parameters,
)
# fr... |
import copy
from typing import Any, List, Optional
from jina.serve.gateway import BaseGateway
class CompositeGateway(BaseGateway):
"""GRPC Gateway implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
... | import copy
from typing import List, Optional
from jina.serve.gateway import BaseGateway
class CompositeGateway(BaseGateway):
"""GRPC Gateway implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super... |
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.... | # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.... |
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that l... | # Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that l... |
from .document import DocumentArray
from .storage.memory import StorageMixins
class DocumentArrayInMemory(StorageMixins, DocumentArray):
"""
Default DocumentArray that stores Documents in memory.
With this implementation, :meth:`match` and :meth:`find` perform exact (exhaustive) vector search.
Exampl... | from .document import DocumentArray
from .storage.memory import StorageMixins
class DocumentArrayInMemory(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
|
import asyncio
from itertools import cycle
from typing import Any, Optional, Union
from uuid import UUID
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from typing_extensions import override
from langchain_core.callbacks.base import AsyncCallbackHandler
from langch... | import asyncio
from itertools import cycle
from typing import Any, Optional, Union
from uuid import UUID
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from typing_extensions import override
from langchain_core.callbacks.base import AsyncCallbackHandler
from langch... |
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.2'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versio... | # Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.1'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versio... |
# -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
**Author**: `Moto Hira <moto@meta.com>`__
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transfo... | # -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
**Author**: `Moto Hira <moto@meta.com>`__
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transfo... |
import pytest
from backend.util.service import AppService, expose, get_service_client
TEST_SERVICE_PORT = 8765
class ServiceTest(AppService):
def __init__(self):
super().__init__()
@classmethod
def get_port(cls) -> int:
return TEST_SERVICE_PORT
@expose
def add(self, a: int, b: ... | import pytest
from backend.util.service import AppService, expose, get_service_client
TEST_SERVICE_PORT = 8765
class ServiceTest(AppService):
def __init__(self):
super().__init__()
@classmethod
def get_port(cls) -> int:
return TEST_SERVICE_PORT
@expose
def add(self, a: int, b: ... |
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is than pooled,
for example with mean-pooling.
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses, models
from ... | """
This example runs a CNN after the word embedding lookup. The output of the CNN is than pooled,
for example with mean-pooling.
"""
import sys
import traceback
from datasets import load_dataset
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer
from sentence_tran... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FCOS(SingleStageDetector):
"""Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_"""
def __init__(self,
backbone,
... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FCOS(SingleStageDetector):
"""Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_"""
def __init__(self,
backbone,
... |
from keras.src import backend
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
class BaseGlobalPooling(Layer):
"""Base global pooling layer."""
def __init__(
self, pool_dimensions, data_format=None, keepdims=False, **kwargs
):
super().__init__(**k... | from keras.src import backend
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
class BaseGlobalPooling(Layer):
"""Base global pooling layer."""
def __init__(
self, pool_dimensions, data_format=None, keepdims=False, **kwargs
):
super().__init__(**k... |
import torchaudio
_STREAM_READER = [
"StreamReader",
]
_STREAM_WRITER = [
"StreamWriter",
]
_PLAYBACK = [
"play_audio",
]
_LAZILY_IMPORTED = _STREAM_READER + _STREAM_WRITER + _PLAYBACK
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
if not torchaudio._extension._FFMPEG_INITIALIZE... | import torchaudio
_STREAM_READER = [
"StreamReader",
]
_STREAM_WRITER = [
"StreamWriter",
]
_LAZILY_IMPORTED = _STREAM_READER + _STREAM_WRITER
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
if not torchaudio._extension._FFMPEG_INITIALIZED:
torchaudio._extension._init_ffmp... |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url, hf_hub_url
class HfFileSystem(AbstractFileSystem):
"""Interface to files in a Huggin... | from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url, hf_hub_url
class HfFileSystem(AbstractFileSystem):
"""Interface to files in a Huggin... |
from __future__ import annotations
from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, TransformersTokenizerWrapper, WordTokenizer
__all__ = [
"WordTokenizer",
"WhitespaceTokenizer",
"PhraseTokenizer",
"ENGLIS... | from __future__ import annotations
from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
__all__ = ["WordTokenizer", "WhitespaceTokenizer", "PhraseTokenizer", "ENGLISH_STOP_WORDS"]
|
from docarray.typing.id import ID
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import Embedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.url import (
AnyUrl,
AudioUrl,
... | from docarray.typing.id import ID
from docarray.typing.tensor.embedding.embedding import Embedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.url import AnyUrl, ImageUrl, Mesh3DUrl, PointCloud3DUrl, TextUrl
__all__ = [
'NdArray',
... |
from ._vggish import VGGISH, VGGishBundle
from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
... | from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
"HiFiGANVocoderBundle",
]
|
from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
"HiFiGANVocoderBundle",
]
| from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .squim_pipeline import SQUIM_OBJECTIVE, SQUIM_SUBJECTIVE, SquimObjectiveBundle, SquimSubjectiveBundle
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EM... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, TypeVar, Optional, Type
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class BaseNode(ABC):
"""
A DocumentNode is an object than can be nested inside a Document.
A Document itself is a DocumentNode as well... |
"""Copyright 2024, XGBoost contributors"""
import pytest
from distributed import Client, Scheduler, Worker
from distributed.utils_test import gen_cluster
from xgboost import testing as tm
from xgboost.testing.dask import check_external_memory, get_rabit_args
@pytest.mark.parametrize("is_qdm", [True, False])
@gen_cl... | """Copyright 2024, XGBoost contributors"""
import pytest
from distributed import Client, Scheduler, Worker
from distributed.utils_test import gen_cluster
import xgboost as xgb
from xgboost import dask as dxgb
from xgboost import testing as tm
from xgboost.testing.dask import check_external_memory
@pytest.mark.param... |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class SOLO(SingleStageInstanceSegmentor):
"""`SOLO: Segmenting Objects b... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class SOLO(SingleStageInstanceSegmentor):
"""`SOLO: Segmenting Obje... |
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations... | _base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
... |
from typing import Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal, is_simple_tensor
... | from typing import Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import is_simple_tensor
def erase_image_tensor(
image: torch.Tensor, i: int, j: int,... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import (
UpstashRedisChatMessageHistory,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation w... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import (
UpstashRedisChatMessageHistory,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation w... |
from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def splade_bert_tiny_model_reused() -> SparseEnc... | from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture()
def csr_bert_tiny_model() -> SparseEncoder:
return SparseEn... |
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.utils._internal.misc import is_jax_available
if is_jax_available():
import jax.numpy as jnp
from jax import jit
from docarray.typing import JaxArray
@pytest.mark.jax
def test_basic_jax_operation():
def ba... | from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.utils._internal.misc import is_jax_available
if is_jax_available():
import jax.numpy as jnp
from jax import jit
from docarray.typing import JaxArray
@pytest.mark.jax
def test_basic_jax_operation():
def ba... |
import os
import pathlib
import pytest
from docarray.helper import (
protocol_and_compress_from_file_path,
add_protocol_and_compress_to_file_path,
filter_dict,
get_full_version,
_safe_cast_int,
)
@pytest.mark.parametrize(
'file_path', ['doc_array', '../docarray', './a_folder/docarray']
)
@py... | import os
import pathlib
import pytest
from docarray.helper import (
protocol_and_compress_from_file_path,
add_protocol_and_compress_to_file_path,
filter_dict,
get_full_version,
)
@pytest.mark.parametrize(
'file_path', ['doc_array', '../docarray', './a_folder/docarray']
)
@pytest.mark.parametriz... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils import is_installed
from .amp_optimizer_wrapper import AmpOptimWrapper
from .apex_optimizer_wrapper import ApexOptimWrapper
from .base import BaseOptimWrapper
from .builder import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
build_opt... | # Copyright (c) OpenMMLab. All rights reserved.
from .amp_optimizer_wrapper import AmpOptimWrapper
from .apex_optimizer_wrapper import ApexOptimWrapper
from .builder import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
build_optim_wrapper)
from .default_constructor import DefaultOptimWrapperConstructor... |
from typing import Any, Dict, Optional, Type
from jina.jaml.parsers.base import BaseLegacyParser
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
class GatewayLegacyParser(BaseLegacyParser):
"""Legacy parser for gateway."""... | from typing import Any, Dict, Optional, Type
from jina.jaml.parsers.base import BaseLegacyParser
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
class GatewayLegacyParser(BaseLegacyParser):
"""Legacy parser for gateway."""... |
"""monday.com reader."""
from typing import Dict, List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class MondayReader(BaseReader):
"""
monday.com reader. Reads board's data by a GraphQL query.
Args:
api_key (str): monday.com ... | """monday.com reader."""
from typing import Dict, List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class MondayReader(BaseReader):
"""
monday.com reader. Reads board's data by a GraphQL query.
Args:
api_key (str): monday.com A... |
"""Methods for scaling, centering, normalization, binarization, and more."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._data import (
Binarizer,
KernelCenterer,
MaxAbsScaler,
MinMaxScaler,
Normalizer,
PowerTransformer,
QuantileTransformer,
Rob... | """Methods for scaling, centering, normalization, binarization, and more."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._data import (
Binarizer,
KernelCenterer,
MaxAbsScaler,
MinMaxScaler,
Normalizer,
PowerTransformer,
QuantileTransformer,
Rob... |
from __future__ import annotations
__version__ = "4.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from senten... | from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from senten... |
from __future__ import annotations
import torch
import transformers
from PIL import Image
from torch import nn
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super(CLIPModel, self).__init__()
if processor_name is Non... | from typing import Dict, Union
import torch
import transformers
from PIL import Image
from torch import nn
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super(CLIPModel, self).__init__()
if processor_name is None:
... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.data_elements.mask import BitmapMasks
from mmdet.models.dense_heads import SOLOV2Head
def... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.core.mask import BitmapMasks
from mmdet.models.dense_heads import SOLOV2Head
def _rand_ma... |
"""
Example of training survival model with Dask on CPU
===================================================
"""
import os
import dask.array as da
import dask.dataframe as dd
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client: Cli... | """
Example of training survival model with Dask on CPU
===================================================
"""
import os
import dask.dataframe as dd
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client):
# Load an example surv... |
from typing import Iterable, Dict, Sequence
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``... | from typing import Iterable, Dict, Sequence
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``... |
from abc import ABC
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
class BaseBackendMixin(ABC):
TYPE_MAP: Dict
def _init_storage(
self,
_docs: Optional['DocumentArra... | from abc import ABC
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
class BaseBackendMixin(ABC):
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None... |
from __future__ import annotations
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field, SecretStr
from langchain_community.utilities.brave_search import BraveSearchWrapper
class BraveSearch(BaseTool): ... | from __future__ import annotations
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.brave_search import BraveSearchWrapper
class BraveSearch(BaseTool): # type: ignore[override]
"""Tool t... |
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_la... | from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_la... |
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import (AdaptiveAvgPool2d, FrozenBatchNorm2d,
adaptive_avg_pool2d)
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .... | # Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMom... |
import asyncio
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_reque... | import asyncio
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_reque... |
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.47"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_in... | """Top-level imports for LlamaIndex."""
__version__ = "0.12.46"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_in... |
from typing import TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing import NdArray
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.url_3d.url_3d import Url3D
T = TypeVar('T', bound='PointCloud3DUrl')
@_register_proto(proto_type_name='point_cloud_... | from typing import TYPE_CHECKING, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T', bound='PointCloud3DUrl')
class PointCloud3DUrl(Url3D):
... |
"""Configure global settings and get information about the working environment."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Machine learning module for Python
# ==================================
#
# sklearn is a Python module integrating classical machine
# learning algorithms... | """Configure global settings and get information about the working environment."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Machine learning module for Python
# ==================================
#
# sklearn is a Python module integrating classical machine
# learning algorithms... |
from abc import ABC
class BaseStandardTests(ABC):
""":private:"""
def test_no_overrides_DO_NOT_OVERRIDE(self) -> None:
"""Test that no standard tests are overridden.
:private:
"""
# find path to standard test implementations
comparison_class = None
def explor... | from abc import ABC
class BaseStandardTests(ABC):
"""
:private:
"""
def test_no_overrides_DO_NOT_OVERRIDE(self) -> None:
"""
Test that no standard tests are overridden.
:private:
"""
# find path to standard test implementations
comparison_class = None
... |
from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from senten... | from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from senten... |
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],... | import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],... |