| input | output |
|---|---|
from collections.abc import Sequence
from inspect import signature
from typing import Optional, Union
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import (
BaseDocumentCompressor,
BaseDocumentTransformer,
Document,
)
from pydantic import ConfigDict
class DocumentCo... | from inspect import signature
from typing import List, Optional, Sequence, Union
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import (
BaseDocumentCompressor,
BaseDocumentTransformer,
Document,
)
from pydantic import ConfigDict
class DocumentCompressorPipeline(Base... |
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import build_match_cost
from .match_cost import (BBoxL1Cost, ClassificationCost, DiceCost,
FocalLossCost, IoUCost)
__all__ = [
'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost',
'FocalLossCost', 'DiceCost'
]
| # Copyright (c) OpenMMLab. All rights reserved.
from .builder import build_match_cost
from .match_cost import BBoxL1Cost, ClassificationCost, FocalLossCost, IoUCost
__all__ = [
'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost',
'FocalLossCost'
]
|
"""Base argparser module for Pod and Deployment runtime"""
import argparse
import os
from jina.enums import PollingType
from jina.helper import random_identity
from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group
def mixin_essential_parser(parser):
"""Mixing in arguments required by every module into th... | """Base argparser module for Pod and Deployment runtime"""
import argparse
import os
from jina.enums import PollingType
from jina.helper import random_identity
from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group
def mixin_essential_parser(parser):
"""Mixing in arguments required by every module into th... |
from docarray import BaseDoc
from docarray.typing import ID
def test_set_id():
class MyDocument(BaseDoc):
id: ID
d = MyDocument(id="123")
assert isinstance(d.id, ID)
assert d.id == "123"
| from docarray import BaseDocument
from docarray.typing import ID
def test_set_id():
class MyDocument(BaseDocument):
id: ID
d = MyDocument(id="123")
assert isinstance(d.id, ID)
assert d.id == "123"
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.evaluator import DumpResults
from mmengine.runner import Runner
from mmdet.engine.hooks.utils import trigger_visualization_hook
from mmdet.registry import RUNNER... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.evaluator import DumpResults
from mmengine.runner import Runner
from mmdet.engine.hooks.utils import trigger_visualization_hook
from mmdet.registry import RUNNER... |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
get_paths,
)
@pyte... | from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
get_paths,
)
@pyte... |
import asyncio
import datetime
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.toggl.dto import TogglTrackItem, TogglOutFormat
class TogglReader(BaseReader):
def __init__(
self, api_token: str, u... | import asyncio
import datetime
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.toggl.dto import TogglTrackItem, TogglOutFormat
class TogglReader(BaseReader):
def __init__(
self, api_token: str, u... |
from pathlib import Path
import numpy as np
import paddlehub as hub
import pytest
from jina import Document, DocumentArray, Executor
from ...text_paddle import TextPaddleEncoder
@pytest.fixture(scope='function')
def model():
return hub.Module(name='ernie_tiny')
@pytest.fixture(scope='function')
def content():... | from pathlib import Path
import pytest
import numpy as np
import paddlehub as hub
from jina import Document, DocumentArray, Executor
from ...text_paddle import TextPaddleEncoder
@pytest.fixture(scope='function')
def model():
return hub.Module(name='ernie_tiny')
@pytest.fixture(scope='function')
def content():... |
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype.transforms import Transform
from torchvision.transforms import functional as _F
class ToTensor(Transform):
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) ->... | import warnings
from typing import Any, Dict, List, Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import Transform
from torchvision.transforms import functional as _F
from typing_extensions import Literal
from ._transform imp... |
from .audio_clip_encoder import AudioCLIPEncoder
| from .audio_clip_encoder import AudioCLIPEncoder |
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt'... | _base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt'... |
import argparse
from jina.enums import GatewayProtocolType
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :c... | import argparse
from jina.enums import GatewayProtocolType
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :c... |
"""(Unofficial) Google Keep reader using gkeepapi."""
import json
import os
from typing import Any, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class GoogleKeepReader(BaseReader):
"""
Google Keep reader.
Reads notes from Google Keep
"""
... | """(Unofficial) Google Keep reader using gkeepapi."""
import json
import os
from typing import Any, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class GoogleKeepReader(BaseReader):
"""Google Keep reader.
Reads notes from Google Keep
"""
de... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from typing import Tuple
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUA... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structu... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
from torch import Tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import (BaseBoxes, HorizontalBoxes, bbox2distance,
distance2bbox, get_box_tensor)
from .base_bbox_co... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import (HorizontalBoxes, bbox2distance,
distance2bbox, get_box_tensor)
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class DistancePointBBoxCod... |
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEmbeddingSimilarityEvaluator,
SparseEncoder,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initializ... | from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEmbeddingSimilarityEvaluator,
SparseEncoder,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLM... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
from mmengine.fileio import load
arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
# detectron replace bn with affine ... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import mmcv
import torch
arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
# detectron replace bn with affine channel layer
sta... |
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(pad_size_divisor=64),
neck=dict(
type='FPN_CARAFE',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
start_level=0,
end_level=-1,
norm_cfg=None,
... | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
neck=dict(
type='FPN_CARAFE',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'nor... |
import os
import pickle
from pathlib import Path
from typing import Optional, Tuple
from jina import DocumentArray, Executor, requests
from jina.excepts import PretrainedModelFileDoesNotExist
from jina_commons.batching import get_docs_batch_generator
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-... | import os
import pickle
from typing import Optional, Iterable, Tuple
from jina import Executor, requests, DocumentArray
from jina.excepts import PretrainedModelFileDoesNotExist
from jina_commons.batching import get_docs_batch_generator
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-idf sparse embe... |
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zero... | import numpy as np
from docarray import BaseDoc
from docarray.array import DocArrayStacked
from docarray.array.stacked.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tenso... |
from cupy import * # noqa: F403
# from cupy import * doesn't overwrite these builtin names
from cupy import abs, max, min, round # noqa: F401
# These imports may overwrite names from the import * above.
from ._aliases import * # noqa: F403
# See the comment in the numpy __init__.py
__import__(__package__ + '.linalg'... | from cupy import * # noqa: F403
# from cupy import * doesn't overwrite these builtin names
from cupy import abs, max, min, round # noqa: F401
# These imports may overwrite names from the import * above.
from ._aliases import * # noqa: F403
# See the comment in the numpy __init__.py
__import__(__package__ + '.linalg'... |
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str... | from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str... |
# CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHa... | # CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHa... |
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _s... | from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from ..base.backend import BaseBackendMixin, TypeMap
from ....helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHEC... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool as average_pool
from keras.src.ops.nn import batch_normalization as batch_normalization
from keras.src.ops.nn import binary_crossentropy as binary_crossentr... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from... |
"""This module is deprecated and will be removed in a future release.
Please use LangChainTracer instead.
"""
from typing import Any
def get_headers(*args: Any, **kwargs: Any) -> Any: # noqa: ARG001
"""Throw an error because this has been replaced by get_headers."""
msg = (
"get_headers for LangCha... | """This module is deprecated and will be removed in a future release.
Please use LangChainTracer instead.
"""
from typing import Any
def get_headers(*args: Any, **kwargs: Any) -> Any:
"""Throw an error because this has been replaced by get_headers."""
msg = (
"get_headers for LangChainTracerV1 is no... |
import os
import time
import uuid
from contextlib import contextmanager
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, RepositoryNotFoundError
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "hf_hZEmnoOEYISjraJt... | import os
import time
import uuid
from contextlib import contextmanager
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, RepositoryNotFoundError
CI_HUB_USER = "DSUser"
CI_HUB_USER_FULL_NAME = "Dummy Datasets User"
CI_HUB_USER_TOKEN = "hf_iiTdXZFWohTKHEfuQWoEmmmaEVCF... |
_base_ = './mask-rcnn_r101_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
... | _base_ = './mask_rcnn_r101_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import ElasticKnnSearch, ElasticVectorSearch
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handli... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import ElasticKnnSearch, ElasticVectorSearch
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handli... |
from abc import ABC, abstractmethod
from langchain_core.runnables.config import run_in_executor
from pydantic import BaseModel, Field
class SparseVector(BaseModel, extra="forbid"):
"""
Sparse vector structure
"""
indices: list[int] = Field(..., description="indices must be unique")
values: list[... | from abc import ABC, abstractmethod
from typing import List
from langchain_core.runnables.config import run_in_executor
from pydantic import BaseModel, Field
class SparseVector(BaseModel, extra="forbid"):
"""
Sparse vector structure
"""
indices: List[int] = Field(..., description="indices must be un... |
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='Darknet',
depth=53,
out_indices=(3, 4, 5),
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://darknet53')),
neck=dict(
type='YOLOV3Neck',
num_scal... | _base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='Darknet',
depth=53,
out_indices=(3, 4, 5),
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://darknet53')),
neck=dict(
type='YOLOV3Neck',
num_scal... |
from langchain_core.embeddings import Embeddings
from langchain_tests.unit_tests.embeddings import EmbeddingsUnitTests
from langchain_openai import AzureOpenAIEmbeddings
class TestAzureOpenAIStandard(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> type[Embeddings]:
return AzureOpenAIEmb... | from typing import Tuple, Type
from langchain_core.embeddings import Embeddings
from langchain_tests.unit_tests.embeddings import EmbeddingsUnitTests
from langchain_openai import AzureOpenAIEmbeddings
class TestAzureOpenAIStandard(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> Type[Embeddings... |
"""Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction, add_arg_group
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which... | """Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction, add_arg_group
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which... |
"""Google Search tool spec."""
import json
import httpx
import urllib.parse
from typing import Dict, List, Optional, Union
from llama_index.core.tools.tool_spec.base import BaseToolSpec
QUERY_URL_TMPL = (
"https://www.googleapis.com/customsearch/v1?key={key}&cx={engine}&q={query}"
)
class GoogleSearchToolSpec(... | """Google Search tool spec."""
import json
import urllib.parse
from typing import Optional
import requests
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
QUERY_URL_TMPL = (
"https://www.googleapis.com/customsearch/v1?key={key}&cx={engine}&q={query}"
)
... |
""" Helper module to manage torch vision models """
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torchvision.models as models
from torchvision.models.alexnet import __all__ as all_alexnet_models
from torchvision.models.densenet import __all__ as all_densenet_models
from torc... | """ Helper module to manage torch vision models """
from typing import Optional
import torch
import torchvision.models as models
import torch.nn as nn
import numpy as np
from torchvision.models.resnet import __all__ as all_resnet_models
from torchvision.models.alexnet import __all__ as all_alexnet_models
from torchvi... |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import pycocotools.mask as mask_util
import torch
def split_combined_polys(polys, poly_lens, polys_per_mask):
"""Split the combined 1-D polys into masks.
A mask is represented as a list of polys, and a poly is represented as
a... | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import pycocotools.mask as mask_util
import torch
def split_combined_polys(polys, poly_lens, polys_per_mask):
"""Split the combined 1-D polys into masks.
A mask is represented as a list of polys, and a poly is represented as
a... |
from typing import Any, Callable, Dict, Type
import orjson
from docarray.utils._internal.pydantic import is_pydantic_v2
if not is_pydantic_v2:
from pydantic.json import ENCODERS_BY_TYPE
else:
ENCODERS_BY_TYPE: Dict[Type[Any], Callable[[Any], Any]] = {
bytes: lambda o: o.decode(),
frozenset: l... | import orjson
from pydantic.json import ENCODERS_BY_TYPE
def _default_orjson(obj):
"""
default option for orjson dumps.
:param obj:
:return: return a json compatible object
"""
from docarray.base_doc import BaseNode
if isinstance(obj, BaseNode):
return obj._docarray_to_json_compat... |
import hashlib
import io
import os
import urllib
import warnings
from typing import List, Optional, Union
import torch
from tqdm import tqdm
from .audio import load_audio, log_mel_spectrogram, pad_or_trim
from .decoding import DecodingOptions, DecodingResult, decode, detect_language
from .model import Whisper, ModelD... | import hashlib
import io
import os
import urllib
import warnings
from typing import List, Optional, Union
import torch
from tqdm import tqdm
from .audio import load_audio, log_mel_spectrogram, pad_or_trim
from .decoding import DecodingOptions, DecodingResult, decode, detect_language
from .model import Whisper, ModelD... |
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACT... | import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACT... |
PREFIX = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text base... | # flake8: noqa
PREFIX = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human... |
import multiprocessing
import pytest
from jina import Client
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.servers import BaseServer
from jina.serve.runtimes.worker.request_handling import WorkerRequestHandler
from jina.serve.runtimes.... | import multiprocessing
import pytest
from jina import Client
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import _generate_pod_args
... |
_base_ = './cornernet_hourglass104_8xb6-210e-mstest_coco.py'
train_dataloader = dict(batch_size=3)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (3 samples per GPU)
auto_scale_lr = dict(base_batch_size=96)
| _base_ = './cornernet_hourglass104_mstest_8x6_210e_coco.py'
train_dataloader = dict(batch_size=3)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (3 samples per GPU)
auto_scale_lr = dict(base_batch_size=96)
|
import json
import re
from datetime import datetime
from typing import List
import requests
from tenacity import retry, stop_after_attempt, wait_random_exponential
def correct_date(yr, dt):
"""
Some transcripts have incorrect date, correcting it.
Args:
yr (int): actual
dt (datetime): giv... | import json
import re
from datetime import datetime
from typing import List
import requests
from tenacity import retry, stop_after_attempt, wait_random_exponential
def correct_date(yr, dt):
"""Some transcripts have incorrect date, correcting it.
Args:
yr (int): actual
dt (datetime): given da... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel
from torch.nn.parallel.distributed import DistributedDataParallel
from mmengine.model.wrappers import (MM... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel
from torch.nn.parallel.distributed import DistributedDataParallel
from mmengine.model.wrappers import (MM... |
import wave
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.audio_ndarray import MAX_INT_16, AudioNdArray
from docarray.typing.url.any_url import AnyUrl
if TYP... | import wave
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.tensor.audio.audio_ndarray import MAX_INT_16, AudioNdArray
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
from pydantic import BaseConfig
from py... |
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)... | import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | # Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... |
from typing import Any, Callable, Optional
import torch
from .. import transforms
from .vision import VisionDataset
class FakeData(VisionDataset):
"""A fake dataset that returns randomly generated images and returns them as PIL images
Args:
size (int, optional): Size of the dataset. Default: 1000 i... | from typing import Any, Callable, Optional, Tuple
import torch
from .. import transforms
from .vision import VisionDataset
class FakeData(VisionDataset):
"""A fake dataset that returns randomly generated images and returns them as PIL images
Args:
size (int, optional): Size of the dataset. Default:... |
from __future__ import annotations
from contextlib import nullcontext
import pytest
import torch
import tqdm
from torch.optim import Adam
from transformers import set_seed
from sentence_transformers import InputExample, SentenceTransformer, losses
@pytest.mark.parametrize(
["train_samples_mnrl", "train_samples... | from contextlib import nullcontext
from typing import List
import pytest
import torch
import tqdm
from torch.optim import Adam
from transformers import set_seed
from sentence_transformers import InputExample, SentenceTransformer, losses
@pytest.mark.parametrize(
["train_samples_mnrl", "train_samples_cmnrl", "sa... |
"""Init file."""
from llama_index.tools.zapier.base import (
ACTION_URL_TMPL,
ZapierToolSpec,
)
__all__ = ["ACTION_URL_TMPL", "ZapierToolSpec"]
| """Init file."""
from llama_index.tools.zapier.base import (
ACTION_URL_TMPL,
ZapierToolSpec,
)
__all__ = ["ACTION_URL_TMPL", "ZapierToolSpec"]
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor, VideoBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.... | from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor, VideoBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.... |
# coding: utf-8
import pytest
import lightgbm as lgb
from .utils import SERIALIZERS, pickle_and_unpickle_object
def reset_feature_fraction(boosting_round):
return 0.6 if boosting_round < 15 else 0.8
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_early_stopping_callback_is_picklable(serializer):
... | # coding: utf-8
import pytest
import lightgbm as lgb
from .utils import SERIALIZERS, pickle_and_unpickle_object
def reset_feature_fraction(boosting_round):
return 0.6 if boosting_round < 15 else 0.8
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_early_stopping_callback_is_picklable(serializer):
... |
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import NdArray, PointCloud3DUrl
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 't... | import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import NdArray, PointCloud3DUrl
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR... |
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
... | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
... |
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
RunnableSerializable --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
... | """**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
RunnableSerializable --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
... |
from typing import Any, Dict, List, Optional, Sequence, Tuple
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
DEFAULT_ANYSCALE_API_BASE = "https://api.endpoints.anyscale.com/v1"
DEFAULT_ANYSCALE_API_VERSION = ""
LLAMA_MO... | from typing import Any, Dict, List, Optional, Sequence, Tuple
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
DEFAULT_ANYSCALE_API_BASE = "https://api.endpoints.anyscale.com/v1"
DEFAULT_ANYSCALE_API_VERSION = ""
LLAMA_MO... |
# Copyright (c) OpenMMLab. All rights reserved.
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_method, import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
... | # Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_batch_norm, has_method, import_modules_from_strings,
... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.distributed.rpc import is_available
from mmengine.dist import is_main_process
from mmengine.utils import digit_version
from mmengine.utils.dl_utils import TORCH_VERSION
try:
from torch.distributed.optim import \
ZeroRedundancyOptimiz... | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.distributed.rpc import is_available
from mmengine.dist import is_main_process
from mmengine.utils import digit_version
from mmengine.utils.dl_utils import TORCH_VERSION
try:
from torch.distributed.optim import \
ZeroRedundancyOptimiz... |
import pytest
@pytest.mark.parametrize(
"model,expected",
[
("librispeech", ["the", "captain", "shook", "his", "head"]),
("librispeech-3-gram", ["the", "captain", "shook", "his", "head"]),
],
)
def test_decoder_from_pretrained(model, expected, emissions):
from torchaudio.prototype.ctc_... | import pytest
@pytest.mark.parametrize(
"model,expected",
[
("librispeech", ["the", "captain", "shook", "his", "head"]),
("librispeech-3-gram", ["the", "captain", "shook", "his", "head"]),
],
)
def test_decoder_from_pretrained(model, expected, emissions):
from torchaudio.prototype.ctc_... |
"""Pathway reader."""
import json
from typing import List, Optional
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
# Copied from https://github.com/pathwaycom/pathway/blob/main/python/pathway/xpacks/llm/vector_store.py
# to remove dependency on Path... | """Pathway reader."""
import json
from typing import List, Optional
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
# Copied from https://github.com/pathwaycom/pathway/blob/main/python/pathway/xpacks/llm/vector_store.py
# to remove dependency on Path... |
"""Tool for the Dataherald Hosted API"""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.dataherald import DataheraldAPIWrapper
class DataheraldTextToS... | """Tool for the Dataherald Hosted API"""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.dataherald import DataheraldAPIWrapper
class DataheraldTextToS... |
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
fr... | from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
fr... |
import datetime
import uuid
from unittest.mock import MagicMock, patch
from langsmith.schemas import Example
from langchain_core.document_loaders import LangSmithLoader
from langchain_core.documents import Document
def test_init() -> None:
LangSmithLoader(api_key="secret")
EXAMPLES = [
Example(
in... | import datetime
import uuid
from unittest.mock import MagicMock, patch
from langsmith.schemas import Example
from langchain_core.document_loaders import LangSmithLoader
from langchain_core.documents import Document
def test_init() -> None:
LangSmithLoader(api_key="secret")
EXAMPLES = [
Example(
in... |
import pytest
from llama_index.core.node_parser.text.semantic_double_merging_splitter import (
SemanticDoubleMergingSplitterNodeParser,
LanguageConfig,
)
from llama_index.core.schema import Document
doc = Document(
text="Warsaw: Warsaw, the capital city of Poland, is a bustling metropolis located on the b... | import pytest
from llama_index.core.node_parser.text.semantic_double_merging_splitter import (
SemanticDoubleMergingSplitterNodeParser,
LanguageConfig,
)
from llama_index.core.schema import Document
doc = Document(
text="Warsaw: Warsaw, the capital city of Poland, is a bustling metropolis located on the b... |
from keras.src.api_export import keras_export
from keras.src.optimizers import adam
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.AdamW"])
class AdamW(adam.Adam):
"""Optimizer that implements the AdamW algorithm.
AdamW optimization is a stochastic gradient descent method that is... | from keras.src.api_export import keras_export
from keras.src.optimizers import adam
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.AdamW"])
class AdamW(adam.Adam):
"""Optimizer that implements the AdamW algorithm.
AdamW optimization is a stochastic gradient descent method that is... |
_base_ = 'grounding_dino_swin-t_pretrain_obj365.py'
o365v1_od_dataset = dict(
type='ODVGDataset',
data_root='data/objects365v1/',
ann_file='o365v1_train_odvg.json',
label_map_file='o365v1_label_map.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base... | _base_ = 'grounding_dino_swin-t_pretrain_obj365.py'
o365v1_od_dataset = dict(
type='ODVGDataset',
data_root='data/objects365v1/',
ann_file='o365v1_train_odvg.jsonl',
label_map_file='o365v1_label_map.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_bas... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Optional
import spacy
from docarray import DocumentArray
from jina import Executor, requests
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'le... |
from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
)
@pytest.f... | from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
)
@pytest.fixt... |
import os
import urllib
import numpy as np
import PIL
import pytest
from PIL import Image
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.j... | import os
import urllib
import numpy as np
import PIL
import pytest
from PIL import Image
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.j... |
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (Collect, DefaultFormatB... | # Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formating import (Collect, DefaultFormatBu... |
"""**Text Splitters** are classes for splitting text.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> TextSplitter --> <name>TextSplitter # Example: CharacterTextSplitter
RecursiveCharacterTextSplitter --> <name>TextSplitter
Note: **MarkdownHea... | """**Text Splitters** are classes for splitting text.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> TextSplitter --> <name>TextSplitter # Example: CharacterTextSplitter
RecursiveCharacterTextSplitter --> <name>TextSplitter
Note: **MarkdownHea... |
"""Test for Serializable base class"""
import json
import os
from typing import Any
from unittest.mock import patch
import pytest
from langchain_core.load.dump import dumps
from langchain_core.load.serializable import Serializable
from pydantic import ConfigDict, Field, model_validator
class Person(Serializable):
... | """Test for Serializable base class"""
import json
import os
from typing import Any
from unittest.mock import patch
import pytest
from langchain_core.load.dump import dumps
from langchain_core.load.serializable import Serializable
from pydantic import ConfigDict, Field, model_validator
class Person(Serializable):
... |
import pytest
from importlib.util import find_spec
from llama_index.core.storage.kvstore.types import BaseKVStore
from llama_index.storage.kvstore.postgres import PostgresKVStore
no_packages = (
find_spec("psycopg2") is None
or find_spec("sqlalchemy") is None
or find_spec("asyncpg") is None
)
def test_cl... | import pytest
from importlib.util import find_spec
from llama_index.core.storage.kvstore.types import BaseKVStore
from llama_index.storage.kvstore.postgres import PostgresKVStore
no_packages = find_spec("psycopg2") is None or find_spec("sqlalchemy") is None or find_spec("asyncpg") is None
def test_class():
names... |
import re
from collections.abc import Sequence
from typing import Optional
from langchain_core.messages import BaseMessage
def _is_openai_data_block(block: dict) -> bool:
"""Check if the block contains multimodal data in OpenAI Chat Completions format."""
if block.get("type") == "image_url":
if (
... | import re
from collections.abc import Sequence
from typing import Optional
from langchain_core.messages import BaseMessage
def _is_openai_data_block(block: dict) -> bool:
"""Check if the block contains multimodal data in OpenAI Chat Completions format."""
if block.get("type") == "image_url":
if (
... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List
import numpy as np
import pytest
from jina import Flow, Document, DocumentArray
from ...paddle_image import ImagePaddlehubEncoder
@pytest.mark.parametrize('arr_in', [
(np.ones((3, 224, 2... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List
import numpy as np
import pytest
from jina import Flow, Document, DocumentArray
from jinahub.encoder.paddle_image import ImagePaddlehubEncoder
@pytest.mark.parametrize('arr_in', [
(np.on... |
from operator import itemgetter
from typing import Sequence, Iterable
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Implement required and derived functions t... | from operator import itemgetter
from typing import Sequence, Iterable
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Implement required and derived functions t... |
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor,... | from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor,... |
from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
launch_darkly_sdk_key: str = Field(
default="",
description="The Launch Darkly SDK key",
validation_alias="LAUNCH_DARKLY_SDK_KEY",
)
model_config = SettingsConfi... | from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
launch_darkly_sdk_key: str = Field(
default="",
description="The Launch Darkly SDK key",
validation_alias="LAUNCH_DARKLY_SDK_KEY"
)
model_config = SettingsConfig... |
"""Mixture modeling algorithms."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._bayesian_mixture import BayesianGaussianMixture
from ._gaussian_mixture import GaussianMixture
__all__ = ["BayesianGaussianMixture", "GaussianMixture"]
| """Mixture modeling algorithms."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._bayesian_mixture import BayesianGaussianMixture
from ._gaussian_mixture import GaussianMixture
__all__ = ["GaussianMixture", "BayesianGaussianMixture"]
|
from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.mimetypes import POINT_CLOUD_EXTRA_EXTENSIONS
from docarray.t... | from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.doc... |
from . import utils
from .model import (
hubert_base,
hubert_large,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
wav... | from . import utils
from .model import (
hubert_base,
hubert_large,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
Wav... |
"""Data struct for document summary index."""
from dataclasses import dataclass, field
from typing import Dict, List
from llama_index.core.data_structs.data_structs import IndexStruct
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.schema import BaseNode
@dataclass
class ... | """Data struct for document summary index."""
from dataclasses import dataclass, field
from typing import Dict, List
from llama_index.core.data_structs.data_structs import IndexStruct
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.schema import BaseNode
@dataclass
class ... |
from typing import Union, TypeVar, Any, TYPE_CHECKING, Type, cast
import numpy as np
if TYPE_CHECKING:
from pydantic.fields import ModelField
from pydantic import BaseConfig
from docarray.document.base_node import BaseNode
from docarray.proto import NdArrayProto, NodeProto
T = TypeVar('T', bound='Tensor')
... | from typing import Union, TypeVar, Any, TYPE_CHECKING, Type, cast
import numpy as np
if TYPE_CHECKING:
from pydantic.fields import ModelField
from pydantic import BaseConfig, PydanticValueError
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NdArrayProto, NodeProto
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOG_PROCESSOR, LOOPS,
METRICS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS,
OPTIMIZERS, PARAM... | # Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RU... |
"""Build configuration"""
import dataclasses
from typing import Any, Dict, List, Optional
@dataclasses.dataclass
class BuildConfiguration: # pylint: disable=R0902
"""Configurations use when building libxgboost"""
# Whether to hide C++ symbols in libxgboost.so
hide_cxx_symbols: bool = True
# Whether... | """Build configuration"""
import dataclasses
from typing import Any, Dict, List, Optional
@dataclasses.dataclass
class BuildConfiguration: # pylint: disable=R0902
"""Configurations use when building libxgboost"""
# Whether to hide C++ symbols in libxgboost.so
hide_cxx_symbols: bool = True
# Whether ... |
from typing import Any, Dict, List, Optional, Union
from docarray.utils._internal.query_language.lookup import (
LookupLeaf,
LookupNode,
LookupTreeElem,
Q,
)
LOGICAL_OPERATORS: Dict[str, Union[str, bool]] = {
'$and': 'and',
'$or': 'or',
'$not': True,
}
COMPARISON_OPERATORS = {
'$lt': ... | from typing import Any, Dict, List, Optional, Union
from docarray.utils._internal.query_language.lookup import (
LookupLeaf,
LookupNode,
LookupTreeElem,
Q,
)
LOGICAL_OPERATORS: Dict[str, Union[str, bool]] = {
'$and': 'and',
'$or': 'or',
'$not': True,
}
COMPARISON_OPERATORS = {
'$lt': ... |
import os
from deprecated import deprecated
from typing import Any, Optional
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
DEFAULT_API_BASE = "https://api.studio.nebius.ai/v1"
@deprecated(
reason="This class has been deprecated and will no longer be maintained. Please use llama-index-llms-neb... | import os
from typing import Any, Optional
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
DEFAULT_API_BASE = "https://api.studio.nebius.ai/v1"
class NebiusMultiModal(OpenAIMultiModal):
"""
Nebius AI Studio Multimodal class.
"""
def __init__(
self,
model: str,
... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.acti... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.acti... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch.utils.checkpoint import checkpoint
from mmdet.registry import MODELS
@MODELS.register_module()
class HRFPN(BaseModule):
... | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.utils.checkpoint import checkpoint
from mmdet.registry import MODELS
@MODELS.register_module()
class HRFPN(BaseModule):
... |
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.doc... | from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.doc... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from... |
import warnings
from abc import abstractmethod
from typing import Iterable, Iterator, MutableSequence
from docarray import Document, DocumentArray
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def _update_subindices_append_extend(self, value):
if getat... | import warnings
from abc import abstractmethod
from typing import Iterable, Iterator, MutableSequence
from docarray import Document, DocumentArray
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def _update_subindices_append_extend(self, value):
if getat... |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple, Union
import cv2
import numpy as np
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.dl_utils import tensor2imgs
DATA_BATCH = Optional[Union[dict, tuple, list]]
... | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.structures import BaseDataElement
from mmengine.utils.dl_utils import tensor2imgs
# TODO:... |
from jina import Flow, Document, DocumentArray
from ...flair_text import FlairTextEncoder
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(
text='it is a good day! the dog sits on the floor.')
yield doc
def test_use_in_flow():
with Flow.load_config('flow.yml... | from jina import Flow, Document, DocumentArray
from jinahub.encoder.flair_text import FlairTextEncoder
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(
text='it is a good day! the dog sits on the floor.')
yield doc
def test_use_in_flow():
with Flow.load_con... |
"""Code to help indexing data into a vectorstore.
This package contains helper logic to help deal with indexing data into
a vectorstore while avoiding duplicated content and over-writing content
if it's unchanged.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from lan... | """Code to help indexing data into a vectorstore.
This package contains helper logic to help deal with indexing data into
a vectorstore while avoiding duplicated content and over-writing content
if it's unchanged.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from lan... |
# Copyright (c) OpenMMLab. All rights reserved.
from .layer_decay_optimizer_constructor import \
LearningRateDecayOptimizerConstructor
__all__ = ['LearningRateDecayOptimizerConstructor']
| # Copyright (c) OpenMMLab. All rights reserved.
from .builder import OPTIMIZER_BUILDERS, build_optimizer
from .layer_decay_optimizer_constructor import \
LearningRateDecayOptimizerConstructor
__all__ = [
'LearningRateDecayOptimizerConstructor', 'OPTIMIZER_BUILDERS',
'build_optimizer'
]
|
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union
from docarray.array.mixins import ParallelMixin, GroupMixin
from docarray.helper import protocol_and_compress_from_file_path
if TYPE_CHECKING:
from docarray import Document, DocumentArray
class DocumentArrayLoader(ParallelMixin, GroupMix... | from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union
from .. import ParallelMixin, GroupMixin
from ....helper import protocol_and_compress_from_file_path
if TYPE_CHECKING:
from docarray import Document, DocumentArray
class DocumentArrayLoader(ParallelMixin, GroupMixin):
def __init__(
... |
#!/usr/bin/env python3
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import os
from Cython import Tempita as tempita
# XXX: If this import ever fails (does it really?), vendor either
# cython.tempita or numpy/npy_tempita.
def process_tempita(fromfile, outfile=None)... | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import os
from Cython import Tempita as tempita
# XXX: If this import ever fails (does it really?), vendor either
# cython.tempita or numpy/npy_tempita.
def process_tempita(fromfile, outfile=None):
"""Process tempita... |
import pytest
from backend.util.request import validate_url
def test_validate_url():
# Rejected IP ranges
with pytest.raises(ValueError):
validate_url("localhost", [])
with pytest.raises(ValueError):
validate_url("192.168.1.1", [])
with pytest.raises(ValueError):
validate_ur... | import pytest
from backend.util.request import validate_url
def test_validate_url():
with pytest.raises(ValueError):
validate_url("localhost", [])
with pytest.raises(ValueError):
validate_url("192.168.1.1", [])
with pytest.raises(ValueError):
validate_url("127.0.0.1", [])
w... |