| input (string, length 33–5k) | output (string, length 32–5k) |
|---|---|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='FCOS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FCOS',
prepr... |
_base_ = './mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
... | _base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .det_data_sample import DetDataSample, OptSampleList, SampleList
from .reid_data_sample import ReIDDataSample
from .track_data_sample import (OptTrackSampleList, TrackDataSample,
TrackSampleList)
__all__ = [
'DetDataSample', 'Samp... | # Copyright (c) OpenMMLab. All rights reserved.
from .det_data_sample import DetDataSample, OptSampleList, SampleList
__all__ = ['DetDataSample', 'SampleList', 'OptSampleList']
|
from typing import (
Union,
Optional,
TYPE_CHECKING,
List,
Dict,
)
if TYPE_CHECKING:
import numpy as np
from docarray import DocumentArray
class FindMixin:
def _find(
self,
query: 'np.ndarray',
limit: Optional[Union[int, float]] = 20,
only_id: bool = False... | from typing import (
Union,
Optional,
TYPE_CHECKING,
List,
Dict,
)
if TYPE_CHECKING:
import numpy as np
from docarray import DocumentArray
class FindMixin:
def _find(
self,
query: 'np.ndarray',
limit: Optional[Union[int, float]] = 20,
only_id: bool = False... |
from abc import abstractmethod
from typing import Iterable, Union
from qdrant_client import QdrantClient
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
@property
@abstractmethod
def client(self) -> Qdran... | from abc import abstractmethod
from typing import Iterable, Union
from qdrant_client import QdrantClient
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
@property
@abstractmethod
def client(self) -> Qdran... |
from docarray import BaseDoc
from docarray.typing import PointCloud3DUrl
def test_set_point_cloud_url():
class MyDocument(BaseDoc):
point_cloud_url: PointCloud3DUrl
d = MyDocument(point_cloud_url="https://jina.ai/mesh.obj")
assert isinstance(d.point_cloud_url, PointCloud3DUrl)
assert d.point... | from docarray import BaseDocument
from docarray.typing import PointCloud3DUrl
def test_set_point_cloud_url():
class MyDocument(BaseDocument):
point_cloud_url: PointCloud3DUrl
d = MyDocument(point_cloud_url="https://jina.ai/mesh.obj")
assert isinstance(d.point_cloud_url, PointCloud3DUrl)
asse... |
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Feature... | # ruff: noqa
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, Class... |
from typing import Iterable, Dict, Sequence
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``... | from typing import Iterable, Dict, Sequence
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``... |
"""Tests related to the `DataIter` interface."""
from typing import Callable, Optional
import numpy as np
from xgboost import testing as tm
from ..compat import import_cupy
from ..core import DataIter, DMatrix, ExtMemQuantileDMatrix, QuantileDMatrix
def run_mixed_sparsity(device: str) -> None:
"""Check QDM wi... | """Tests related to the `DataIter` interface."""
from typing import Callable, Optional
import numpy as np
from xgboost import testing as tm
from ..core import DataIter, DMatrix, ExtMemQuantileDMatrix, QuantileDMatrix
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0... |
import requests
import urllib.parse
from typing import Dict
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
SEARCH_URL_TMPL = "https://api.search.brave.com/res/v1/web/search?{params}"
class BraveSearchToolSpec(BaseToolSpec):
"""
Brave Search tool sp... | import requests
import urllib.parse
from typing import Dict
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
SEARCH_URL_TMPL = "https://api.search.brave.com/res/v1/web/search?{params}"
class BraveSearchToolSpec(BaseToolSpec):
"""
Brave Search tool sp... |
from datasets import load_dataset
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
losses,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
from sentence_transformers.training_args i... | from datasets import load_dataset
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
losses,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
from sentence_transformers.training_args im... |
"""
This examples trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it output a continuous labels 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for indivi... | """
This examples trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it output a continious labels 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for indivi... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import Dict, Tuple
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...torch_encoder import ImageTorchEncoder
def test_config():
ex ... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import Tuple, Dict
import pytest
import numpy as np
from jina import DocumentArray, Document, Executor
from ...torch_encoder import ImageTorchEncoder
def test_config():
ex... |
import os
from pydoc import locate
import numpy as np
import pytest
from jina import Document, Flow
from PIL.Image import fromarray
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def numpy_image_uri(tmpdir):
blob = np.random.randint(255, size=(96, 96, 3), dtype='uint8')
im = fromarray(... | import os
from pydoc import locate
import numpy as np
import pytest
from PIL.Image import fromarray
from jina import Flow, Document
from ...normalizer import ImageNormalizer
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def numpy_image_uri(tmpdir):
blob = np.random.randint(255, size=(96, ... |
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder) -> None:
"""
FlopsLoss implements a... | from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder) -> None:
"""
FlopsLoss implements a... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling op... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling op... |
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')... | from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')... |
from llama_index.llms.openai_like.base import OpenAILike
class OpenLLM(OpenAILike):
r"""
OpenLLM LLM.
A thin wrapper around OpenAI interface to help users interact with OpenLLM's running server.
Examples:
`pip install llama-index-llms-openllm`
```python
from llama_index.llm... | from llama_index.llms.openai_like.base import OpenAILike
class OpenLLM(OpenAILike):
r"""
OpenLLM LLM.
A thin wrapper around OpenAI interface to help users interact with OpenLLM's running server.
Examples:
`pip install llama-index-llms-openllm`
```python
from llama_index.llm... |
"""Argparser module for Deployment runtimes"""
import argparse
from jina import helper
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into ... | """Argparser module for Deployment runtimes"""
import argparse
from jina import helper
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into ... |
"""Module to change the configuration of FFmpeg libraries (such as libavformat).
It affects functionalities in :py:mod:`torchaudio.io` (and indirectly :py:func:`torchaudio.load`).
"""
# This file is just for BC.
def __getattr__(item):
from torio.utils import ffmpeg_utils
return getattr(ffmpeg_utils, item)
| def __getattr__(item):
from torio.utils import ffmpeg_utils
return getattr(ffmpeg_utils, item)
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple, Union
from mmcv.runner import BaseModule
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from torch import Tensor
from mmdet.core import DetDataSample
from mm... | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
from ..builder import build_shared_head
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor=None,
... |
"""Argparser module for Flow"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.base import mixin_essential_parser
def mixin_flow_features_parser(parser):
"""Add the arguments for the Flow features to the parser
:param... | """Argparser module for Flow"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.base import mixin_essential_parser
def mixin_flow_features_parser(parser):
"""Add the arguments for the Flow features to the parser
:param ... |
"""Vector stores."""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.vectorstores.base import VST, VectorStore, VectorStoreRetriever
from langchain_core.vectorstores.in_memory import InMemoryVectorStore
__all__ = (
"VectorStore"... | """Vector stores."""
from langchain_core.vectorstores.base import VST, VectorStore, VectorStoreRetriever
from langchain_core.vectorstores.in_memory import InMemoryVectorStore
__all__ = [
"VectorStore",
"VST",
"VectorStoreRetriever",
"InMemoryVectorStore",
]
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... | """
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... |
__version__ = '0.12.6'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
| __version__ = '0.12.5'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| _base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
"""Mock embedding model."""
from typing import Any, List
from llama_index.core.base.embeddings.base import BaseEmbedding
class MockEmbedding(BaseEmbedding):
"""
Mock embedding.
Used for token prediction.
Args:
embed_dim (int): embedding dimension
"""
embed_dim: int
def __ini... | """Mock embedding model."""
from typing import Any, List
from llama_index.core.base.embeddings.base import BaseEmbedding
class MockEmbedding(BaseEmbedding):
"""Mock embedding.
Used for token prediction.
Args:
embed_dim (int): embedding dimension
"""
embed_dim: int
def __init__(s... |
from typing import Any, Literal
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
SchemaField,
)
from backend.integrations.providers import ProviderNam... | from typing import Any, Literal
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
SchemaField,
)
from backend.integrations.providers import ProviderNam... |
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric i... | # Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric i... |
import logging
import sys
import uuid
import pytest
from langchain.callbacks.tracers import LoggingCallbackHandler
def test_logging(
caplog: pytest.LogCaptureFixture,
capsys: pytest.CaptureFixture[str],
) -> None:
# Set up a Logger and a handler so we can check the Logger's handlers work too
logger ... | import logging
import sys
import uuid
import pytest
from langchain.callbacks.tracers import LoggingCallbackHandler
def test_logging(
caplog: pytest.LogCaptureFixture, capsys: pytest.CaptureFixture[str]
) -> None:
# Set up a Logger and a handler so we can check the Logger's handlers work too
logger = log... |
"""
Train XGBoost with cat_in_the_dat dataset
=========================================
A simple demo for categorical data support using dataset from Kaggle categorical data
tutorial.
The excellent tutorial is at:
https://www.kaggle.com/shahules/an-overview-of-encoding-techniques
And the data can be found at:
https:... | """
Train XGBoost with cat_in_the_dat dataset
=========================================
A simple demo for categorical data support using dataset from Kaggle categorical data
tutorial.
The excellent tutorial is at:
https://www.kaggle.com/shahules/an-overview-of-encoding-techniques
And the data can be found at:
https:... |
import pathlib
from typing import Optional
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain.callbacks import FileCallbackHandler
from langchain.chains.base import Chain
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_k... | import pathlib
from typing import Any, Optional
import pytest
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain.callbacks import FileCallbackHandler
from langchain.chains.base import Chain
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = T... |
"""Init file."""
from llama_index.readers.dad_jokes.base import DadJokesReader
__all__ = ["DadJokesReader"]
| """Init file."""
from llama_index.readers.dad_jokes.base import DadJokesReader
__all__ = ["DadJokesReader"]
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.4.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versio... | # Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.3.2'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
versio... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
"""Dataset for DeepFashion."""
METAINFO = {
'CLASSES': ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants',
... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag',
'neckwear', 'headwear', 'eyeglass', 'belt', ... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
import torch.nn as nn
from torch.optim import SGD
from mmengine.model import BaseDataPreprocessor, BaseModel
from mmengine.optim import OptimWrapper
from mmengine.registry import MODELS
from mmengine.testing imp... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
import torch.nn as nn
from torch.optim import SGD
from mmengine.model import BaseDataPreprocessor, BaseModel
from mmengine.optim import OptimWrapper
from mmengine.registry import MODELS
from mmengine.testing imp... |
"""String utilities."""
from typing import Any
def stringify_value(val: Any) -> str:
"""Stringify a value.
Args:
val: The value to stringify.
Returns:
str: The stringified value.
"""
if isinstance(val, str):
return val
if isinstance(val, dict):
return "\n" + ... | """String utilities."""
from typing import Any
def stringify_value(val: Any) -> str:
"""Stringify a value.
Args:
val: The value to stringify.
Returns:
str: The stringified value.
"""
if isinstance(val, str):
return val
elif isinstance(val, dict):
return "\n" ... |
from abc import abstractmethod
from typing import Iterable, Union
from qdrant_client import QdrantClient
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
@property
@abstractmethod
def client(self) -> Qdran... | from abc import abstractmethod
from typing import Iterable, Union
from qdrant_client import QdrantClient
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
@property
@abstractmethod
def client(self) -> QdrantClient:
raise No... |
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseM... | from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseM... |
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones.hourglass import HourglassNet
def test_hourglass_backbone():
with pytest.raises(AssertionError):
# HourglassNet's num_stacks should larger than 0
HourglassNet(num_stacks=0)
with pytest.rais... | import pytest
import torch
from mmdet.models.backbones.hourglass import HourglassNet
def test_hourglass_backbone():
with pytest.raises(AssertionError):
# HourglassNet's num_stacks should larger than 0
HourglassNet(num_stacks=0)
with pytest.raises(AssertionError):
# len(stage_channels... |
"""
This file contains some utilities functions used to find parallel sentences
in two monolingual corpora.
Code in this file has been adapted from the LASER repository:
https://github.com/facebookresearch/LASER
"""
import gzip
import lzma
import time
import faiss
import numpy as np
######## Functions to find and... | """
This file contains some utilities functions used to find parallel sentences
in two monolingual corpora.
Code in this file has been adapted from the LASER repository:
https://github.com/facebookresearch/LASER
"""
import faiss
import numpy as np
import time
import gzip
import lzma
######## Functions to find and ... |
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
from docarray.documents.text import TextDoc
def test_text_document_operators():
doc = TextDoc(text='text', url='http://url.com')
assert doc == 'text'
assert doc != 'http://url.com'
doc2 = TextDoc(id=doc.id, text='text', url='http://url.com')
assert doc == doc2
doc3 = TextDoc(id='other-id', ... | from docarray.documents.text import TextDoc
def test_text_document_operators():
doc = TextDoc(text='text', url='http://url.com')
assert doc == 'text'
assert doc != 'http://url.com'
doc2 = TextDoc(id=doc.id, text='text', url='http://url.com')
assert doc == doc2
doc3 = TextDoc(id='other-id', ... |
"""Helpers for creating Anthropic API clients.
This module allows for the caching of httpx clients to avoid creating new instances
for each instance of ChatAnthropic.
Logic is largely replicated from anthropic._base_client.
"""
import asyncio
import os
from functools import lru_cache
from typing import Any, Optional... | """Helpers for creating Anthropic API clients.
This module allows for the caching of httpx clients to avoid creating new instances
for each instance of ChatAnthropic.
Logic is largely replicated from anthropic._base_client.
"""
import asyncio
import os
from functools import lru_cache
from typing import Any, Optional... |
from sentence_transformers import SentenceTransformer, losses, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0):
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to ad... | from sentence_transformers import losses, SentenceTransformer, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0):
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to ad... |
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# da... | _base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# comment out the code below to use different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# ... |
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import Autoe... | from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_oobleck imp... |
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_executio... | from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_executio... |
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDocument
from docarray.base_document import DocumentResponse
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
cla... | import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDocument
from docarray.base_document import DocumentResponse
from docarray.documents import Image, Text
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmd... |
import enum
from typing import Any, List, Optional, Union
import pydantic
import backend.data.graph
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
class Methods(enum.Enum):
SUBSCRIBE = "subscribe"
UNSUBSCRIBE = "unsubscribe"
EXECUTION_EVENT = "execution_event"
ERROR = "error"
... | import enum
from typing import Any, List, Optional, Union
import pydantic
import backend.data.graph
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
class Methods(enum.Enum):
SUBSCRIBE = "subscribe"
UNSUBSCRIBE = "unsubscribe"
EXECUTION_EVENT = "execution_event"
ERROR = "error"
... |
"""Module to change the configuration of FFmpeg libraries (such as libavformat).
It affects functionalities in :py:mod:`torchaudio.io` (and indirectly :py:func:`torchaudio.load`).
"""
from typing import Dict, Tuple
import torch
def get_versions() -> Dict[str, Tuple[int]]:
"""Get the versions of FFmpeg libraries... | from typing import Dict, Tuple
import torch
def get_versions() -> Dict[str, Tuple[int]]:
"""Get the versions of FFmpeg libraries
Returns:
dict: mapping from library names to version string,
i.e. `"libavutil": (56, 22, 100)`.
"""
return torch.ops.torchaudio.ffmpeg_get_versions()
... |
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICE... | # ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICE... |
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
_delete_=True,
type='DeformRoIPoolPack',
output_size=7,
output_cha... | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
_delete_=True,
type='DeformRoIPoolPack',
output_size=7,
output_cha... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from .audio_clip.model import AudioCLIP
class A... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from .audio_clip.model import AudioCLIP
class A... |
from docarray import DocumentArray
from jina import requests
from jina.serve.executors import BaseExecutor
class DummyExternalIndexer(BaseExecutor):
@requests
def index(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = 'indexed'
| from jina.serve.executors import BaseExecutor
class DummyExternalIndexer(BaseExecutor):
pass
|
from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from torchvision.datapoints._datapoint import Datapoint
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(Datapoint):
categories: Optional[Sequence[str... | from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from torchvision.datapoints._datapoint import Datapoint
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(Datapoint):
categories: Optional[Sequence[str... |
"""Athena Reader."""
import warnings
from typing import Optional
import boto3
from llama_index.core.readers.base import BaseReader
from sqlalchemy.engine import create_engine
class AthenaReader(BaseReader):
"""
Athena reader.
Follow AWS best practices for security.
AWS discourages hardcoding credent... | """Athena Reader."""
import warnings
from typing import Optional
import boto3
from llama_index.core.readers.base import BaseReader
from sqlalchemy.engine import create_engine
class AthenaReader(BaseReader):
"""Athena reader.
Follow AWS best practices for security.
AWS discourages hardcoding credentials ... |
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from examples/modular-transformers/modular_add_function.py.
# Do NOT edit this file manually as any edits will be overwritten by the generatio... | # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from examples/modular-transformers/modular_add_function.py.
# Do NOT edit this file manually as any edits will be overwritten by the generatio... |
import pytest
from docarray import BaseDoc
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp # type: ignore
from docarray.typing import TensorFlowEmbedding, TensorFlowTensor
@pyt... | import pytest
from docarray import BaseDocument
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp # type: ignore
from docarray.typing import TensorFlowEmbedding, TensorFlowTensor
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .efficientnet import EfficientNet
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobil... | # Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .pvt im... |
import os as _os
import sys as _sys
from pathlib import Path as _Path
import datetime as _datetime
__windows__ = _sys.platform == 'win32'
__uptime__ = _datetime.datetime.now().isoformat()
# update on MacOS 1. clean this tuple, 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py
# "\'JINA_.*?\'" ... | import os as _os
import sys as _sys
from pathlib import Path as _Path
import datetime as _datetime
__windows__ = _sys.platform == 'win32'
__uptime__ = _datetime.datetime.now().isoformat()
# update on MacOS 1. clean this tuple, 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py
# "\'JINA_.*?\'" ... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.inception_resnet_v2 import (
InceptionResNetV2 as InceptionResNetV2,
)
from keras.src.applications.inception_resnet_v2 import (
decode_predictions as decode_predi... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.inception_resnet_v2 import InceptionResNetV2
from keras.src.applications.inception_resnet_v2 import decode_predictions
from keras.src.applications.inception_resnet_v2 imp... |
"""Edenai Tools."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
EdenAiExplicitImageTool,
EdenAiObjectDetectionTool,
EdenAiParsingIDTool,
EdenAiParsingInvoiceTool,
EdenAiSpeechToT... | """Edenai Tools."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
EdenAiExplicitImageTool,
EdenAiObjectDetectionTool,
EdenAiParsingIDTool,
EdenAiParsingInvoiceTool,
EdenAiSpeechToT... |
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_r... | # Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_r... |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmdet.core.mask import BitmapMasks, PolygonMasks
from mmdet.datasets.pipelines import LoadAnnotations
class TestLoadAnnotations(unittest.TestCase):
def setUp(self):
"""Setup the mod... | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmdet.core.mask import BitmapMasks, PolygonMasks
from mmdet.datasets.pipelines import LoadAnnotations
class TestLoadAnnotations(unittest.TestCase):
def setUp(self):
"""Setup the mod... |
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import ParamSchedulerHook
class TestParamSchedulerHook:
def test_after_iter(self):
hook = ParamSchedulerHook()
runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
sch... | # Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import ParamSchedulerHook
class TestParamSchedulerHook:
def test_after_iter(self):
Hook = ParamSchedulerHook()
Runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
sch... |
"""
This script contains an example how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.rea... | """
This script contains an example how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.rea... |
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.util import fullname
class MSELoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None... | from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.util import fullname
class MSELoss(nn.Module):
def __init__(self, model: CrossEncoder, **kwargs) -> None:
super().__init__()
self.model = model
... |
import pytest
import inspect
from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler, CBEventType
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.nvidia import NVIDIAEmbedding
from openai import AuthenticationError
from pytest_httpx import HTTPXMock
@py... | import pytest
import inspect
from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler, CBEventType
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.nvidia import NVIDIAEmbedding
from openai import AuthenticationError
from pytest_httpx import HTTPXMock
@py... |
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RU... | # Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RU... |
from langchain_core.prompts.prompt import PromptTemplate
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
ENTITY_MEMORY_CONVERSATION_TEMPLATE,
ENTITY_SUMMARIZATION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
SUMMARY_PROMPT,
)
DEFAULT_TEMPLATE = """The following is a friendly convers... | # flake8: noqa
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
ENTITY_MEMORY_CONVERSATION_TEMPLATE,
ENTITY_SUMMARIZATION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
SUMMARY_PROMPT,
)
from langchain_core.prompts.prompt import PromptTemplate
DEFAULT_TEMPLATE = """The following is a fr... |
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and i... | from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and i... |
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .fluentcommands import FluentSpeechCommands
from .gtzan import GTZAN
from .librilight_limited import LibriLightLimited
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
... | from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .fluentcommands import FluentSpeechCommands
from .gtzan import GTZAN
from .librilight_limited import LibriLightLimited
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
... |
import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.util... | import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.datapoints import BoundingBoxes
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils... |
from __future__ import annotations
from sentence_transformers.losses.MSELoss import MSELoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMSELoss(MSELoss):
def __init__(self, model: SparseEncoder) -> None:
"""
# TODO: Update as it's mentionned trainings ... | from __future__ import annotations
from sentence_transformers.losses.MSELoss import MSELoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMSELoss(MSELoss):
def __init__(self, model: SparseEncoder) -> None:
"""
# TODO: Update as it's mentionned trainings ... |
"""
Arize-Phoenix LlamaPack.
"""
from typing import TYPE_CHECKING, Any, Dict, List
from llama_index.core import set_global_handler
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.schema import TextNode
if TYPE_CHECKIN... | """
Arize-Phoenix LlamaPack.
"""
from typing import TYPE_CHECKING, Any, Dict, List
from llama_index.core import set_global_handler
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.schema import TextNode
if TYPE_CHECKIN... |
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'b... | # Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'b... |
"""Init file of LlamaIndex."""
__version__ = "0.12.12"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.... | """Init file of LlamaIndex."""
__version__ = "0.12.11"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.... |
"""
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
... | """
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
... |
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class Cerebras(OpenAILike):
"""
Cerebras LLM.
Examples:
`pip install llama-index-llms-cerebras`
```python
from llama_index.llms.cerebras import Cerebras
# Set up the Cerebras ... | import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class Cerebras(OpenAILike):
"""
Cerebras LLM.
Examples:
`pip install llama-index-llms-cerebras`
```python
from llama_index.llms.cerebras import Cerebras
# Set up the Cerebras ... |
from unittest import mock
import pytest
from llama_index.core.workflow import Context
from llama_index.core.workflow.handler import WorkflowHandler
def test_str():
h = WorkflowHandler()
h.set_result([])
assert str(h) == "[]"
@pytest.mark.asyncio
async def test_stream_no_context():
h = WorkflowHandl... | from unittest import mock
import pytest
from llama_index.core.workflow import Context
from llama_index.core.workflow.handler import WorkflowHandler
def test_str():
h = WorkflowHandler()
h.set_result([])
assert str(h) == "[]"
@pytest.mark.asyncio()
async def test_stream_no_context():
h = WorkflowHan... |
"""
This script runs the evaluation of an SBERT msmarco model on the
MS MARCO dev dataset and reports different performances metrices for cossine similarity & dot-product.
Usage:
python eval_msmarco.py model_name [max_corpus_size_in_thousands]
"""
import logging
import os
import sys
import tarfile
from sentence_tran... | """
This script runs the evaluation of an SBERT msmarco model on the
MS MARCO dev dataset and reports different performances metrices for cossine similarity & dot-product.
Usage:
python eval_msmarco.py model_name [max_corpus_size_in_thousands]
"""
import logging
import os
import sys
import tarfile
from sentence_tran... |
import numpy as np
from docarray import Document, DocumentArray, dataclass
from docarray.typing import Text
from jina import Executor, Flow, requests
def test_specific_params():
class MyExec(Executor):
def __init__(self, params_awaited, *args, **kwargs):
super().__init__(*args, **kwargs)
... | import numpy as np
from docarray import DocumentArray, Document, dataclass
from docarray.typing import Text
from jina import Executor, Flow, requests
def test_specific_params():
class MyExec(Executor):
def __init__(self, params_awaited, *args, **kwargs):
super().__init__(*args, **kwargs)
... |
from typing import Union
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
tf_available = i... | from typing import Union
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
try:
import torch # noqa: F401
except ImportError:
AudioTensor = AudioNdArray
else:
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
AudioTensor = Union[AudioNdArray, AudioTorchT... |
import logging
from typing import Any, Dict, Optional, Tuple
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
DEFAULT_UPSTAGE_API_BASE = "https://api.upstage.ai/v1/solar"
DEFAULT_CONTEXT_WINDOW = 32768
CHAT_MODELS = {
"solar-mini": 32768,
"solar-pro": 4096,
}
FUNCTION_CALLING_MODELS... | import logging
from typing import Any, Dict, Optional, Tuple
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
DEFAULT_UPSTAGE_API_BASE = "https://api.upstage.ai/v1/solar"
DEFAULT_CONTEXT_WINDOW = 32768
CHAT_MODELS = {
"solar-1-mini-chat": 32768,
"solar-pro": 4096,
"solar-docvisio... |
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval... | import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval... |
from langchain_core.exceptions import TracerException
from langchain_core.tracers.base import BaseTracer
__all__ = ["TracerException", "BaseTracer"]
| from langchain_core.tracers.base import BaseTracer, TracerException
__all__ = ["TracerException", "BaseTracer"]
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval... | import logging
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseNanoBEIREvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = Spar... |
import platform
from argparse import ArgumentParser
import huggingface_hub
import pandas
import pyarrow
from datasets import __version__ as version
from datasets.commands import BaseDatasetsCLICommand
def info_command_factory(_):
return EnvironmentCommand()
class EnvironmentCommand(BaseDatasetsCLICommand):
... | import platform
from argparse import ArgumentParser
import pandas
import pyarrow
from datasets import __version__ as version
from datasets.commands import BaseDatasetsCLICommand
def info_command_factory(_):
return EnvironmentCommand()
class EnvironmentCommand(BaseDatasetsCLICommand):
@staticmethod
def... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
class TestKDSingleStageDetector(TestCase):
... |
import re
from typing import TYPE_CHECKING, Any, Dict, Union
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __init_... | from sentence_transformers import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __call__(self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:... |
# dataset settings
dataset_type = 'CocoPanopticDataset'
# data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
data_root = 's3://openmmlab/datasets/detection/coco/'
# Meth... | # dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='dis... |
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),... | _base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1... |
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
fro... | from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
fro... |
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import os
import subprocess
from pathlib import Path
import pytest
from jina import Document, DocumentArray
@pytest.fixture(scope='session')
def build_docker_image() -> str:
img_name = Path(__file__).parents[1]... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
from jina import Document, DocumentArray
@pytest.fixture()
def test_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def data_generator(test_dir: str):
... |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... |
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACT... | import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACT... |
from sentence_transformers import SentenceTransformer, losses, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0):
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to ad... | from sentence_transformers import losses, SentenceTransformer, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0):
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to ad... |
"""
Paged CSV reader.
A parser for tabular data files.
"""
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PagedCSVReader(BaseReader):
"""
Paged CSV parser.
Displayed each row... | """Paged CSV reader.
A parser for tabular data files.
"""
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PagedCSVReader(BaseReader):
"""Paged CSV parser.
Displayed each row in an... |
_base_ = './mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
bgr_to_rgb=False),
backbone=dict(
... | _base_ = './mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
bgr_to_rgb=False),
backbone=dict(
... |