| input (string, length 33–5k) | output (string, length 32–5k) |
|---|---|
import json
from collections.abc import Sequence
from langchain_core.agents import AgentAction
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolMessage,
)
from langchain.agents.output_parsers.tools import ToolAgentAction
def _create_tool_message(
agent_action: ToolAgentAction, obser... | import json
from collections.abc import Sequence
from langchain_core.agents import AgentAction
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolMessage,
)
from langchain.agents.output_parsers.tools import ToolAgentAction
def _create_tool_message(
agent_action: ToolAgentAction, obser... |
"""Embeddings."""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.embeddings.embeddings import Embeddings
from langchain_core.embeddings.fake import (
DeterministicFakeEmbedding,
FakeEmbeddings,
)
__all__ = ("Det... | """Embeddings."""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.embeddings.embeddings import Embeddings
from langchain_core.embeddings.fake import (
DeterministicFakeEmbedding,
FakeEmbeddings,
)
__all__ = ["DeterministicFakeEmbe... |
import sys
import warnings
import torch
from torch.onnx import symbolic_opset11 as opset11
from torch.onnx.symbolic_helper import parse_args
_ONNX_OPSET_VERSION_11 = 11
_ONNX_OPSET_VERSION_16 = 16
BASE_ONNX_OPSET_VERSION = _ONNX_OPSET_VERSION_11
@parse_args("v", "v", "f")
def symbolic_multi_label_nms(g, boxes, scor... | import sys
import warnings
import torch
_onnx_opset_version_11 = 11
_onnx_opset_version_16 = 16
base_onnx_opset_version = _onnx_opset_version_11
def _register_custom_op():
from torch.onnx.symbolic_helper import parse_args
from torch.onnx.symbolic_opset11 import select, squeeze, unsqueeze
@parse_args("v... |
"""Utils for manipulating images."""
import base64
from io import BytesIO
from typing import cast
from PIL import Image
from PIL.ImageFile import ImageFile
def img_2_b64(image: ImageFile, format: str = "JPEG") -> str:
"""
Convert a PIL.Image to a base64 encoded image string.
Args:
image (ImageF... | """Utils for manipulating images."""
import base64
from io import BytesIO
from typing import cast
from PIL import Image
from PIL.ImageFile import ImageFile
def img_2_b64(image: ImageFile, format: str = "JPEG") -> str:
"""
Convert a PIL.Image to a base64 encoded image string.
Args:
image (ImageFi... |
"""Tool for the Google Scholar"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
class GoogleScholarQueryRun(BaseTool):
"""Tool that queries the... | """Tool for the Google Scholar"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
class GoogleScholarQueryRun(BaseTool): # type: ignore[override]
... |
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this fil... | import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zero... |
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_to... | from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_availa... |
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import Document, Image, Text
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(Document):
img: Image
text: Text
title: str
input_doc ... | import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import Document, Image, Text
from docarray.typing import Tensor
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(Document):
img: Image
text: Text
title: str
input_doc =... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.config import backend as backend
from keras.src.backend.config import (
disable_flash_attention as disable_flash_attention,
)
from keras.src.backend.config import (
en... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.config import backend as backend
from keras.src.backend.config import (
disable_flash_attention as disable_flash_attention,
)
from keras.src.backend.config import (
en... |
_base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
data_root = 'data/coco/'
model = dict(test_cfg=dict(
max_per_img=300,
chunked_size=40,
))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
ty... | _base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
data_root = 'data/coco/'
model = dict(test_cfg=dict(
max_per_img=300,
chunked_size=40,
))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
ty... |
from typing import Type, TYPE_CHECKING
from docarray import Document
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class EmptyMixin:
"""Helper functions for building arrays with empty Document."""
@classmethod
def empty(cls: Type['T'], size: int = 0, *args, **kwargs) -> 'T':
... | from typing import Type, TYPE_CHECKING
from docarray import Document
if TYPE_CHECKING:
from docarray.typing import T
class EmptyMixin:
"""Helper functions for building arrays with empty Document."""
@classmethod
def empty(cls: Type['T'], size: int = 0, *args, **kwargs) -> 'T':
"""Create a :... |
import torch
import torchaudio.prototype.transforms as T
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class Transforms(TestBaseMixin):
@nested_params(
["Convolve", "FFTConvolve"],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
... | import torch
import torchaudio.prototype.transforms as T
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class Transforms(TestBaseMixin):
@nested_params(
["Convolve", "FFTConvolve"],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but hope to see some on the hub soon
m... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but hope to see some on the hub soon
m... |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... | """
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... |
import csv
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code... | from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers import SentenceTransformer, LoggingHandler, models, util, InputExample
from sentence_transformers import losses
import os
import gzip
import csv
from datetime import datetime
import logging
from torch.utils.data import ... |
import hashlib
import logging
from os import PathLike
from pathlib import Path
from typing import Union
import torch
from torchaudio._internal import download_url_to_file
_LG = logging.getLogger(__name__)
def _get_local_path(key):
path = Path(torch.hub.get_dir()) / "torchaudio" / Path(key)
path.parent.mkdir... | import hashlib
import logging
from os import PathLike
from pathlib import Path
from typing import Union
import torch
from torchaudio._internal import download_url_to_file
_LG = logging.getLogger(__name__)
def _get_local_path(key):
path = Path(torch.hub.get_dir()) / "torchaudio" / Path(key)
path.parent.mkdir... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.promptlayer_openai import PromptLayerChatOpenAI
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# hand... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.promptlayer_openai import PromptLayerChatOpenAI
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# hand... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing._utils import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestT... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from parameterized import parameterized
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing._utils import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class... |
"""Test Prediction Guard API wrapper."""
import pytest
from langchain_community.llms.predictionguard import PredictionGuard
def test_predictionguard_invoke() -> None:
"""Test valid call to prediction guard."""
llm = PredictionGuard(model="Hermes-3-Llama-3.1-8B")
output = llm.invoke("Tell a joke.")
a... | """Test Prediction Guard API wrapper."""
import pytest
from langchain_community.llms.predictionguard import PredictionGuard
def test_predictionguard_invoke() -> None:
"""Test valid call to prediction guard."""
llm = PredictionGuard(model="Hermes-3-Llama-3.1-8B") # type: ignore[call-arg]
output = llm.in... |
from dataclasses import dataclass
from typing import Optional
@dataclass
class HubExecutor:
"""Basic Executor Data Class from Hubble"""
uuid: str = None
name: Optional[str] = None
commit_id: Optional[str] = None
tag: Optional[str] = None
visibility: Optional[bool] = None
image_name: Optio... | from dataclasses import dataclass
from typing import Optional
@dataclass
class HubExecutor:
"""Basic Executor Data Class from Hubble"""
uuid: str = None
name: Optional[str] = None
commit_id: Optional[str] = None
tag: Optional[str] = None
visibility: Optional[bool] = None
image_name: Optio... |
import pytest
from jina import Flow
from jina.enums import ProtocolType
from tests import random_docs
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('changeto_protocol', ['grpc', 'http', 'websocket'])
def test_change_gateway(protocol, changeto_protocol)... | import pytest
from jina import Flow
from jina.enums import GatewayProtocolType
from tests import random_docs
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('changeto_protocol', ['grpc', 'http', 'websocket'])
def test_change_gateway(protocol, changeto_pr... |
from fastapi import FastAPI, Query
app = FastAPI()
@app.get("/items/")
async def read_items(q: str | None = Query(min_length=3)):
results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]}
if q:
results.update({"q": q})
return results
| from fastapi import FastAPI, Query
app = FastAPI()
@app.get("/items/")
async def read_items(q: str | None = Query(default=..., min_length=3)):
results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]}
if q:
results.update({"q": q})
return results
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import TEXT_FILE_FORMATS
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields imp... | from typing import Optional, TYPE_CHECKING, TypeVar, Type, Union, Any
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import TEXT_FILE_FORMATS
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields imp... |
"""Test indices/utils.py."""
import pytest
from llama_index.core.indices.utils import expand_tokens_with_subtokens
def test_expand_tokens_with_subtokens() -> None:
"""Test expand tokens."""
tokens = {"foo bar", "baz", "hello hello world bye"}
keywords = expand_tokens_with_subtokens(tokens)
assert key... | """Test indices/utils.py."""
import pytest
from llama_index.core.indices.utils import expand_tokens_with_subtokens
def test_expand_tokens_with_subtokens() -> None:
"""Test expand tokens."""
tokens = {"foo bar", "baz", "hello hello world bye"}
keywords = expand_tokens_with_subtokens(tokens)
assert keyw... |
import pytest
from google.cloud.aiplatform_v1beta1 import FunctionCall
from llama_index.core.base.llms.types import (
ChatMessage,
MessageRole,
TextBlock,
ImageBlock,
)
from llama_index.llms.vertex.gemini_utils import (
convert_chat_message_to_gemini_content,
is_gemini_model,
)
def test_is_gem... | from google.cloud.aiplatform_v1beta1 import FunctionCall
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.llms.vertex.gemini_utils import (
convert_chat_message_to_gemini_content,
is_gemini_model,
)
def test_is_gemini_model():
assert is_gemini_model("gemini-2.0-flash... |
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
class BaseBBoxCoder(metaclass=ABCMeta):
"""Base bounding box coder.
Args:
use_box_type (bool): Whether to wrap decoded boxes with the
boxlist data structure. Defaults to False.
"""
# The size ... | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
class BaseBBoxCoder(metaclass=ABCMeta):
"""Base bounding box coder."""
def __init__(self, **kwargs):
pass
@abstractmethod
def encode(self, bboxes, gt_bboxes):
"""Encode deltas between bboxes and g... |
from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_CONTEXT_WINDOW,
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils impor... | from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_CONTEXT_WINDOW,
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils impor... |
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.5.0"
@keras_export("keras.version")
def version():
return __version__
| from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.4.1"
@keras_export("keras.version")
def version():
return __version__
|
"""Comparison evaluators.
This module contains evaluators for comparing the output of two models,
be they LLMs, Chains, or otherwise. This can be used for scoring
preferences, measuring similarity / semantic equivalence between outputs,
or any other comparison task.
Example:
>>> from langchain_community.chat_mode... | """Comparison evaluators.
This module contains evaluators for comparing the output of two models,
be they LLMs, Chains, or otherwise. This can be used for scoring
preferences, measuring similarity / semantic equivalence between outputs,
or any other comparison task.
Example:
>>> from langchain_community.chat_mode... |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U... |
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
bgr_to_rgb=False),
backbone=dict(
... | _base_ = './mask_rcnn_r101_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
b... |
"""Text to Image tool spec."""
from io import BytesIO
from typing import List, Optional
import openai
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class TextToImageToolSpec(BaseToolSpec):
"""Text to Image tool spec."""
spec_functions = ["generate_images", "show_images", "g... | """Text to Image tool spec."""
from io import BytesIO
from typing import List, Optional
import openai
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class TextToImageToolSpec(BaseToolSpec):
"""Text to Image tool spec."""
spec_functions = ["generate_images", "show_images", "g... |
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import List, Optional, Sequence
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_modu... | # Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import List, Optional, Sequence
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_modu... |
import logging
from typing import Any, List, Optional, Sequence
from llama_index.core.indices.base import BaseIndex
from llama_index.core.indices.composability.graph import ComposableGraph
from llama_index.core.indices.registry import INDEX_STRUCT_TYPE_TO_INDEX_CLASS
from llama_index.core.storage.storage_context impor... | import logging
from typing import Any, List, Optional, Sequence
from llama_index.core.indices.base import BaseIndex
from llama_index.core.indices.composability.graph import ComposableGraph
from llama_index.core.indices.registry import INDEX_STRUCT_TYPE_TO_INDEX_CLASS
from llama_index.core.storage.storage_context impor... |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
n... | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# optimizer
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
... |
"""
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is then pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note, you can also pass BERT embeddings to the BiLSTM.
"""
import traceback
from datasets import load_dataset
from sentence_... | """
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is then pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note, you can also pass BERT embeddings to the BiLSTM.
"""
import traceback
from datasets import load_dataset
from sentence_... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but hope to see some on the hub soon
m... | import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseTranslationEvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but hope to see some on the hub soon
model = SparseEncoder("naver/spl... |
from pydantic import BaseModel
from typing import Optional, Dict, List
class AlphaMatrix(BaseModel):
"""
You do not need to understand this class to use a KodaRetriever - it will be instantiated automatically if a dictionary is provided.
Pydantic class to enforce the required fields for a KodaRetriev... | from pydantic import BaseModel
from typing import Optional, Dict, List
class AlphaMatrix(BaseModel):
"""
You do not need to understand this class to use a KodaRetriever - it will be instantiated automatically if a dictionary is provided.
Pydantic class to enforce the required fields for a KodaRetriev... |
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
| _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
|
"""Test EdenAi's text moderation Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and w... | """Test EdenAi's text moderation Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and w... |
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.4.0"
@keras_export("keras.version")
def version():
return __version__
| from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.3.3"
@keras_export("keras.version")
def version():
return __version__
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
import torch.nn.functional as F
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils import ConfigType, OptMultiConfig, SampleList
from mmdet.registry imp... | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch.nn.functional as F
from mmcv.runner import BaseModule, force_fp32
from mmengine.model import stack_batch
from ..builder import build_loss
from ..utils import interpolate_as
class BaseSemanticHead(BaseModule, metacla... |
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sklearn.metrics import ndcg_score
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of docume... | from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sklearn.metrics import ndcg_score
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of docume... |
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
)
import numpy as np
from ..base.backend import BaseBackendMixin
from ....helper import dataclass_from_dict, filter_dict
if TYPE_CHECKING:
from ....typing import DocumentArray... | from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
)
import numpy as np
from ..base.backend import BaseBackendMixin
from ....helper import dataclass_from_dict, filter_dict
if TYPE_CHECKING:
from ....typing import DocumentArray... |
# Copyright (c) OpenMMLab. All rights reserved.
from io import StringIO
from .file_client import FileClient
def list_from_file(filename,
prefix='',
offset=0,
max_num=0,
encoding='utf-8',
file_client_args=None):
"""Load... | # Copyright (c) OpenMMLab. All rights reserved.
# type: ignore
from io import StringIO
from .file_client import FileClient
def list_from_file(filename,
prefix='',
offset=0,
max_num=0,
encoding='utf-8',
file_client_args=Non... |
import weakref
from keras.src.backend.common import global_state
def set_tensor_attr(tensor, attr, value):
try:
setattr(tensor, attr, value)
except AttributeError:
if value is None:
return
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict i... | import weakref
from keras.src.backend.common import global_state
def set_tensor_attr(tensor, attr, value):
try:
setattr(tensor, "_keras_mask", value)
except AttributeError:
if value is None:
return
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if at... |
import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .transforms_test_impl import TransformsTestBase
@skipIfNoCuda
class TransformsCUDAFloat32Test(TransformsTestBase, PytorchTestCase):
device = "cuda"
dtype = torch.float32
@skipIfNoCuda
class TransformsCUDAFloat64Tes... | import torch
from torchaudio_unittest.common_utils import (
PytorchTestCase,
skipIfNoCuda,
)
from .transforms_test_impl import TransformsTestBase
@skipIfNoCuda
class TransformsCUDAFloat32Test(TransformsTestBase, PytorchTestCase):
device = "cuda"
dtype = torch.float32
@skipIfNoCuda
class TransformsC... |
_base_ = './cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(req... | _base_ = './cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(require... |
import os
from typing import BinaryIO, Optional, Union
import pyarrow as pa
import pyarrow.parquet as pq
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..uti... | import os
from typing import BinaryIO, Optional, Union
import pyarrow as pa
import pyarrow.parquet as pq
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..uti... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import DuckDuckGoSearchResults, DuckDuckGoSearchRun
from langchain_community.tools.ddg_search.tool import DDGInput, DuckDuckGoSearchTool
# Create a way to dynamically look up depr... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import DuckDuckGoSearchResults, DuckDuckGoSearchRun
from langchain_community.tools.ddg_search.tool import DDGInput, DuckDuckGoSearchTool
# Create a way to dynamically look up depr... |
import pytest
from jina import Flow
from jina.enums import GatewayProtocolType
from tests import random_docs
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('changeto_protocol', ['grpc', 'http', 'websocket'])
def test_change_gateway(protocol, changeto_pr... | import pytest
from jina import Flow
from jina.enums import GatewayProtocolType
from tests import random_docs
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('changeto_protocol', ['grpc', 'http', 'websocket'])
def test_change_gateway(protocol, changeto_pr... |
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIR... | import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIR... |
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim):
""... | import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim):
""... |
import inspect
import re
from hashlib import sha256
from typing import List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text... | import inspect
import re
from hashlib import sha256
from typing import List
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_... |
"""
Demo for prediction using individual trees and model slices
===========================================================
"""
import os
import numpy as np
from scipy.special import logit
from sklearn.datasets import load_svmlight_file
import xgboost as xgb
CURRENT_DIR = os.path.dirname(__file__)
train = os.path.j... | """
Demo for prediction using individual trees and model slices
===========================================================
"""
import os
import numpy as np
from scipy.special import logit
from sklearn.datasets import load_svmlight_file
import xgboost as xgb
CURRENT_DIR = os.path.dirname(__file__)
train = os.path.jo... |
import pytest
from docarray import DocumentArray
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate... | import pytest
from docarray import DocumentArray
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate... |
def __getattr__(name: str):
import warnings
if name == "AudioMetaData":
warnings.warn(
"`torchaudio.backend.common.AudioMetaData` has been moved to "
"`torchaudio.AudioMetaData`. Please update the import path.",
stacklevel=2,
)
from torchaudio import ... | def __getattr__(name: str):
import warnings
if name == "AudioMetaData":
warnings.warn(
"`torchaudio.backend.common.AudioMetaData` has been moved to "
"`torchaudio.AudioMetaData`. Please update the import path.",
stacklevel=2,
)
from torchaudio._backen... |
"""
Example of using callbacks with Dask
====================================
"""
import numpy as np
from dask.distributed import Client, LocalCluster
from dask_ml.datasets import make_regression
from dask_ml.model_selection import train_test_split
import xgboost as xgb
import xgboost.dask as dxgb
from xgboost.dask i... | """
Example of using callbacks with Dask
====================================
"""
import numpy as np
from dask.distributed import Client, LocalCluster
from dask_ml.datasets import make_regression
from dask_ml.model_selection import train_test_split
import xgboost as xgb
import xgboost.dask as dxgb
from xgboost.dask im... |
import grpc.aio
import pytest
from grpc import StatusCode
from grpc.aio import Metadata
from jina.excepts import BaseJinaException, InternalNetworkError
@pytest.fixture
def aio_rpc_error():
return grpc.aio.AioRpcError(StatusCode.OK, None, None, details='I am a grpc error')
def test_ine_parent_classes(aio_rpc_e... | import grpc.aio
import pytest
from grpc import StatusCode
from jina.excepts import BaseJinaException, InternalNetworkError
@pytest.fixture
def aio_rpc_error():
return grpc.aio.AioRpcError(StatusCode.OK, None, None, details='I am a grpc error')
def test_ine_parent_classes(aio_rpc_error):
err = InternalNetwo... |
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natura... | """
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natura... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.imagenet_utils import (
decode_predictions as decode_predictions,
)
from keras.src.applications.imagenet_utils import (
preprocess_input as preprocess_input,
)
| """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.imagenet_utils import decode_predictions
from keras.src.applications.imagenet_utils import preprocess_input
|
"""
====================================
How to write your own TVTensor class
====================================
.. note::
Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_tv_tensors.ipynb>`_
or :ref:`go to the end <sphx_glr_down... | """
====================================
How to write your own TVTensor class
====================================
.. note::
Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_tv_tensors.ipynb>`_
or :ref:`go to the end <sphx_glr_dow... |
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
img_scales = [(1333, 800), (666, 400), (2000, 1200)]
tta_pipeline = [
dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
dict(
type='TestTimeAug',
transforms... | tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
img_scales = [(1333, 800), (666, 400), (2000, 1200)]
tta_pipeline = [
dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
dict(
type='TestTimeAug',
transforms... |
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
... | from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
... |
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Ju... | import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Ju... |
import csv
import os
from pathlib import Path
from torchaudio.datasets import ljspeech
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
_TRANSCRIPTS = [
"Test transcript 1",
"Test transcript 2",
"Test transcript 3",
"In 1465 Sweynhe... | import csv
import os
from pathlib import Path
from torchaudio.datasets import ljspeech
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
_TRANSCRIPTS = [
"Test transcript 1",
"Test transcript 2",
"Test transcript... |
""" """
from torch.utils.data import IterableDataset
import numpy as np
from typing import List
from ..readers import InputExample
import logging
logger = logging.getLogger(__name__)
class SentenceLabelDataset(IterableDataset):
"""
This dataset can be used for some specific Triplet Losses like BATCH_HARD_TR... | """
"""
from torch.utils.data import IterableDataset
import numpy as np
from typing import List
from ..readers import InputExample
import logging
logger = logging.getLogger(__name__)
class SentenceLabelDataset(IterableDataset):
"""
This dataset can be used for some specific Triplet Losses like BATCH_HARD_TR... |
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1... | """All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1... |
import os
from pathlib import Path
from typing import Callable, Optional, Union
from .folder import ImageFolder
from .utils import download_and_extract_archive
class EuroSAT(ImageFolder):
"""RGB version of the `EuroSAT <https://github.com/phelber/eurosat>`_ Dataset.
Args:
root (str or ``pathlib.Path... | import os
from typing import Callable, Optional
from .folder import ImageFolder
from .utils import download_and_extract_archive
class EuroSAT(ImageFolder):
"""RGB version of the `EuroSAT <https://github.com/phelber/eurosat>`_ Dataset.
Args:
root (string): Root directory of dataset where ``root/euros... |
import numpy as np
import pytest
import keras
from keras.src import layers
from keras.src import losses
from keras.src import metrics
from keras.src import optimizers
from keras.src import testing
class MyModel(keras.Model):
def __init__(self, hidden_dim, output_dim, **kwargs):
super().__init__(**kwargs)... | import numpy as np
import pytest
import keras
from keras.src import layers
from keras.src import losses
from keras.src import metrics
from keras.src import optimizers
from keras.src import testing
class MyModel(keras.Model):
def __init__(self, hidden_dim, output_dim, **kwargs):
super().__init__(**kwargs)... |
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-dc5.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
| _base_ = [
'../_base_/models/faster-rcnn_r50-caffe-dc5.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
... |
import unittest
import torchaudio
from torchaudio.prototype.pipelines import VGGISH
class VGGishPipelineTest(unittest.TestCase):
def test_vggish(self):
input_sr = VGGISH.sample_rate
input_proc = VGGISH.get_input_processor()
model = VGGISH.get_model()
path = torchaudio.utils.downlo... | import torchaudio
from torchaudio.prototype.pipelines import VGGISH
def test_vggish():
input_sr = VGGISH.sample_rate
input_proc = VGGISH.get_input_processor()
model = VGGISH.get_model()
path = torchaudio.utils.download_asset("test-assets/Chopin_Ballade_-1_In_G_Minor,_Op._23_excerpt.mp3")
waveform,... |
# CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHa... | # CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHa... |
import pytest
from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface
from pytest_httpx import HTTPXMock
from requests_mock import Mocker
from contextlib import contextmanager
import os
from typing import Generator, Any
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock, base_url: str):
... | import pytest
from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface
from pytest_httpx import HTTPXMock
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock, base_url: str):
mock_response = {
"data": [
{
"id": "model1",
"object": "model"... |
__version__ = "2.7.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import Cross... | __version__ = "2.6.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import Cross... |
import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from diffusers.utils.testing_utils import torch_device
from .... | import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, GemmaConfig, GemmaForCausalLM
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from diffusers.utils.testing_utils import torch_device
fr... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
from jina import Document, Flow
try:
from video_torch_encoder import VideoTorchEncoder
except:
from ...video_torch_encoder import VideoTorchEncoder
cur_dir = os.path.dirname(os.... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
from jina import Document, Flow
try:
from video_torch_encoder import VideoTorchEncoder
except:
from jinahub.encoder.video_torch_encoder import VideoTorchEncoder
cur_dir = os.pat... |
import csv
import pathlib
from typing import Any, Callable, Optional, Tuple, Union
import PIL
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class GTSRB(VisionDataset):
"""`German Traffic Sign Recognition Benchmark (GTSRB) <http... | import csv
import pathlib
from typing import Any, Callable, Optional, Tuple
import PIL
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class GTSRB(VisionDataset):
"""`German Traffic Sign Recognition Benchmark (GTSRB) <https://ben... |
from typing import Dict, Optional
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.utils.openai import is_openai_v1
DEFAULT_API_BASE = "https://text.... | from typing import Dict, Optional
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.utils.openai import is_openai_v1
DEFAULT_API_BASE = "https://text.... |
from .autograd_utils import use_deterministic_algorithms
from .backend_utils import set_audio_backend
from .case_utils import (
disabledInCI,
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuCtcDecoder,
s... | from .autograd_utils import use_deterministic_algorithms
from .backend_utils import set_audio_backend
from .case_utils import (
disabledInCI,
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuCtcDecoder,
s... |
import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.datapoints import Image
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, HttpRes... | import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.prototype.datapoints import Image, Label
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvi... |
import csv
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, datasets, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just... | from torch.utils.data import DataLoader
from sentence_transformers import models, losses, datasets
from sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import os
im... |
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
... | _base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
... |
import itertools
import numpy as np
from absl.testing import parameterized
from keras.src import ops
from keras.src import testing
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bo... | import itertools
import numpy as np
from absl.testing import parameterized
from keras.src import ops
from keras.src import testing
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bo... |
"""
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level... | """
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level... |
import json
import re
from typing import TypeVar
import yaml
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from pydantic import BaseModel, ValidationError
from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS
T = Typ... | import json
import re
from typing import Type, TypeVar
import yaml
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from pydantic import BaseModel, ValidationError
from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS
T... |
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()... | import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()
de... |
import collections
import json
import logging
import os
import string
from typing import Iterable, List
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
logger = logging.getLogger(__name__)
class PhraseTokenizer(WordTokeni... | from typing import List, Iterable
import collections
import string
import os
import json
import logging
from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS
from transformers.utils.import_utils import is_nltk_available, NLTK_IMPORT_ERROR
logger = logging.getLogger(__name__)
class PhraseTokenizer(WordTokeniz... |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
image_size = (896, 896)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='BN', requires_grad=True)
checkp... | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k... |
from typing import List, Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
... | from typing import List, Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: Ima... |
"""Init file of LlamaIndex."""
__version__ = "0.12.30"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.... | """Init file of LlamaIndex."""
__version__ = "0.12.29"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.... |
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
input_size = 300
model = dict(
bbox_head=dict(
type='SSDHead',
anchor_generator=dict(
type='LegacySSDAnchorGene... | _base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
input_size = 300
model = dict(
bbox_head=dict(
type='SSDHead',
anchor_generator=dict(
type='LegacySSDAnchorGene... |
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that l... | # Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class IterTi... |
from __future__ import annotations
import operator
from collections.abc import Sequence
from typing import Optional
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from pydantic import ConfigDict
from langchain.retrievers.document_compressors.cross... | from __future__ import annotations
import operator
from typing import Optional, Sequence
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from pydantic import ConfigDict
from langchain.retrievers.document_compressors.cross_encoder import BaseCrossEn... |
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SSDHead
class TestSSDHead(TestCase):
def test_ssd_head_loss(... | # Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SSDHead
class TestSSDHead(TestCase):
def test_ssd_head_loss(self):... |
import multiprocessing
import os
import signal
import time
import pytest
from jina import Document, DocumentArray, Executor, requests
from jina.clients.request import request_generator
from jina.parsers import set_gateway_parser
from jina.serve.networking.utils import send_request_sync
from jina_cli.api import execut... | import multiprocessing
import os
import signal
import time
import pytest
from jina import Document, DocumentArray, Executor, requests
from jina.clients.request import request_generator
from jina.parsers import set_gateway_parser
from jina.serve.networking.utils import send_request_sync
from jina_cli.api import execut... |
# Copyright (c) OpenMMLab. All rights reserved.
from .build_functions import (build_from_cfg, build_model_from_cfg,
build_runner_from_cfg, build_scheduler_from_cfg)
from .default_scope import DefaultScope
from .registry import Registry
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR,... | # Copyright (c) OpenMMLab. All rights reserved.
from .build_functions import (build_from_cfg, build_model_from_cfg,
build_runner_from_cfg)
from .default_scope import DefaultScope
from .registry import Registry
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR, HOOKS, LOG_PROCESSORS,
... |
import operator
import pytest
from langchain_core.utils.usage import _dict_int_op
def test_dict_int_op_add() -> None:
left = {"a": 1, "b": 2}
right = {"b": 3, "c": 4}
result = _dict_int_op(left, right, operator.add)
assert result == {"a": 1, "b": 5, "c": 4}
def test_dict_int_op_subtract() -> None:... | import operator
import pytest
from langchain_core.utils.usage import _dict_int_op
def test_dict_int_op_add() -> None:
left = {"a": 1, "b": 2}
right = {"b": 3, "c": 4}
result = _dict_int_op(left, right, operator.add)
assert result == {"a": 1, "b": 5, "c": 4}
def test_dict_int_op_subtract() -> None:... |
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from examples/modular-transformers/modular_add_function.py.
# Do NOT edit this file manually as any edits will be overwritten by the generatio... | # π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from examples/modular-transformers/modular_add_function.py.
# Do NOT edit this file manually as any edits will be overwritten by the generatio... |
from pathlib import Path
from typing import List
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...audioclip_text import AudioCLIPTextEncoder
_EMBEDDING_DIM = 1024
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex... | from pathlib import Path
from typing import List
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray, Executor
from ...audioclip_text import AudioCLIPTextEncoder
_EMBEDDING_DIM = 1024
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
... |