| input (string, 33–5k chars) | output (string, 32–5k chars) |
|---|---|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... | import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = o... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/191... | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/19... |
import unittest
import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Pipeline,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from diffusers.utils.testing_utils import torch_device
from... | import unittest
import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from ..test_pipelines_common import PipelineTesterMixin
class Lumina2Text2ImgP... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Document, Flow, DocumentArray
try:
from spacy_text_encoder import SpacyTextEncoder
except:
from ...spacy_text_encoder import SpacyTextEncoder
def test_spacy_text_encoder():
docs = ... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Document, Flow, DocumentArray
try:
from spacy_text_encoder import SpacyTextEncoder
except:
from jinahub.encoder.spacy_text_encoder import SpacyTextEncoder
def test_spacy_text_encoder()... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class SCNetMaskHead(FCNMaskHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class SCNetMaskHead(FCNMaskHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
... |
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import layers
from keras.src import testing
class MixUpTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.MixUp,
init_kwargs={
... | import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import layers
from keras.src import testing
class MixUpTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.MixUp,
init_kwargs={
... |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Any, Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
@HOOKS.regis... | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Any, Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataSample
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
@HOOKS.regist... |
"""Utilities for getting information about the runtime environment."""
import platform
from functools import lru_cache
@lru_cache(maxsize=1)
def get_runtime_environment() -> dict:
"""Get information about the LangChain runtime environment.
Returns:
A dictionary with information about the runtime env... | import platform
from functools import lru_cache
@lru_cache(maxsize=1)
def get_runtime_environment() -> dict:
"""Get information about the LangChain runtime environment.
Returns:
A dictionary with information about the runtime environment.
"""
# Lazy import to avoid circular imports
from l... |
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.util import fullname
class MSELoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None... | from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.util import fullname
class MSELoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None... |
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import BinaryClassificationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseE... | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import BinaryClassificationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseE... |
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writ... | # Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writ... |
from unittest.mock import MagicMock, AsyncMock
import pytest
import sys
from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader
READER_TEST_PARAM = pytest.param(
[
"https://sandbox.oxylabs.io/products/1",
"https://sandbox.oxylabs.io/products/2",
],
{
"parse": True,
... | from unittest.mock import MagicMock, AsyncMock
import pytest
from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader
READER_TEST_PARAM = pytest.param(
[
"https://sandbox.oxylabs.io/products/1",
"https://sandbox.oxylabs.io/products/2",
],
{
"parse": True,
},
... |
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import ... | import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activation... |
_base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py'
max_epochs = 36
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milesto... | _base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but hope to see some on the hub soon
m... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not multilingual but hope to see some on the hub soon
m... |
from jina.serve.runtimes.gateway.http.fastapi import (
FastAPIBaseGateway,
) # keep import here for backwards compatibility
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.servers.http import HTTPServer
__all__ = ['HTTPGateway']
class HTTPGateway(HTTPServer, BaseGateway):
... | from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway # keep import here for backwards compatibility
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.servers.http import HTTPServer
__all__ = ['HTTPGateway']
class HTTPGateway(HTTPServer, BaseGateway):
"""
... |
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import numerical_utils
NUM_CLASSES = 5
class TestNumericalUtils(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
[
((1,), (1, NUM_CL... | import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import numerical_utils
NUM_CLASSES = 5
class TestNumericalUtils(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
[
((1,), (1, NUM_CL... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
# TODO: support fuse_conv_bn and format_only
def parse_args():
parser = argparse.Argument... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
# TODO: support fuse_conv_bn, visualization, and format_only
def parse_args():
parser = a... |
from langchain_core.prompts.prompt import PromptTemplate
# For backwards compatibility.
Prompt = PromptTemplate
__all__ = ["Prompt", "PromptTemplate"]
| from langchain_core.prompts.prompt import PromptTemplate
# For backwards compatibility.
Prompt = PromptTemplate
__all__ = ["PromptTemplate", "Prompt"]
|
#!/usr/bin/env python3
"""Generate feature statistics for training set.
Example:
python global_stats.py --model-type librispeech --dataset-path /home/librispeech
"""
import json
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
import torch
import torchaudio
from common import (... | #!/usr/bin/env python3
"""Generate feature statistics for training set.
Example:
python global_stats.py --model-type librispeech --dataset-path /home/librispeech
"""
import json
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
import torch
import torchaudio
from common import (... |
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.... | import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.... |
import importlib
import shutil
import warnings
from typing import List
import fsspec
import fsspec.asyn
from fsspec.implementations.local import LocalFileSystem
from . import compression
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFi... | import importlib
import shutil
import warnings
from typing import List
import fsspec
import fsspec.asyn
from fsspec.implementations.local import LocalFileSystem
from ..utils.deprecation_utils import deprecated
from . import compression
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from... |
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import tree
from keras.src.utils.module_utils import tensorflow as tf
def get_input_signature(model):
if not isinstance(model, models.Model):
raise TypeError(
"The m... | from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import tree
from keras.src.utils.module_utils import tensorflow as tf
def get_input_signature(model):
if not isinstance(model, models.Model):
raise TypeError(
"The m... |
import uuid
from typing import Optional
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores import Qdrant
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
)
from tests.integration_tests.vectorstores.qdrant.common import asse... | import uuid
from typing import Optional
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores import Qdrant
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
)
from tests.integration_tests.vectorstores.qdrant.common import asse... |
from __future__ import annotations
from unittest.mock import Mock, PropertyMock
import pytest
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers.util import cos_sim
@pytest.fixture
def mock_model()... | from __future__ import annotations
from unittest.mock import Mock, PropertyMock
import pytest
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
@pytest.fixture
def mock_model():
def mock_encode(sentences: str | list[str... |
import functools
import logging
import os
import time
from typing import Any, Awaitable, Callable, Coroutine, ParamSpec, Tuple, TypeVar
from pydantic import BaseModel
class TimingInfo(BaseModel):
cpu_time: float
wall_time: float
def _start_measurement() -> Tuple[float, float]:
return time.time(), os.ti... | import functools
import logging
import os
import time
from typing import Callable, ParamSpec, Tuple, TypeVar
from pydantic import BaseModel
class TimingInfo(BaseModel):
cpu_time: float
wall_time: float
def _start_measurement() -> Tuple[float, float]:
return time.time(), os.times()[0] + os.times()[1]
... |
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model impo... | __copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model impo... |
_base_ = [
'../common/ms-poly_3x_coco-instance.py',
'../_base_/models/mask-rcnn_r50_fpn.py'
]
| _base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
|
import uuid
from typing import Optional
import pytest
from langchain_community.vectorstores import Qdrant
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
)
from tests.integration_tests.vectorstores.qdrant.async_api.fixtures import (
qdrant_locations,
)
@pytest.ma... | import uuid
from typing import Optional
import pytest
from langchain_community.vectorstores import Qdrant
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
)
from tests.integration_tests.vectorstores.qdrant.async_api.fixtures import (
qdrant_locations,
)
@pytest.ma... |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_d... | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_d... |
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="2.8.0.dev0",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embe... | from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="2.7.0.dev0",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embe... |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FCOS',
prepr... | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FCOS',
prepr... |
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Pipeline")
class Pipeline(Layer):
"""Applies a series of layers to an input.
This class is useful to build a preprocessi... | from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Pipeline")
class Pipeline(Layer):
"""Applies a series of layers to an input.
This class is useful to build a preprocessi... |
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ActivityRegularization")
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Args:
l1: L1 r... | from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ActivityRegularization")
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Args:
l1: L1 r... |
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser, set_pod_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import a... | import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser, set_pod_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import a... |
import langchain_core.tracers.schemas as schemas
from langchain_core.tracers.schemas import __all__ as schemas_all
def test_public_api() -> None:
"""Test for changes in the public API."""
expected_all = [
"BaseRun",
"ChainRun",
"LLMRun",
"Run",
"RunTypeEnum",
"T... | import langchain_core.tracers.schemas as schemas
from langchain_core.tracers.schemas import __all__ as schemas_all
def test_public_api() -> None:
"""Test for changes in the public API."""
expected_all = [
"BaseRun",
"ChainRun",
"LLMRun",
"Run",
"RunTypeEnum",
"T... |
from contextlib import asynccontextmanager
from datetime import timedelta
from typing import Optional, List, Dict
from urllib.parse import urlparse
from mcp.client.session import ClientSession
from mcp.client.sse import sse_client
from mcp.client.stdio import stdio_client, StdioServerParameters
class BasicMCPClient(... | from mcp.client.session import ClientSession
from mcp.client.sse import sse_client
from mcp.client.stdio import stdio_client, StdioServerParameters
from urllib.parse import urlparse
from contextlib import asynccontextmanager
class BasicMCPClient(ClientSession):
"""
Basic MCP client that can be used to connec... |
"""Code to help indexing data into a vectorstore.
This package contains helper logic to help deal with indexing data into
a vectorstore while avoiding duplicated content and over-writing content
if it's unchanged.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHEC... | """Code to help indexing data into a vectorstore.
This package contains helper logic to help deal with indexing data into
a vectorstore while avoiding duplicated content and over-writing content
if it's unchanged.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from lan... |
from inspect import signature
from typing import (
Any,
Awaitable,
Callable,
List,
Optional,
Tuple,
Type,
Union,
cast,
get_origin,
get_args,
)
import typing
from llama_index.core.bridge.pydantic import BaseModel, FieldInfo, create_model
def create_schema_from_function(
... | from inspect import signature
from typing import (
Any,
Awaitable,
Callable,
List,
Optional,
Tuple,
Type,
Union,
cast,
get_origin,
get_args,
)
import typing
from llama_index.core.bridge.pydantic import BaseModel, FieldInfo, create_model
def create_schema_from_function(
... |
import requests # type: ignore
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_qdrant import SparseEmbeddings, SparseVector
def qdrant_running_locally() -> bool:
"""Check if Qdrant is running at http://localhost:6333."""
try:
response = ... | from typing import List
import requests # type: ignore
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_qdrant import SparseEmbeddings, SparseVector
def qdrant_running_locally() -> bool:
"""Check if Qdrant is running at http://localhost:6333."""
... |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import fire
from llama import Llama
from typing import List
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p... | # Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_... |
import operator
import uuid
from collections.abc import Sequence
from typing import Any, Optional, cast
from pydantic import Field
from langchain_core._api import beta
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.indexing import ... | import operator
import uuid
from collections.abc import Sequence
from typing import Any, Optional, cast
from pydantic import Field
from langchain_core._api import beta
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.indexing import ... |
import PIL.Image
import pytest
import torch
import torchvision.transforms.v2._utils
from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_mask, make_image
from torchvision import datapoints
from torchvision.transforms.v2._utils import has_all, has_any
from torchvision.transforms.v2.functional im... | import PIL.Image
import pytest
import torch
import torchvision.transforms.v2.utils
from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_mask, make_image
from torchvision import datapoints
from torchvision.transforms.v2.functional import to_pil_image
from torchvision.transforms.v2.utils import h... |
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import ArrayType
def cosine(x_mat: 'np.ndarray', y_mat: 'np.ndarray', eps: float = 1e-7) -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: np.n... | from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray.typing import ArrayType
def cosine(x_mat: 'np.ndarray', y_mat: 'np.ndarray', eps: float = 1e-7) -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
... |
from jina import Client, Document, DocumentArray, Executor, Flow, requests
from jina.helper import random_port
def test_override_requests():
port = random_port()
class FooExecutor(Executor):
@requests(on='/foo')
def foo(self, docs, **kwargs):
for doc in docs:
doc.t... | from jina import Client, Document, DocumentArray, Executor, Flow, requests
from jina.helper import random_port
def test_override_requests():
port = random_port()
class FooExecutor(Executor):
@requests(on='/foo')
def foo(self, docs, **kwargs):
for doc in docs:
doc.t... |
# Copyright (c) OpenMMLab. All rights reserved.
from .conditional_detr_transformer import (
ConditionalDetrTransformerDecoder, ConditionalDetrTransformerDecoderLayer)
from .deformable_detr_transformer import (
DeformableDetrTransformerDecoder, DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformer... | # Copyright (c) OpenMMLab. All rights reserved.
from .deformable_detr_transformer import (
DeformableDetrTransformerDecoder, DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder, DeformableDetrTransformerEncoderLayer)
from .detr_transformer import (DetrTransformerDecoder,
... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class SparseRCNN(TwoStageDetector):
r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with
Learnable Proposals <https://arxiv.org/abs/2011.12450>`_... | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class SparseRCNN(TwoStageDetector):
r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with
Learnable Proposals <https://arxiv.org/abs/2011.12450>`... |
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import torch
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
R... | # Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
... |
import torch
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
def cholesky(x):
return torch.linalg.cholesky(x)
def det(x):
... | import torch
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
def cholesky(x):
return torch.linalg.cholesky(x)
def det(x):
... |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
image_size = (896, 896)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='BN', requires_grad=True)
checkp... | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
image_size = (896, 896)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='BN', requires_grad=True)
checkp... |
_base_ = '../fast_rcnn/fast-rcnn_r50-caffe_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
loss_bbox=dict(type=... | _base_ = '../fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
in... |
# Copyright (c) OpenMMLab. All rights reserved.
from pathlib import Path
from typing import Any, Optional, Union
import torch
import torch.nn as nn
from mmengine.config import Config
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry imp... | # Copyright (c) OpenMMLab. All rights reserved.
from pathlib import Path
from typing import Any, Optional, Union
import torch
import torch.nn as nn
from mmengine.config import Config
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.core import ConfigType, OptConfigType, SampleList
from ... |
_base_ = './cornernet_hourglass104_mstest_8x6_210e_coco.py'
train_dataloader = dict(batch_size=5)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (10 GPUs) x (5 samples per GPU)
auto_scale_lr = dict(base_batch_size=50)
| _base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, ... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import VGG
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class NumClassCheckHook(Hook):
"""Check whether the `num_classes` in head matches the length of `classes`
in `d... | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import VGG
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class NumClassCheckHook(Hook):
"""Check whether the `num_classes` in head matches the length of `CLASSES`
in `d... |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
# TODO: Due to in... | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
# TODO: Due to in... |
import numpy as np
from jina import Flow, Document, DocumentArray
from ..simple_indexer import SimpleIndexer
def test_simple_indexer_flow(tmpdir):
f = Flow().add(
uses=SimpleIndexer,
override_with={'index_file_name': 'name'},
override_metas={'workspace': str(tmpdir)},
)
with f:
... | import numpy as np
from jina import Flow, Document, DocumentArray
from .. import SimpleIndexer
def test_simple_indexer_flow(tmpdir):
f = Flow().add(
uses=SimpleIndexer,
override_with={'index_file_name': 'name'},
override_metas={'workspace': str(tmpdir)},
)
with f:
resp = ... |
from sentence_transformers import SentenceTransformer
from contextlib import nullcontext
from sentence_transformers.evaluation import SentenceEvaluator
import logging
import os
import csv
from typing import List, Optional
logger = logging.getLogger(__name__)
class MSEEvaluator(SentenceEvaluator):
"""
Comput... | from contextlib import nullcontext
from sentence_transformers.evaluation import SentenceEvaluator
import logging
import os
import csv
from typing import List, Optional
logger = logging.getLogger(__name__)
class MSEEvaluator(SentenceEvaluator):
"""
Computes the mean squared error (x100) between the computed ... |
# Copyright (c) OpenMMLab. All rights reserved.
from .res_layer import ResLayer
__all__ = ['ResLayer']
| from .res_layer import ResLayer
__all__ = ['ResLayer']
|
from typing import (
TYPE_CHECKING,
Iterable,
)
from docarray.array.memory import DocumentArrayInMemory
if TYPE_CHECKING: # pragma: no cover
from docarray.document import Document
class ChunkArray(DocumentArrayInMemory):
"""
:class:`ChunkArray` inherits from :class:`DocumentArray`.
It's a s... | from typing import (
TYPE_CHECKING,
Iterable,
)
from docarray.array.memory import DocumentArrayInMemory
if TYPE_CHECKING:
from docarray.document import Document
class ChunkArray(DocumentArrayInMemory):
"""
:class:`ChunkArray` inherits from :class:`DocumentArray`.
It's a subset of Documents.
... |
from typing import Any, Optional
import pytest
from langchain.callbacks import StdOutCallbackHandler
from langchain.chains.base import CallbackManagerForChainRun, Chain
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: list[str] = ["foo"]
t... | from typing import Any, Dict, List, Optional
import pytest
from langchain.callbacks import StdOutCallbackHandler
from langchain.chains.base import CallbackManagerForChainRun, Chain
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: List[str] = [... |
import json
import os
from typing import Dict
import torch
from torch import Tensor, nn
class WeightedLayerPooling(nn.Module):
"""Token embeddings are weighted mean of their different hidden layer representations"""
def __init__(
self, word_embedding_dimension, num_hidden_layers: int = 12, layer_sta... | import torch
from torch import Tensor
from torch import nn
from typing import Dict
import os
import json
class WeightedLayerPooling(nn.Module):
"""
Token embeddings are the weighted mean of their different hidden layer representations
"""
def __init__(
self, word_embedding_dimension, num_hidden_l... |
"""Init file of LlamaIndex."""
__version__ = "0.12.29"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.... | """Init file of LlamaIndex."""
__version__ = "0.12.28"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.... |
# Copyright (c) OpenMMLab. All rights reserved.
from .build_functions import (build_from_cfg, build_model_from_cfg,
build_runner_from_cfg)
from .default_scope import DefaultScope
from .registry import Registry
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR, HOOKS, LOG_PROCESSORS,
... | # Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg, build_runner_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR, HOOKS, LOG_PROCESSORS,
LOOPS, METRICS, MODEL_WRAPPERS, MODELS,
OPTIM_... |
from argparse import Namespace
from copy import deepcopy
from typing import TYPE_CHECKING, Type
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
... | from argparse import Namespace
from copy import deepcopy
from typing import TYPE_CHECKING, Type
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
... |
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import Ba... | import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import Ba... |
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(... | from sentence_transformers import SentenceTransformer, LoggingHandler, InputExample
from sentence_transformers import models, util, evaluation, losses
import logging
import os
import gzip
from datetime import datetime
from torch.utils.data import DataLoader
#### Just some code to print debug information to stdout
logg... |
_base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
ga... | _base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
ga... |
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL I... | from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL I... |
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .optimizer_hook... | # Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import Param... |
import gzip
from . import InputExample
class PairedFilesReader(object):
"""Reads in the a Pair Dataset, split in two files"""
def __init__(self, filepaths):
self.filepaths = filepaths
def get_examples(self, max_examples=0):
fIns = []
for filepath in self.filepaths:
f... | from . import InputExample
import gzip
class PairedFilesReader(object):
"""
Reads in a Pair Dataset, split in two files
"""
def __init__(self, filepaths):
self.filepaths = filepaths
def get_examples(self, max_examples=0):
""" """
fIns = []
for filepath in self... |
from io import BytesIO
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.t... | from io import BytesIO
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.t... |
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_r... | # Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_r... |
from __future__ import annotations
import logging
import time
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_commu... | from __future__ import annotations
import logging
import time
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_commu... |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If ex... | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If ex... |
"""Test yamlOutputParser"""
from enum import Enum
from typing import Optional
import pytest
from langchain_core.exceptions import OutputParserException
from pydantic import BaseModel, Field
from langchain.output_parsers.yaml import YamlOutputParser
class Actions(Enum):
SEARCH = "Search"
CREATE = "Create"
... | """Test yamlOutputParser"""
from enum import Enum
from typing import Optional
import pytest
from langchain_core.exceptions import OutputParserException
from pydantic import BaseModel, Field
from langchain.output_parsers.yaml import YamlOutputParser
class Actions(Enum):
SEARCH = "Search"
CREATE = "Create"
... |
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredEPubLoader(UnstructuredFileLoader):
"""Load `EPub` files using `Unstructured`.
You can run the loade... | from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredEPubLoader(UnstructuredFileLoader):
"""Load `EPub` files using `Unstructured`.
You can run the loade... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .cascade_rcnn import CascadeRCNN
@MODELS.register_module()
class SCNet(CascadeRCNN):
"""Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""
def __init__(self, **kwargs) -> None:
super().__init__(**kwar... | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .cascade_rcnn import CascadeRCNN
@MODELS.register_module()
class SCNet(CascadeRCNN):
"""Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""
def __init__(self, **kwargs):
super(SCNet, self).__init__(**k... |
import io
import pathlib
from collections import namedtuple
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.prototype import features
from torchvision.prototype.datasets.utils import Dataset, GDriveResource, OnlineR... | import io
import pathlib
from collections import namedtuple
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.prototype import features
from torchvision.prototype.datasets.utils import Dataset, GDriveResource, OnlineR... |
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
Th... | from __future__ import annotations
from typing import TYPE_CHECKING, Iterable
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
Th... |
"""
This tool allows agents to interact with the clickup library
and operate on a Clickup instance.
To use this tool, you must first set as environment variables:
client_secret
client_id
code
Below is a sample script that uses the Clickup tool:
```python
from langchain_community.agent_toolkits.clickup.too... | """
This tool allows agents to interact with the clickup library
and operate on a Clickup instance.
To use this tool, you must first set as environment variables:
client_secret
client_id
code
Below is a sample script that uses the Clickup tool:
```python
from langchain_community.agent_toolkits.clickup.too... |
"""
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regressi... | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regressi... |
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from jina.clients.request import request_generator
from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
... | from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from jina.clients.request import request_generator
from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
def parse_args():
parser = argparse.ArgumentPa... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def p... |
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransfor... | from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransfor... |
"""Module containing the base parser for arguments of Jina."""
import argparse
from jina.parsers.helper import _chf
def set_base_parser():
"""Set the base parser
:return: the parser
"""
from jina import __version__
from jina.helper import colored, format_full_version_info, get_full_version
... | """Module containing the base parser for arguments of Jina."""
import argparse
from jina.parsers.helper import _chf
def set_base_parser():
"""Set the base parser
:return: the parser
"""
from jina import __version__
from jina.helper import colored, format_full_version_info, get_full_version
... |
import os
import subprocess
directory = os.path.dirname(os.path.realpath(__file__))
def run(*command: str) -> None:
print(f">>>>> Running poetry run {' '.join(command)}")
subprocess.run(["poetry", "run"] + list(command), cwd=directory, check=True)
def lint():
try:
run("ruff", "check", ".", "--e... | import os
import subprocess
directory = os.path.dirname(os.path.realpath(__file__))
def run(*command: str) -> None:
print(f">>>>> Running poetry run {' '.join(command)}")
subprocess.run(["poetry", "run"] + list(command), cwd=directory, check=True)
def lint():
try:
run("ruff", "check", ".", "--e... |
"""Test Ollama Chat API wrapper."""
from typing import Any
from unittest.mock import patch
from langchain_ollama import OllamaLLM
MODEL_NAME = "llama3.1"
def test_initialization() -> None:
"""Test integration initialization."""
OllamaLLM(model="llama3")
def test_model_params() -> None:
# Test standar... | """Test Ollama Chat API wrapper."""
from langchain_ollama import OllamaLLM
def test_initialization() -> None:
"""Test integration initialization."""
OllamaLLM(model="llama3")
def test_model_params() -> None:
# Test standard tracing params
llm = OllamaLLM(model="llama3")
ls_params = llm._get_ls_... |
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import Spar... | from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import Spar... |
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
It also demonstrates how to stream data which is helpful in case you don't
want to wait for an extremely large dataset to download, or if you want to
limit ... | """
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
It also demonstrates how to stream data which is helpful in case you don't
want to wait for an extremely large dataset to download, or if you want to
limit ... |
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import ElasticV7DocIndex
from tests.index.elastic.fixture import start_storage_v7 # noqa: F401
pytestmark = [pytest.mark.slow, pytest.mark.index]
def test_column_config():
class MyDoc(BaseDoc):
text: str
c... | import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import ElasticV7DocIndex
from tests.index.elastic.fixture import start_storage_v7 # noqa: F401
pytestmark = [pytest.mark.slow, pytest.mark.index]
def test_column_config():
class MyDoc(BaseDoc):
text: str
c... |
from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray.typing import T
class VideoDataMixin:
"""Provide helper functions for :class:`Document` to support video data."""
def load_uri_to_video_tensor(self: 'T', only_keyframes: bool = False) -> 'T':
""... | from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from ...typing import T
class VideoDataMixin:
"""Provide helper functions for :class:`Document` to support video data."""
def load_uri_to_video_tensor(self: 'T', only_keyframes: bool = False) -> 'T':
"""Conve... |
from typing import Any, Dict, Optional
from elasticsearch import AsyncElasticsearch, Elasticsearch
from logging import getLogger
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.vector_stores.utils import metadata_dict_to_node
logger = getLogger(__name__)
def get_user_agent() -> str:
... | from typing import Any, Dict, Optional
from elasticsearch import AsyncElasticsearch, Elasticsearch
from logging import getLogger
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.vector_stores.utils import metadata_dict_to_node
logger = getLogger(__name__)
def get_user_agent() -> str:
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Redis
from langchain_community.vectorstores.redis.base import RedisVectorStoreRetriever
from langchain_community.vectorstores.redis.filters import (
Redis... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Redis
from langchain_community.vectorstores.redis.base import RedisVectorStoreRetriever
from langchain_community.vectorstores.redis.filters import (
Redis... |
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from docarray.proto import NodeProto
class BaseNode(ABC):
"""
A DocumentNode is an object that can be nested inside a Document.
A Document itself is a DocumentNode as well as prebuilt type
"""
@abstractmet... | from abc import ABC, abstractmethod
from docarray.proto import NodeProto
class BaseNode(ABC):
"""
A DocumentNode is an object that can be nested inside a Document.
A Document itself is a DocumentNode as well as prebuilt type
"""
@abstractmethod
def _to_node_protobuf(self) -> NodeProto:
... |
"""Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with TF and JAX automated device placement,
we are... | """Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with TF and JAX automated device placement,
we are... |
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_r... | # Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_r... |
"""Google PaLM embeddings file."""
import deprecated
from typing import Any, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
impor... | """Google PaLM embeddings file."""
from typing import Any, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
import google.generativ... |
"""Simple Web scraper."""
from typing import List, Optional, Dict, Callable
import uuid
import requests
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class SimpleWebPageReader(BasePydanticReader):
... | """Simple Web scraper."""
from typing import List, Optional, Dict, Callable
import requests
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class SimpleWebPageReader(BasePydanticReader):
"""
S... |
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` an... | # Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` an... |
from typing import List
import pytest
from llama_index.core.schema import (
Document,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
@pytest.fixture()
def documents() -> List[Document]:
"""Get documents."""
# NOTE: one document for now
doc_text = (
"Hello world.\nThis is a test.\n... | from typing import List
import pytest
from llama_index.core.schema import (
Document,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
@pytest.fixture()
def documents() -> List[Document]:
"""Get documents."""
# NOTE: one document for now
doc_text = (
"Hello world.\n"
"This i... |