id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
21,801 | import re
import string
from typing import Dict, Any
The provided code snippet includes necessary dependencies for implementing the `get_input_image_file_name` function. Write a Python function `def get_input_image_file_name(data: Dict[str, Any], **_: Dict[str, Any]) -> str` to solve the following problem:
get the image file name of the llm request params :param data: the user llm request data :type data: Dict[str, Any] Example: .. code-block:: python from gptcache.processor.pre import get_input_image_file_name content = get_input_image_file_name({"input": {"image": open("test.png", "rb")}}) # "test.png"
Here is the function:
def get_input_image_file_name(data: Dict[str, Any], **_: Dict[str, Any]) -> str:
    """get the image file name of the llm request params

    :param data: the user llm request data
    :type data: Dict[str, Any]

    Example:
        .. code-block:: python

            from gptcache.processor.pre import get_input_image_file_name

            content = get_input_image_file_name({"input": {"image": open("test.png", "rb")}})
            # "test.png"
    """
    # NOTE(review): raises TypeError/KeyError when "input" is missing or has no
    # "image" entry; the image value is expected to be a file object exposing
    # a ``.name`` attribute (e.g. the result of ``open(...)``) — confirm callers.
    input_data = data.get("input")
    return input_data["image"].name | get the image file name of the llm request params :param data: the user llm request data :type data: Dict[str, Any] Example: .. code-block:: python from gptcache.processor.pre import get_input_image_file_name content = get_input_image_file_name({"input": {"image": open("test.png", "rb")}}) # "test.png" |
21,802 | import re
import string
from typing import Dict, Any
The provided code snippet includes necessary dependencies for implementing the `get_image_question` function. Write a Python function `def get_image_question(data: Dict[str, Any], **_: Dict[str, Any]) -> str` to solve the following problem:
get the image and question str of the llm request params :param data: the user llm request data :type data: Dict[str, Any] Example: .. code-block:: python from gptcache.processor.pre import get_image_question content = get_image_question({"image": open("test.png", "rb"), "question": "foo"})
Here is the function:
def get_image_question(data: Dict[str, Any], **_: Dict[str, Any]) -> str: # pragma: no cover
    """get the image and question str of the llm request params

    :param data: the user llm request data
    :type data: Dict[str, Any]

    Example:
        .. code-block:: python

            from gptcache.processor.pre import get_image_question

            content = get_image_question({"image": open("test.png", "rb"), "question": "foo"})
    """
    img = data.get("image")
    # If "image" is a path, peek the buffered bytes without consuming the
    # stream; otherwise stringify the object as-is. The file handle opened
    # here is deliberately not closed (see the pylint disable).
    data_img = str(open(img, "rb").peek()) if isinstance(img, str) else str(img) # pylint: disable=consider-using-with
    return data_img + data.get("question") | get the image and question str of the llm request params :param data: the user llm request data :type data: Dict[str, Any] Example: .. code-block:: python from gptcache.processor.pre import get_image_question content = get_image_question({"image": open("test.png", "rb"), "question": "foo"}) |
21,803 | import re
import string
from typing import Dict, Any
The provided code snippet includes necessary dependencies for implementing the `get_image` function. Write a Python function `def get_image(data: Dict[str, Any], **_: Dict[str, Any]) -> str` to solve the following problem:
get the image of the llm request params :param data: the user llm request data :type data: Dict[str, Any] Example: .. code-block:: python from gptcache.processor.pre import get_image content = get_image({"image": open("test.png", "rb")}) # "test.png"
Here is the function:
def get_image(data: Dict[str, Any], **_: Dict[str, Any]) -> str: # pragma: no cover
    """get the image of the llm request params

    :param data: the user llm request data
    :type data: Dict[str, Any]

    Example:
        .. code-block:: python

            from gptcache.processor.pre import get_image

            content = get_image({"image": open("test.png", "rb")})
            # "test.png"
    """
    # Returns the raw "image" value unchanged (or None when the key is absent).
    return data.get("image") | get the image of the llm request params :param data: the user llm request data :type data: Dict[str, Any] Example: .. code-block:: python from gptcache.processor.pre import get_image content = get_image({"image": open("test.png", "rb")}) # "test.png" |
21,804 | import re
import string
from typing import Dict, Any
The provided code snippet includes necessary dependencies for implementing the `get_messages_last_content` function. Write a Python function `def get_messages_last_content(data: Dict[str, Any], **_: Any) -> str` to solve the following problem:
get the last content of the llm request messages array :param data: the user llm request data :type data: Dict[str, Any] Example: .. code-block:: python from gptcache.processor.pre import get_messages_last_content content = get_messages_last_content({"messages": [{"content": "hello"}, {"content": "world"}]}) # "world"
Here is the function:
def get_messages_last_content(data: Dict[str, Any], **_: Any) -> str:
    """ get the last content of the llm request messages array

    :param data: the user llm request data
    :type data: Dict[str, Any]

    Example:
        .. code-block:: python

            from gptcache.processor.pre import get_messages_last_content

            content = get_messages_last_content({"messages": [{"content": "hello"}, {"content": "world"}]})
            # "world"
    """
    # NOTE(review): this accesses the ``.content`` attribute, so the example
    # above (plain dicts) would raise AttributeError — presumably the messages
    # are message objects (e.g. langchain BaseMessage); confirm against callers.
    return data.get("messages")[-1].content | get the last content of the llm request messages array :param data: the user llm request data :type data: Dict[str, Any] Example: .. code-block:: python from gptcache.processor.pre import get_messages_last_content content = get_messages_last_content({"messages": [{"content": "hello"}, {"content": "world"}]}) # "world" |
21,805 | import re
import string
from typing import Dict, Any
The provided code snippet includes necessary dependencies for implementing the `get_openai_moderation_input` function. Write a Python function `def get_openai_moderation_input(data: Dict[str, Any], **_: Dict[str, Any]) -> str` to solve the following problem:
get the input param of the openai moderation request params :param data: the user openai moderation request data :type data: Dict[str, Any] Example: .. code-block:: python from gptcache.processor.pre import get_openai_moderation_input content = get_openai_moderation_input({"input": ["hello", "world"]}) # "['hello', 'world']"
Here is the function:
def get_openai_moderation_input(data: Dict[str, Any], **_: Dict[str, Any]) -> str:
    """get the input param of the openai moderation request params

    :param data: the user openai moderation request data
    :type data: Dict[str, Any]

    Example:
        .. code-block:: python

            from gptcache.processor.pre import get_openai_moderation_input

            content = get_openai_moderation_input({"input": ["hello", "world"]})
            # "['hello', 'world']"
    """
    # Stringify the value so both str and list inputs yield a hashable cache key.
    return str(data.get("input")) | get the input param of the openai moderation request params :param data: the user openai moderation request data :type data: Dict[str, Any] Example: .. code-block:: python from gptcache.processor.pre import get_openai_moderation_input content = get_openai_moderation_input({"input": ["hello", "world"]}) # "['hello', 'world']" |
21,806 |
The provided code snippet includes necessary dependencies for implementing the `check_hit_session` function. Write a Python function `def check_hit_session(cur_session_id: str, cache_session_ids: list, cache_questions: list, cache_answer: str)` to solve the following problem:
Check if the session result meets the hit requirement. :param cur_session_id: the name of the current session. :type cur_session_id: str :param cache_session_ids: a list of session names for caching the same content if you are using map as a data management method. Otherwise a list of session names for similar content and same answer. :type cache_session_ids: list :param cache_question: a list with one question which same as the you asked if you use a map as a data management method. Otherwise it is a list that is similar to the question you asked with the same answer, and it is correspondence with cache_session_ids. :type cache_question: list :param cache_answer: the content of the cached answer. :param cache_answer: str :return: True or False
Here is the function:
def check_hit_session(cur_session_id: str, cache_session_ids: list, cache_questions: list, cache_answer: str):
    """
    Check if the session result meets the hit requirement.

    :param cur_session_id: the name of the current session.
    :type cur_session_id: str
    :param cache_session_ids: a list of session names for caching the same content if you are using map as a data management method.
                              Otherwise a list of session names for similar content and same answer.
    :type cache_session_ids: list
    :param cache_question: a list with one question which same as the you asked if you use a map as a data management method.
                           Otherwise it is a list that is similar to the question you asked with the same answer,
                           and it is correspondence with cache_session_ids.
    :type cache_question: list
    :param cache_answer: the content of the cached answer.
    :param cache_answer: str

    :return: True or False
    """
    # Default policy: a cached entry is a hit only when it was NOT produced by
    # the current session. cache_questions and cache_answer are unused here but
    # kept so custom hit checkers can share this signature.
    return cur_session_id not in cache_session_ids | Check if the sesion result meets the hit requirement. :param cur_session_id: the name of the current session. :type cur_session_id: str :param cache_session_ids: a list of session names for caching the same content if you are using map as a data management method. Otherwise a list of session names for similar content and same answer. :type cache_session_ids: list :param cache_question: a list with one question which same as the you asked if you use a map as a data management method. Otherwise it is a list that is similar to the question you asked with the same answer, and it is correspondence with cache_session_ids. :type cache_question: list :param cache_answer: the content of the cached answer. :param cache_answer: str :return: True or False |
21,807 | from typing import Dict, Any
import numpy as np
from gptcache.processor import ContextProcess
from gptcache.utils import import_huggingface
import transformers
def summarize_to_length(summarizer, text, target_len, max_len=1024):
    """Iteratively summarize ``text`` until its token count is <= ``target_len``.

    :param summarizer: a huggingface summarization pipeline (provides ``.tokenizer``).
    :param text: the input text to shorten.
    :param target_len: stop once the text has at most this many tokens.
    :param max_len: the model's maximum input length in tokens.
    """
    tokenizer = summarizer.tokenizer

    def token_length(text):
        # Number of tokens in the given text (shadows the outer ``text`` on purpose).
        return len(tokenizer.encode(text))

    # Leave 100 tokens of headroom below the model's maximum input length.
    segment_len = max_len - 100
    summary_result = text
    while token_length(text) > target_len:
        tokens = tokenizer.encode(text)
        # Split into overlapping windows (stride segment_len - 1) so no token
        # boundary is lost between segments.
        segments = [
            tokens[i : i + segment_len] for i in range(0, len(tokens), segment_len - 1)
        ]
        summary_result = ""
        for segment in segments:
            # Target roughly a 4x compression per segment.
            len_seg = int(len(segment) / 4)
            summary = summarizer(
                tokenizer.decode(segment),
                min_length=max(len_seg - 10, 1),
                max_length=len_seg,
            )
            summary_result += summary[0]["summary_text"]
        text = summary_result
    return summary_result | null |
21,808 | import random
from typing import List, Any
import numpy
from gptcache.utils import softmax
The provided code snippet includes necessary dependencies for implementing the `random_one` function. Write a Python function `def random_one(messages: List[Any]) -> Any` to solve the following problem:
Randomly select one result after evaluation. :param messages: A list of candidate outputs. :type messages: List[Any] Example: .. code-block:: python from gptcache.processor.post import random_one messages = ["message 1", "message 2", "message 3"] answer = random_one(messages)
Here is the function:
def random_one(messages: List[Any]) -> Any:
    """Randomly select one result after evaluation.

    :param messages: A list of candidate outputs.
    :type messages: List[Any]

    Example:
        .. code-block:: python

            from gptcache.processor.post import random_one

            messages = ["message 1", "message 2", "message 3"]
            answer = random_one(messages)
    """
    # Raises IndexError on an empty candidate list, like random.choice itself.
    return random.choice(messages) | Randomly select one result after evaluation. :param messages: A list of candidate outputs. :type messages: List[Any] Example: .. code-block:: python from gptcache.processor.post import random_one messages = ["message 1", "message 2", "message 3"] answer = random_one(messages) |
21,809 | import random
from typing import List, Any
import numpy
from gptcache.utils import softmax
The provided code snippet includes necessary dependencies for implementing the `first` function. Write a Python function `def first(messages: List[Any]) -> Any` to solve the following problem:
Get the first result after evaluation. :param messages: A list of candidate outputs. :type messages: List[Any] Example: .. code-block:: python from gptcache.processor.post import first messages = ["message 1", "message 2", "message 3"] answer = first(messages) assert answer = messages[0]
Here is the function:
def first(messages: List[Any]) -> Any:
    """Get the first result after evaluation.

    :param messages: A list of candidate outputs.
    :type messages: List[Any]

    Example:
        .. code-block:: python

            from gptcache.processor.post import first

            messages = ["message 1", "message 2", "message 3"]
            answer = first(messages)
            assert answer == messages[0]
    """
    return messages[0] | Get the first result after evaluation. :param messages: A list of candidate outputs. :type messages: List[Any] Example: .. code-block:: python from gptcache.processor.post import first messages = ["message 1", "message 2", "message 3"] answer = first(messages) assert answer = messages[0] |
21,810 | from typing import Any, Optional, Callable
import gptcache.processor.post
import gptcache.processor.pre
from gptcache import Cache, cache, Config
from gptcache.adapter.adapter import adapt
from gptcache.embedding import (
Onnx,
Huggingface,
SBERT,
FastText,
Data2VecAudio,
Timm,
ViT,
OpenAI,
Cohere,
Rwkv,
PaddleNLP,
UForm,
)
from gptcache.embedding.base import BaseEmbedding
from gptcache.manager import manager_factory
from gptcache.manager.data_manager import DataManager
from gptcache.processor.context import (
SummarizationContextProcess,
SelectiveContextProcess,
ConcatContextProcess,
)
from gptcache.processor.post import temperature_softmax
from gptcache.processor.pre import get_prompt
from gptcache.similarity_evaluation import (
SearchDistanceEvaluation,
NumpyNormEvaluation,
OnnxModelEvaluation,
ExactMatchEvaluation,
KReciprocalEvaluation,
SimilarityEvaluation,
CohereRerankEvaluation,
SequenceMatchEvaluation,
TimeEvaluation,
SbertCrossencoderEvaluation
)
from gptcache.utils import import_ruamel
def get(prompt: str, **kwargs) -> Any:
    """Look up cached data for ``prompt`` — the read half of the put/get api.

    The cache must have been initialized with ``pre_embedding_func=get_prompt``.

    :param prompt: the cache data key, usually question text
    :type prompt: str
    :param kwargs: list of user-defined parameters
    :type kwargs: Dict

    Example:
        .. code-block:: python

            from gptcache.adapter.api import put, get
            from gptcache.processor.pre import get_prompt

            cache.init(pre_embedding_func=get_prompt)
            put("hello", "foo")
            print(get("hello"))
    """
    # A no-op LLM handler and callback make ``adapt`` a pure cache lookup.
    return adapt(
        _llm_handle_none,
        _cache_data_converter,
        _update_cache_callback_none,
        prompt=prompt,
        **kwargs,
    )
def _get_model(model_src, model_config=None):
model_src = model_src.lower()
model_config = model_config or {}
if model_src == "onnx":
return Onnx(**model_config)
if model_src == "huggingface":
return Huggingface(**model_config)
if model_src == "sbert":
return SBERT(**model_config)
if model_src == "fasttext":
return FastText(**model_config)
if model_src == "data2vecaudio":
return Data2VecAudio(**model_config)
if model_src == "timm":
return Timm(**model_config)
if model_src == "vit":
return ViT(**model_config)
if model_src == "openai":
return OpenAI(**model_config)
if model_src == "cohere":
return Cohere(**model_config)
if model_src == "rwkv":
return Rwkv(**model_config)
if model_src == "paddlenlp":
return PaddleNLP(**model_config)
if model_src == "uform":
return UForm(**model_config)
def _get_eval(strategy, kws=None):
strategy = strategy.lower()
kws = kws or {}
if "distance" in strategy:
return SearchDistanceEvaluation(**kws)
if "np" in strategy:
return NumpyNormEvaluation(**kws)
if "exact" in strategy:
return ExactMatchEvaluation()
if "onnx" in strategy:
return OnnxModelEvaluation(**kws)
if "kreciprocal" in strategy:
return KReciprocalEvaluation(**kws)
if "cohere" in strategy:
return CohereRerankEvaluation(**kws)
if "sequence_match" in strategy:
return SequenceMatchEvaluation(**kws)
if "time" in strategy:
return TimeEvaluation(**kws)
if "sbert_crossencoder" in strategy:
return SbertCrossencoderEvaluation(**kws)
def _get_pre_func(pre_process):
    """Resolve a pre-processing function from ``gptcache.processor.pre`` by name."""
    pre_module = gptcache.processor.pre
    return getattr(pre_module, pre_process)
def _get_pre_context_function(pre_context_process, kws=None):
pre_context_process = pre_context_process.lower()
kws = kws or {}
if pre_context_process in "summarization":
return SummarizationContextProcess(**kws)
if pre_context_process in "selective":
return SelectiveContextProcess(**kws)
if pre_context_process in "concat":
return ConcatContextProcess()
def _get_post_func(post_process):
    """Resolve a post-processing function from ``gptcache.processor.post`` by name."""
    post_module = gptcache.processor.post
    return getattr(post_module, post_process)
def import_ruamel():
    # Lazily verify the optional ``ruamel-yaml`` dependency is installed
    # (raises/installs via the project helper when missing).
    _check_library("ruamel-yaml")
def init_similar_cache_from_config(config_dir: str, cache_obj: Optional[Cache] = None):
    """Initialize a similarity cache from a YAML configuration file.

    :param config_dir: path of the YAML config file; when falsy, all defaults are used.
    :param cache_obj: cache instance to initialize; the global ``cache`` when None.
    :return: the parsed configuration dict.
    """
    import_ruamel()
    from ruamel.yaml import YAML  # pylint: disable=C0415

    if config_dir:
        with open(config_dir, "r", encoding="utf-8") as f:
            # NOTE(review): typ="unsafe" can construct arbitrary Python objects —
            # only load trusted configuration files.
            yaml = YAML(typ="unsafe", pure=True)
            init_conf = yaml.load(f)
    else:
        init_conf = {}

    # Due to the problem with the first naming, it is reserved to ensure compatibility
    embedding = init_conf.get("model_source", "")
    if not embedding:
        embedding = init_conf.get("embedding", "onnx")
    # ditto
    embedding_config = init_conf.get("model_config", {})
    if not embedding_config:
        embedding_config = init_conf.get("embedding_config", {})
    embedding_model = _get_model(embedding, embedding_config)

    storage_config = init_conf.get("storage_config", {})
    storage_config.setdefault("manager", "sqlite,faiss")
    storage_config.setdefault("data_dir", "gptcache_data")
    storage_config.setdefault("vector_params", {})
    # The vector store dimension must match the embedding model's output size.
    storage_config["vector_params"] = storage_config["vector_params"] or {}
    storage_config["vector_params"]["dimension"] = embedding_model.dimension
    data_manager = manager_factory(**storage_config)

    eval_strategy = init_conf.get("evaluation", "distance")
    # Due to the problem with the first naming, it is reserved to ensure compatibility
    eval_config = init_conf.get("evaluation_kws", {})
    if not eval_config:
        eval_config = init_conf.get("evaluation_config", {})
    evaluation = _get_eval(eval_strategy, eval_config)

    cache_obj = cache_obj if cache_obj else cache

    # A context processor, when configured, supplies the pre-embedding function;
    # otherwise fall back to a plain pre-function (default: get_prompt).
    pre_process = init_conf.get("pre_context_function")
    if pre_process:
        pre_func = _get_pre_context_function(
            pre_process, init_conf.get("pre_context_config")
        )
        pre_func = pre_func.pre_process
    else:
        pre_process = init_conf.get("pre_function", "get_prompt")
        pre_func = _get_pre_func(pre_process)

    post_process = init_conf.get("post_function", "first")
    post_func = _get_post_func(post_process)

    config_kws = init_conf.get("config", {}) or {}
    config = Config(**config_kws)

    cache_obj.init(
        pre_embedding_func=pre_func,
        embedding_func=embedding_model.to_embeddings,
        data_manager=data_manager,
        similarity_evaluation=evaluation,
        post_process_messages_func=post_func,
        config=config,
    )
    return init_conf | null |
21,811 | from typing import Any
from gptcache.adapter.adapter import adapt
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.utils import import_huggingface, import_torch
from transformers import pipeline
def _cache_data_convert(cache_data):
    # Wrap a cached answer in the huggingface-pipeline response shape;
    # the extra "gptcache" flag marks the result as served from cache.
    return [{"generated_text": cache_data, "gptcache": True}] | null |
21,812 | from typing import Any
from gptcache.adapter.adapter import adapt
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.utils import import_huggingface, import_torch
from transformers import pipeline
class DataType(IntEnum):
class Answer:
def _update_cache_callback(llm_data, update_cache_func, *args, **kwargs): # pylint: disable=unused-argument
    # Cache the first generated text of the pipeline output as a plain string,
    # then pass the LLM response through unchanged.
    update_cache_func(Answer(llm_data[0]["generated_text"], DataType.STR))
    return llm_data | null |
21,813 | from typing import Optional, List, Any, Mapping
from gptcache.adapter.adapter import adapt, aadapt
from gptcache.core import cache
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.session import Session
from gptcache.utils import import_langchain
from langchain.llms.base import LLM
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
BaseMessage,
LLMResult,
AIMessage,
ChatGeneration,
ChatResult,
)
from langchain.callbacks.manager import (
Callbacks,
CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
)
def _cache_data_convert(cache_data):
    # Cached langchain LLM answers are stored as plain strings; no conversion needed.
    return cache_data | null |
21,814 | from typing import Optional, List, Any, Mapping
from gptcache.adapter.adapter import adapt, aadapt
from gptcache.core import cache
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.session import Session
from gptcache.utils import import_langchain
from langchain.llms.base import LLM
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
BaseMessage,
LLMResult,
AIMessage,
ChatGeneration,
ChatResult,
)
from langchain.callbacks.manager import (
Callbacks,
CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
)
class DataType(IntEnum):
    """Encoding of a cached answer payload."""
    STR = 0
    IMAGE_BASE64 = 1
    IMAGE_URL = 2
class Answer:
    """A cached answer payload together with its encoding.

    data_type:
        0: str
        1: base64 image
    """
    # The cached payload itself.
    answer: Any
    # How ``answer`` is encoded; see DataType (defaults to plain string).
    answer_type: int = DataType.STR
def _update_cache_callback(
    llm_data, update_cache_func, *args, **kwargs
): # pylint: disable=unused-argument
    # Cache the raw LLM string answer, then return the response unchanged.
    update_cache_func(Answer(llm_data, DataType.STR))
    return llm_data | null |
21,815 | from typing import Optional, List, Any, Mapping
from gptcache.adapter.adapter import adapt, aadapt
from gptcache.core import cache
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.session import Session
from gptcache.utils import import_langchain
from langchain.llms.base import LLM
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
BaseMessage,
LLMResult,
AIMessage,
ChatGeneration,
ChatResult,
)
from langchain.callbacks.manager import (
Callbacks,
CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
)
def _cache_msg_data_convert(cache_data):
    # Rebuild a langchain ChatResult around the cached message content so a
    # cache hit looks exactly like a normal chat-model response.
    llm_res = ChatResult(
        generations=[
            ChatGeneration(
                text="",
                generation_info=None,
                message=AIMessage(content=cache_data, additional_kwargs={}),
            )
        ],
        llm_output=None,
    )
    return llm_res | null |
21,816 | from typing import Optional, List, Any, Mapping
from gptcache.adapter.adapter import adapt, aadapt
from gptcache.core import cache
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.session import Session
from gptcache.utils import import_langchain
from langchain.llms.base import LLM
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
BaseMessage,
LLMResult,
AIMessage,
ChatGeneration,
ChatResult,
)
from langchain.callbacks.manager import (
Callbacks,
CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
)
def _update_cache_msg_callback(
    llm_data, update_cache_func, *args, **kwargs
): # pylint: disable=unused-argument
    # Cache the text of the first chat generation, then pass the result through.
    update_cache_func(llm_data.generations[0].text)
    return llm_data | null |
21,817 | import base64
import warnings
from dataclasses import dataclass
from io import BytesIO
from typing import List
from gptcache.adapter.adapter import adapt
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.utils import (
import_stability, import_pillow
)
from gptcache.utils.error import CacheError
from PIL import Image as PILImage
from stability_sdk import client
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
class MockArtifact:
class MockAnswer:
def _construct_resp_from_cache(img_64, height, width):
    # Decode the cached base64 image, resize it to the requested dimensions if
    # they differ, and yield a mock stability-sdk answer (generator mimics the
    # streaming interface of the real client).
    img_bytes = base64.b64decode((img_64))
    img_file = BytesIO(img_bytes)
    img = PILImage.open(img_file)
    new_size = (width, height)
    if new_size != img.size:
        img = img.resize(new_size)
    buffered = BytesIO()
    img.save(buffered, format="PNG")
    img_bytes = buffered.getvalue()
    yield MockAnswer(artifacts=[MockArtifact(type=generation.ARTIFACT_IMAGE, binary=img_bytes)]) | null |
21,818 | import time
from typing import Iterator
from gptcache.adapter.adapter import adapt
from gptcache.manager.scalar_data.base import DataType, Answer
from gptcache.utils import import_llama_cpp_python
import llama_cpp
def _construct_resp_from_cache(return_message):
    # Shape the cached text like a llama.cpp completion response; "gptcache"
    # marks the result as a cache hit, and usage counters are zero because no
    # tokens were actually spent.
    return {
        "gptcache": True,
        "choices": [
            {
                "text": return_message,
                "finish_reason": "stop",
                "index": 0,
            }
        ],
        "created": int(time.time()),
        "usage": {"completion_tokens": 0, "prompt_tokens": 0, "total_tokens": 0},
        "object": "chat.completion",
    } | null |
21,819 | import time
from typing import Iterator
from gptcache.adapter.adapter import adapt
from gptcache.manager.scalar_data.base import DataType, Answer
from gptcache.utils import import_llama_cpp_python
import llama_cpp
def _construct_stream_resp_from_cache(return_message):
    # Emulate a llama.cpp streaming response with a single cached chunk;
    # finish_reason stays None as it would mid-stream.
    return [
        {
            "gptcache": True,
            "choices": [
                {
                    "text": return_message,
                    "finish_reason": None,
                    "index": 0,
                }
            ],
            "created": int(time.time()),
            "usage": {"completion_tokens": 0, "prompt_tokens": 0, "total_tokens": 0},
            "object": "chat.completion",
        }
    ] | null |
21,820 | import base64
import json
import os
import time
from io import BytesIO
from typing import Any, AsyncGenerator, Iterator, List
from gptcache import cache
from gptcache.adapter.adapter import aadapt, adapt
from gptcache.adapter.base import BaseCacheLLM
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.utils import import_openai, import_pillow
from gptcache.utils.error import wrap_error
from gptcache.utils.response import (
get_audio_text_from_openai_answer,
get_image_from_openai_b64,
get_image_from_openai_url,
get_message_from_openai_answer,
get_stream_message_from_openai_answer,
get_text_from_openai_answer,
)
from gptcache.utils.token import token_counter
import openai
async def async_iter(input_list):
    # Expose a plain list as an async generator (used to mimic streamed responses).
    for item in input_list:
        yield item | null |
21,821 | import base64
import json
import os
import time
from io import BytesIO
from typing import Any, AsyncGenerator, Iterator, List
from gptcache import cache
from gptcache.adapter.adapter import aadapt, adapt
from gptcache.adapter.base import BaseCacheLLM
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.utils import import_openai, import_pillow
from gptcache.utils.error import wrap_error
from gptcache.utils.response import (
get_audio_text_from_openai_answer,
get_image_from_openai_b64,
get_image_from_openai_url,
get_message_from_openai_answer,
get_stream_message_from_openai_answer,
get_text_from_openai_answer,
)
from gptcache.utils.token import token_counter
import openai
def _construct_resp_from_cache(return_message, saved_token):
    # Shape the cached answer like an OpenAI chat completion; "gptcache" marks a
    # cache hit, "saved_token" reports the tokens that were not spent, and the
    # usage counters are zero because nothing was generated.
    return {
        "gptcache": True,
        "saved_token": saved_token,
        "choices": [
            {
                "message": {"role": "assistant", "content": return_message},
                "finish_reason": "stop",
                "index": 0,
            }
        ],
        "created": int(time.time()),
        "usage": {"completion_tokens": 0, "prompt_tokens": 0, "total_tokens": 0},
        "object": "chat.completion",
    } | null |
21,822 | import base64
import json
import os
import time
from io import BytesIO
from typing import Any, AsyncGenerator, Iterator, List
from gptcache import cache
from gptcache.adapter.adapter import aadapt, adapt
from gptcache.adapter.base import BaseCacheLLM
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.utils import import_openai, import_pillow
from gptcache.utils.error import wrap_error
from gptcache.utils.response import (
get_audio_text_from_openai_answer,
get_image_from_openai_b64,
get_image_from_openai_url,
get_message_from_openai_answer,
get_stream_message_from_openai_answer,
get_text_from_openai_answer,
)
from gptcache.utils.token import token_counter
import openai
def _construct_stream_resp_from_cache(return_message, saved_token):
    # Emulate the OpenAI streaming protocol for a cache hit with three chunks:
    # the role delta, the full cached content as one delta, and the stop chunk
    # (which carries the cache-hit marker and the saved token count).
    created = int(time.time())
    return [
        {
            "choices": [
                {"delta": {"role": "assistant"}, "finish_reason": None, "index": 0}
            ],
            "created": created,
            "object": "chat.completion.chunk",
        },
        {
            "choices": [
                {
                    "delta": {"content": return_message},
                    "finish_reason": None,
                    "index": 0,
                }
            ],
            "created": created,
            "object": "chat.completion.chunk",
        },
        {
            "gptcache": True,
            "choices": [{"delta": {}, "finish_reason": "stop", "index": 0}],
            "created": created,
            "object": "chat.completion.chunk",
            "saved_token": saved_token,
        },
    ] | null |
21,823 | import base64
import json
import os
import time
from io import BytesIO
from typing import Any, AsyncGenerator, Iterator, List
from gptcache import cache
from gptcache.adapter.adapter import aadapt, adapt
from gptcache.adapter.base import BaseCacheLLM
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.utils import import_openai, import_pillow
from gptcache.utils.error import wrap_error
from gptcache.utils.response import (
get_audio_text_from_openai_answer,
get_image_from_openai_b64,
get_image_from_openai_url,
get_message_from_openai_answer,
get_stream_message_from_openai_answer,
get_text_from_openai_answer,
)
from gptcache.utils.token import token_counter
import openai
def _construct_text_from_cache(return_text):
    # Shape the cached answer like an OpenAI text-completion response;
    # "gptcache" marks a cache hit and usage counters are zero.
    return {
        "gptcache": True,
        "choices": [
            {
                "text": return_text,
                "finish_reason": "stop",
                "index": 0,
            }
        ],
        "created": int(time.time()),
        "usage": {"completion_tokens": 0, "prompt_tokens": 0, "total_tokens": 0},
        "object": "text_completion",
    } | null |
21,824 | import base64
import json
import os
import time
from io import BytesIO
from typing import Any, AsyncGenerator, Iterator, List
from gptcache import cache
from gptcache.adapter.adapter import aadapt, adapt
from gptcache.adapter.base import BaseCacheLLM
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.utils import import_openai, import_pillow
from gptcache.utils.error import wrap_error
from gptcache.utils.response import (
get_audio_text_from_openai_answer,
get_image_from_openai_b64,
get_image_from_openai_url,
get_message_from_openai_answer,
get_stream_message_from_openai_answer,
get_text_from_openai_answer,
)
from gptcache.utils.token import token_counter
import openai
class Image(openai.Image):
    """Openai Image Wrapper

    Example:
        .. code-block:: python

            from gptcache import cache
            from gptcache.processor.pre import get_prompt
            # init gptcache
            cache.init(pre_embedding_func=get_prompt)
            cache.set_openai_key()

            from gptcache.adapter import openai
            # run image generation model with gptcache
            response = openai.Image.create(
                prompt="a white siamese cat",
                n=1,
                size="256x256"
            )
            response_url = response['data'][0]['url']
    """

    # NOTE(review): both methods take ``cls`` but no @classmethod decorator is
    # visible in this excerpt — presumably stripped; confirm against upstream.
    def _llm_handler(cls, *llm_args, **llm_kwargs):
        # Call the real OpenAI endpoint, wrapping provider errors for the cache layer.
        try:
            return super().create(*llm_args, **llm_kwargs)
        except openai.OpenAIError as e:
            raise wrap_error(e) from e

    def create(cls, *args, **kwargs):
        # Pop cache-relevant options so they can be replayed on a cache hit.
        response_format = kwargs.pop("response_format", "url")
        size = kwargs.pop("size", "256x256")

        def cache_data_convert(cache_data):
            # On a cache hit, rebuild a complete image-create response.
            return _construct_image_create_resp_from_cache(
                image_data=cache_data, response_format=response_format, size=size
            )

        def update_cache_callback(
            llm_data, update_cache_func, *args, **kwargs
        ): # pylint: disable=unused-argument
            # On a cache miss, store the generated image as base64 bytes or URL,
            # matching the response_format the caller asked for.
            if response_format == "b64_json":
                img_b64 = get_image_from_openai_b64(llm_data)
                if isinstance(img_b64, str):
                    img_b64 = img_b64.encode("ascii")
                update_cache_func(Answer(img_b64, DataType.IMAGE_BASE64))
            elif response_format == "url":
                update_cache_func(
                    Answer(get_image_from_openai_url(llm_data), DataType.IMAGE_URL)
                )
            return llm_data

        return adapt(
            cls._llm_handler,
            cache_data_convert,
            update_cache_callback,
            response_format=response_format,
            size=size,
            *args,
            **kwargs,
        )
def import_pillow():
    # Lazily verify the optional ``pillow`` dependency (imported as PIL) is installed.
    _check_library("PIL", package="pillow")
def _construct_image_create_resp_from_cache(image_data, response_format, size):
    """Rebuild an OpenAI image-create response from cached base64 image data.

    :param image_data: base64-encoded cached image bytes.
    :param response_format: "url" (write a local JPEG and return its path) or
        "b64_json" (return the image re-encoded as base64).
    :param size: target size as "<width>x<height>", e.g. "256x256".
    :raises AttributeError: if ``response_format`` is not one of the two options.
    """
    import_pillow()
    from PIL import Image as PILImage  # pylint: disable=C0415

    img_bytes = base64.b64decode((image_data))
    img_file = BytesIO(img_bytes)  # convert image to file-like object
    img = PILImage.open(img_file)
    new_size = tuple(int(a) for a in size.split("x"))
    if new_size != img.size:
        # Re-encode only when a resize is actually needed; otherwise reuse the
        # original bytes untouched.
        img = img.resize(new_size)
        buffered = BytesIO()
        img.save(buffered, format="JPEG")
    else:
        buffered = img_file
    if response_format == "url":
        # Persist a local JPEG and return its absolute path in place of a URL.
        target_url = os.path.abspath(str(int(time.time())) + ".jpeg")
        with open(target_url, "wb") as f:
            f.write(buffered.getvalue())
        image_data = target_url
    elif response_format == "b64_json":
        image_data = base64.b64encode(buffered.getvalue()).decode("ascii")
    else:
        raise AttributeError(
            f"Invalid response_format: {response_format} is not one of ['url', 'b64_json']"
        )
    return {
        "gptcache": True,
        "created": int(time.time()),
        "data": [{response_format: image_data}],
    } | null |
21,825 | import base64
import json
import os
import time
from io import BytesIO
from typing import Any, AsyncGenerator, Iterator, List
from gptcache import cache
from gptcache.adapter.adapter import aadapt, adapt
from gptcache.adapter.base import BaseCacheLLM
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.utils import import_openai, import_pillow
from gptcache.utils.error import wrap_error
from gptcache.utils.response import (
get_audio_text_from_openai_answer,
get_image_from_openai_b64,
get_image_from_openai_url,
get_message_from_openai_answer,
get_stream_message_from_openai_answer,
get_text_from_openai_answer,
)
from gptcache.utils.token import token_counter
import openai
def _construct_audio_text_from_cache(return_text):
return {
"gptcache": True,
"text": return_text,
} | null |
21,826 | import base64
import json
import os
import time
from io import BytesIO
from typing import Any, AsyncGenerator, Iterator, List
from gptcache import cache
from gptcache.adapter.adapter import aadapt, adapt
from gptcache.adapter.base import BaseCacheLLM
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.utils import import_openai, import_pillow
from gptcache.utils.error import wrap_error
from gptcache.utils.response import (
get_audio_text_from_openai_answer,
get_image_from_openai_b64,
get_image_from_openai_url,
get_message_from_openai_answer,
get_stream_message_from_openai_answer,
get_text_from_openai_answer,
)
from gptcache.utils.token import token_counter
import openai
def token_counter(text):
    """Return the number of tokens ``text`` encodes to under the cached encoding."""
    return len(_get_encoding().encode(text))
The provided code snippet includes necessary dependencies for implementing the `_num_tokens_from_messages` function. Write a Python function `def _num_tokens_from_messages(messages)` to solve the following problem:
Returns the number of tokens used by a list of messages.
Here is the function:
def _num_tokens_from_messages(messages):
    """Estimate the number of tokens consumed by a list of chat messages.

    Uses the OpenAI chat accounting scheme: a fixed per-message overhead, an
    extra token when a "name" field is present, and a constant 3-token priming
    cost for the assistant reply.
    """
    per_message_overhead = 3
    per_name_overhead = 1
    # every reply is primed with <|start|>assistant<|message|>
    total = 3
    for message in messages:
        total += per_message_overhead
        for key, value in message.items():
            total += token_counter(value)
            if key == "name":
                total += per_name_overhead
    return total
21,827 | import base64
from io import BytesIO
from gptcache.adapter.adapter import adapt
from gptcache.manager.scalar_data.base import Answer, DataType
from gptcache.utils import (
import_pillow, import_diffusers, import_huggingface
)
from gptcache.utils.error import CacheError
from PIL import Image
import diffusers
import diffusers
def _construct_resp_from_cache(img_64):
    """Rebuild a StableDiffusionPipelineOutput from a base64-encoded cached image."""
    raw_bytes = base64.b64decode(img_64)
    cached_image = Image.open(BytesIO(raw_bytes))
    return diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput(
        images=[cached_image], nsfw_content_detected=None
    )
21,828 | import time
import numpy as np
from gptcache import cache
from gptcache.processor.post import temperature_softmax
from gptcache.utils.error import NotInitError
from gptcache.utils.log import gptcache_log
from gptcache.utils.time import time_cal
def adapt(llm_handler, cache_data_convert, update_cache_callback, *args, **kwargs):
    """Adapt to different llm

    :param llm_handler: LLM calling method, when the cache misses, this function will be called
    :param cache_data_convert: When the cache hits, convert the answer in the cache to the format of the result returned by llm
    :param update_cache_callback: If the cache misses, after getting the result returned by llm, save the result to the cache
    :param args: llm args
    :param kwargs: llm kwargs
    :return: llm result

    Recognized (and consumed) kwargs include: ``search_only``, ``temperature``,
    ``cache_obj``, ``session``, ``require_object_store``, ``cache_context``,
    ``cache_skip``, ``cache_factor``, ``top_k`` and ``hit_callback``; all are
    popped before the remaining kwargs reach the pre-embedding function / LLM.
    """
    start_time = time.time()
    search_only_flag = kwargs.pop("search_only", False)
    user_temperature = "temperature" in kwargs
    user_top_k = "top_k" in kwargs
    temperature = kwargs.pop("temperature", 0.0)
    chat_cache = kwargs.pop("cache_obj", cache)
    session = kwargs.pop("session", None)
    require_object_store = kwargs.pop("require_object_store", False)
    if require_object_store:
        assert chat_cache.data_manager.o, "Object store is required for adapter."
    if not chat_cache.has_init:
        raise NotInitError()
    cache_enable = chat_cache.cache_enable_func(*args, **kwargs)
    context = kwargs.pop("cache_context", {})
    embedding_data = None
    # you want to retry to send the request to chatgpt when the cache is negative
    # 0 < temperature < 2: probabilistically skip the cache (higher temperature
    # -> more likely to skip); >= 2 always skip; <= 0 never skip.
    if 0 < temperature < 2:
        cache_skip_options = [True, False]
        prob_cache_skip = [0, 1]
        cache_skip = kwargs.pop(
            "cache_skip",
            temperature_softmax(
                messages=cache_skip_options,
                scores=prob_cache_skip,
                temperature=temperature,
            ),
        )
    elif temperature >= 2:
        cache_skip = kwargs.pop("cache_skip", True)
    else:  # temperature <= 0
        cache_skip = kwargs.pop("cache_skip", False)
    cache_factor = kwargs.pop("cache_factor", 1.0)
    # Pre-process the request into (data stored in the cache, data to embed).
    pre_embedding_res = time_cal(
        chat_cache.pre_embedding_func,
        func_name="pre_process",
        report_func=chat_cache.report.pre,
    )(
        kwargs,
        extra_param=context.get("pre_embedding_func", None),
        prompts=chat_cache.config.prompts,
        cache_config=chat_cache.config,
    )
    if isinstance(pre_embedding_res, tuple):
        pre_store_data = pre_embedding_res[0]
        pre_embedding_data = pre_embedding_res[1]
    else:
        pre_store_data = pre_embedding_res
        pre_embedding_data = pre_embedding_res
    if chat_cache.config.input_summary_len is not None:
        # Optionally compress long inputs before embedding.
        pre_embedding_data = _summarize_input(
            pre_embedding_data, chat_cache.config.input_summary_len
        )
    if cache_enable:
        embedding_data = time_cal(
            chat_cache.embedding_func,
            func_name="embedding",
            report_func=chat_cache.report.embedding,
        )(pre_embedding_data, extra_param=context.get("embedding_func", None))
    if cache_enable and not cache_skip:
        # Vector search for candidate answers. When the caller supplied a
        # temperature but no top_k, widen the search to 5 candidates.
        search_data_list = time_cal(
            chat_cache.data_manager.search,
            func_name="search",
            report_func=chat_cache.report.search,
        )(
            embedding_data,
            extra_param=context.get("search_func", None),
            top_k=kwargs.pop("top_k", 5)
            if (user_temperature and not user_top_k)
            else kwargs.pop("top_k", -1),
        )
        if search_data_list is None:
            search_data_list = []
        cache_answers = []
        similarity_threshold = chat_cache.config.similarity_threshold
        min_rank, max_rank = chat_cache.similarity_evaluation.range()
        # Scale the acceptance threshold by cache_factor, then clamp into
        # [min_rank, max_rank].
        rank_threshold = (max_rank - min_rank) * similarity_threshold * cache_factor
        rank_threshold = (
            max_rank
            if rank_threshold > max_rank
            else min_rank
            if rank_threshold < min_rank
            else rank_threshold
        )
        for search_data in search_data_list:
            cache_data = time_cal(
                chat_cache.data_manager.get_scalar_data,
                func_name="get_data",
                report_func=chat_cache.report.data,
            )(
                search_data,
                extra_param=context.get("get_scalar_data", None),
                session=session,
            )
            if cache_data is None:
                continue
            # cache consistency check
            if chat_cache.config.data_check:
                is_healthy = cache_health_check(
                    chat_cache.data_manager.v,
                    {
                        "embedding": cache_data.embedding_data,
                        "search_result": search_data,
                    },
                )
                if not is_healthy:
                    continue
            # "deps" flow: evaluate against dependency data instead of the raw
            # question (used by adapters that store structured questions).
            if "deps" in context and hasattr(cache_data.question, "deps"):
                eval_query_data = {
                    "question": context["deps"][0]["data"],
                    "embedding": None,
                }
                eval_cache_data = {
                    "question": cache_data.question.deps[0].data,
                    "answer": cache_data.answers[0].answer,
                    "search_result": search_data,
                    "cache_data": cache_data,
                    "embedding": None,
                }
            else:
                eval_query_data = {
                    "question": pre_store_data,
                    "embedding": embedding_data,
                }
                eval_cache_data = {
                    "question": cache_data.question,
                    "answer": cache_data.answers[0].answer,
                    "search_result": search_data,
                    "cache_data": cache_data,
                    "embedding": cache_data.embedding_data,
                }
            rank = time_cal(
                chat_cache.similarity_evaluation.evaluation,
                func_name="evaluation",
                report_func=chat_cache.report.evaluation,
            )(
                eval_query_data,
                eval_cache_data,
                extra_param=context.get("evaluation_func", None),
            )
            gptcache_log.debug(
                "similarity: [user question] %s, [cache question] %s, [value] %f",
                pre_store_data,
                cache_data.question,
                rank,
            )
            if rank_threshold <= rank:
                cache_answers.append(
                    (float(rank), cache_data.answers[0].answer, search_data, cache_data)
                )
                chat_cache.data_manager.hit_cache_callback(search_data)
        # Best-ranked answers first.
        cache_answers = sorted(cache_answers, key=lambda x: x[0], reverse=True)
        answers_dict = dict((d[1], d) for d in cache_answers)
        if len(cache_answers) != 0:
            hit_callback = kwargs.pop("hit_callback", None)
            if hit_callback and callable(hit_callback):
                # Report (question, normalized score) pairs to the caller.
                factor = max_rank - min_rank
                hit_callback([(d[3].question, d[0] / factor if factor else d[0]) for d in cache_answers])

            def post_process():
                # temperature_softmax needs the scores; other post-processors
                # only receive the candidate messages.
                if chat_cache.post_process_messages_func is temperature_softmax:
                    return_message = chat_cache.post_process_messages_func(
                        messages=[t[1] for t in cache_answers],
                        scores=[t[0] for t in cache_answers],
                        temperature=temperature,
                    )
                else:
                    return_message = chat_cache.post_process_messages_func(
                        [t[1] for t in cache_answers]
                    )
                return return_message

            return_message = time_cal(
                post_process,
                func_name="post_process",
                report_func=chat_cache.report.post,
            )()
            chat_cache.report.hint_cache()
            # NOTE(review): the lookup stringifies the chosen message while the
            # dict keys are the raw answers — this only matches when answers
            # are strings; confirm for non-str answer types.
            cache_whole_data = answers_dict.get(str(return_message))
            if session and cache_whole_data:
                chat_cache.data_manager.add_session(
                    cache_whole_data[2], session.name, pre_embedding_data
                )
            if cache_whole_data and not chat_cache.config.disable_report:
                # user_question / cache_question / cache_question_id / cache_answer / similarity / consume time/ time
                report_cache_data = cache_whole_data[3]
                report_search_data = cache_whole_data[2]
                chat_cache.data_manager.report_cache(
                    pre_store_data if isinstance(pre_store_data, str) else "",
                    report_cache_data.question
                    if isinstance(report_cache_data.question, str)
                    else "",
                    report_search_data[1],
                    report_cache_data.answers[0].answer
                    if isinstance(report_cache_data.answers[0].answer, str)
                    else "",
                    cache_whole_data[0],
                    round(time.time() - start_time, 6),
                )
            return cache_data_convert(return_message)

    # Cache miss (or skipped): fall through to the chained cache, or call the LLM.
    next_cache = chat_cache.next_cache
    if next_cache:
        kwargs["cache_obj"] = next_cache
        kwargs["cache_context"] = context
        kwargs["cache_skip"] = cache_skip
        kwargs["cache_factor"] = cache_factor
        kwargs["search_only"] = search_only_flag
        llm_data = adapt(
            llm_handler, cache_data_convert, update_cache_callback, *args, **kwargs
        )
    else:
        if search_only_flag:
            # cache miss
            return None
        llm_data = time_cal(
            llm_handler, func_name="llm_request", report_func=chat_cache.report.llm
        )(*args, **kwargs)
    if not llm_data:
        return None
    if cache_enable:
        try:

            def update_cache_func(handled_llm_data, question=None):
                if question is None:
                    question = pre_store_data
                else:
                    question.content = pre_store_data
                time_cal(
                    chat_cache.data_manager.save,
                    func_name="save",
                    report_func=chat_cache.report.save,
                )(
                    question,
                    handled_llm_data,
                    embedding_data,
                    extra_param=context.get("save_func", None),
                    session=session,
                )
                # Flush periodically instead of on every save.
                if (
                    chat_cache.report.op_save.count > 0
                    and chat_cache.report.op_save.count % chat_cache.config.auto_flush
                    == 0
                ):
                    chat_cache.flush()

            llm_data = update_cache_callback(
                llm_data, update_cache_func, *args, **kwargs
            )
        except Exception as e:  # pylint: disable=W0703
            # Saving to cache is best-effort: never fail the user's request.
            gptcache_log.warning("failed to save the data to cache, error: %s", e)
    return llm_data
def _summarize_input(text, text_length):
if len(text) <= text_length:
return text
# pylint: disable=import-outside-toplevel
from gptcache.processor.context.summarization_context import (
SummarizationContextProcess,
)
global _input_summarizer
if _input_summarizer is None:
_input_summarizer = SummarizationContextProcess()
summarization = _input_summarizer.summarize_to_sentence([text], text_length)
return summarization
def temperature_softmax(messages: List[Any], scores: List[float], temperature: float = 0.0) -> Any:
    """Select one message after evaluation, optionally sampling with temperature.

    :param messages: candidate outputs.
    :type messages: List[Any]
    :param scores: evaluation score for each candidate.
    :type scores: List[float]
    :param temperature: non-negative sampling temperature; 0 (the default)
        deterministically picks the top-scored candidate, while larger values
        make the choice increasingly random.
    :type temperature: float

    Example:
        .. code-block:: python

            from gptcache.processor.post import temperature_softmax

            answer = temperature_softmax(["m1", "m2"], [0.9, 0.1], temperature=0.5)
    """
    if temperature > 0:
        probabilities = softmax([score / temperature for score in scores])
        return numpy.random.choice(messages, size=1, p=probabilities)[0]
    # Deterministic path: the first highest-scored candidate wins.
    best_message, _ = max(zip(messages, scores), key=lambda pair: pair[1])
    return best_message
class NotInitError(CacheError):
    """Raise when the cache has been used before it's inited"""

    def __init__(self):
        # Fixed message: this error always means `cache.init()` was not called
        # before the cache was used.
        super().__init__("The cache should be inited before using")
gptcache_log = logging.getLogger(f'gptcache:{gptcache.__version__}')
def time_cal(func, func_name=None, report_func=None):
    """Wrap ``func`` so each call's duration is logged and optionally reported.

    :param func: the callable to time.
    :param func_name: label passed to the configured log function; defaults to
        ``func.__name__``.
    :param report_func: optional callback receiving the elapsed seconds.
    :return: a wrapper with the same call signature and return value as ``func``.
    """

    def inner(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - started
        if cache.config.log_time_func:
            label = func_name if func_name is not None else func.__name__
            cache.config.log_time_func(label, elapsed)
        if report_func is not None:
            report_func(elapsed)
        return result

    return inner
The provided code snippet includes necessary dependencies for implementing the `aadapt` function. Write a Python function `async def aadapt( llm_handler, cache_data_convert, update_cache_callback, *args, **kwargs )` to solve the following problem:
Simple copy of the 'adapt' method to different llm for 'async llm function' :param llm_handler: Async LLM calling method, when the cache misses, this function will be called :param cache_data_convert: When the cache hits, convert the answer in the cache to the format of the result returned by llm :param update_cache_callback: If the cache misses, after getting the result returned by llm, save the result to the cache :param args: llm args :param kwargs: llm kwargs :return: llm result
Here is the function:
async def aadapt(
    llm_handler, cache_data_convert, update_cache_callback, *args, **kwargs
):
    """Simple copy of the 'adapt' method to different llm for 'async llm function'

    :param llm_handler: Async LLM calling method, when the cache misses, this function will be called
    :param cache_data_convert: When the cache hits, convert the answer in the cache to the format of the result returned by llm
    :param update_cache_callback: If the cache misses, after getting the result returned by llm, save the result to the cache
    :param args: llm args
    :param kwargs: llm kwargs
    :return: llm result
    """
    start_time = time.time()
    user_temperature = "temperature" in kwargs
    user_top_k = "top_k" in kwargs
    temperature = kwargs.pop("temperature", 0.0)
    chat_cache = kwargs.pop("cache_obj", cache)
    session = kwargs.pop("session", None)
    require_object_store = kwargs.pop("require_object_store", False)
    if require_object_store:
        assert chat_cache.data_manager.o, "Object store is required for adapter."
    if not chat_cache.has_init:
        raise NotInitError()
    cache_enable = chat_cache.cache_enable_func(*args, **kwargs)
    context = kwargs.pop("cache_context", {})
    embedding_data = None
    # you want to retry to send the request to chatgpt when the cache is negative
    # Same temperature-driven cache-skip policy as the sync `adapt`.
    if 0 < temperature < 2:
        cache_skip_options = [True, False]
        prob_cache_skip = [0, 1]
        cache_skip = kwargs.pop(
            "cache_skip",
            temperature_softmax(
                messages=cache_skip_options,
                scores=prob_cache_skip,
                temperature=temperature,
            ),
        )
    elif temperature >= 2:
        cache_skip = kwargs.pop("cache_skip", True)
    else:  # temperature <= 0
        cache_skip = kwargs.pop("cache_skip", False)
    cache_factor = kwargs.pop("cache_factor", 1.0)
    # Pre-process the request into (data stored in the cache, data to embed).
    pre_embedding_res = time_cal(
        chat_cache.pre_embedding_func,
        func_name="pre_process",
        report_func=chat_cache.report.pre,
    )(
        kwargs,
        extra_param=context.get("pre_embedding_func", None),
        prompts=chat_cache.config.prompts,
        cache_config=chat_cache.config,
    )
    if isinstance(pre_embedding_res, tuple):
        pre_store_data = pre_embedding_res[0]
        pre_embedding_data = pre_embedding_res[1]
    else:
        pre_store_data = pre_embedding_res
        pre_embedding_data = pre_embedding_res
    if chat_cache.config.input_summary_len is not None:
        pre_embedding_data = _summarize_input(
            pre_embedding_data, chat_cache.config.input_summary_len
        )
    if cache_enable:
        embedding_data = time_cal(
            chat_cache.embedding_func,
            func_name="embedding",
            report_func=chat_cache.report.embedding,
        )(pre_embedding_data, extra_param=context.get("embedding_func", None))
    if cache_enable and not cache_skip:
        search_data_list = time_cal(
            chat_cache.data_manager.search,
            func_name="search",
            report_func=chat_cache.report.search,
        )(
            embedding_data,
            extra_param=context.get("search_func", None),
            top_k=kwargs.pop("top_k", 5)
            if (user_temperature and not user_top_k)
            else kwargs.pop("top_k", -1),
        )
        if search_data_list is None:
            search_data_list = []
        cache_answers = []
        similarity_threshold = chat_cache.config.similarity_threshold
        min_rank, max_rank = chat_cache.similarity_evaluation.range()
        # Scale the acceptance threshold by cache_factor, clamped to the
        # evaluator's [min_rank, max_rank] range.
        rank_threshold = (max_rank - min_rank) * similarity_threshold * cache_factor
        rank_threshold = (
            max_rank
            if rank_threshold > max_rank
            else min_rank
            if rank_threshold < min_rank
            else rank_threshold
        )
        for search_data in search_data_list:
            cache_data = time_cal(
                chat_cache.data_manager.get_scalar_data,
                func_name="get_data",
                report_func=chat_cache.report.data,
            )(
                search_data,
                extra_param=context.get("get_scalar_data", None),
                session=session,
            )
            if cache_data is None:
                continue
            # NOTE(review): unlike the sync `adapt`, this path performs no
            # cache consistency (health) check — confirm that is intended.
            if "deps" in context and hasattr(cache_data.question, "deps"):
                eval_query_data = {
                    "question": context["deps"][0]["data"],
                    "embedding": None,
                }
                eval_cache_data = {
                    "question": cache_data.question.deps[0].data,
                    "answer": cache_data.answers[0].answer,
                    "search_result": search_data,
                    "cache_data": cache_data,
                    "embedding": None,
                }
            else:
                eval_query_data = {
                    "question": pre_store_data,
                    "embedding": embedding_data,
                }
                eval_cache_data = {
                    "question": cache_data.question,
                    "answer": cache_data.answers[0].answer,
                    "search_result": search_data,
                    "cache_data": cache_data,
                    "embedding": cache_data.embedding_data,
                }
            rank = time_cal(
                chat_cache.similarity_evaluation.evaluation,
                func_name="evaluation",
                report_func=chat_cache.report.evaluation,
            )(
                eval_query_data,
                eval_cache_data,
                extra_param=context.get("evaluation_func", None),
            )
            gptcache_log.debug(
                "similarity: [user question] %s, [cache question] %s, [value] %f",
                pre_store_data,
                cache_data.question,
                rank,
            )
            if rank_threshold <= rank:
                cache_answers.append(
                    (float(rank), cache_data.answers[0].answer, search_data, cache_data)
                )
                chat_cache.data_manager.hit_cache_callback(search_data)
        cache_answers = sorted(cache_answers, key=lambda x: x[0], reverse=True)
        answers_dict = dict((d[1], d) for d in cache_answers)
        if len(cache_answers) != 0:

            def post_process():
                # temperature_softmax needs scores; other post-processors only
                # receive the candidate messages.
                if chat_cache.post_process_messages_func is temperature_softmax:
                    return_message = chat_cache.post_process_messages_func(
                        messages=[t[1] for t in cache_answers],
                        scores=[t[0] for t in cache_answers],
                        temperature=temperature,
                    )
                else:
                    return_message = chat_cache.post_process_messages_func(
                        [t[1] for t in cache_answers]
                    )
                return return_message

            return_message = time_cal(
                post_process,
                func_name="post_process",
                report_func=chat_cache.report.post,
            )()
            chat_cache.report.hint_cache()
            # NOTE(review): stringified lookup against raw-answer keys — only
            # matches when the answer is a str; confirm for other types.
            cache_whole_data = answers_dict.get(str(return_message))
            if session and cache_whole_data:
                chat_cache.data_manager.add_session(
                    cache_whole_data[2], session.name, pre_embedding_data
                )
            if cache_whole_data:
                # user_question / cache_question / cache_question_id / cache_answer / similarity / consume time/ time
                report_cache_data = cache_whole_data[3]
                report_search_data = cache_whole_data[2]
                chat_cache.data_manager.report_cache(
                    pre_store_data if isinstance(pre_store_data, str) else "",
                    report_cache_data.question
                    if isinstance(report_cache_data.question, str)
                    else "",
                    report_search_data[1],
                    report_cache_data.answers[0].answer
                    if isinstance(report_cache_data.answers[0].answer, str)
                    else "",
                    cache_whole_data[0],
                    round(time.time() - start_time, 6),
                )
            return cache_data_convert(return_message)

    next_cache = chat_cache.next_cache
    if next_cache:
        kwargs["cache_obj"] = next_cache
        kwargs["cache_context"] = context
        kwargs["cache_skip"] = cache_skip
        kwargs["cache_factor"] = cache_factor
        # NOTE(review): this falls back to the *sync* `adapt` (not awaited) for
        # the chained cache even though `llm_handler` is async — a miss in the
        # chained cache would call the coroutine function without awaiting it.
        # Confirm whether this should be `await aadapt(...)`.
        llm_data = adapt(
            llm_handler, cache_data_convert, update_cache_callback, *args, **kwargs
        )
    else:
        llm_data = await llm_handler(*args, **kwargs)
    if cache_enable:
        try:

            def update_cache_func(handled_llm_data, question=None):
                if question is None:
                    question = pre_store_data
                else:
                    question.content = pre_store_data
                time_cal(
                    chat_cache.data_manager.save,
                    func_name="save",
                    report_func=chat_cache.report.save,
                )(
                    question,
                    handled_llm_data,
                    embedding_data,
                    extra_param=context.get("save_func", None),
                    session=session,
                )
                # Flush periodically instead of on every save.
                if (
                    chat_cache.report.op_save.count > 0
                    and chat_cache.report.op_save.count % chat_cache.config.auto_flush
                    == 0
                ):
                    chat_cache.flush()

            llm_data = update_cache_callback(
                llm_data, update_cache_func, *args, **kwargs
            )
        except Exception:  # pylint: disable=W0703
            # Saving to cache is best-effort: never fail the user's request.
            gptcache_log.error("failed to save the data to cache", exc_info=True)
    return llm_data
21,829 | from typing import Dict, List, Tuple, Any
import numpy as np
from gptcache.similarity_evaluation import SimilarityEvaluation
from gptcache.utils import (
import_onnxruntime,
import_huggingface_hub,
import_huggingface,
)
from transformers import AutoTokenizer
from huggingface_hub import hf_hub_download
import onnxruntime
def pad_sequence(input_ids_list: List[np.ndarray], padding_value: int = 0):
    """Right-pad token-id sequences to equal length.

    :param input_ids_list: sequences of token ids (possibly different lengths).
    :param padding_value: fill value used past each sequence's end.
    :return: a 2-D array of shape ``(len(input_ids_list), max_len)``.
    """
    target_len = max(len(seq) for seq in input_ids_list)
    padded = np.full((len(input_ids_list), target_len), padding_value)
    for row, seq in enumerate(input_ids_list):
        padded[row, : len(seq)] = seq
    return padded
21,830 | from typing import Tuple, Dict, Any, List
import numpy as np
from gptcache.adapter.api import _get_model
from gptcache.similarity_evaluation import SimilarityEvaluation
def euclidean_distance_calculate(vec_l: np.array, vec_r: np.array):
    """Return the squared Euclidean (L2^2) distance between two vectors."""
    diff = vec_l - vec_r
    return np.sum(np.square(diff))
21,831 | from typing import Tuple, Dict, Any, List
import numpy as np
from gptcache.adapter.api import _get_model
from gptcache.similarity_evaluation import SimilarityEvaluation
def reweight(weights, length):
    """Renormalize the first ``length`` weights so they sum to 1.

    If ``length`` covers the whole list (or more), the weights are returned
    unchanged; otherwise the leading ``length`` weights are rescaled by their
    own sum so the truncated distribution still sums to 1.

    :param weights: per-position weights; assumed to have a positive prefix sum
        (a zero prefix sum raises ZeroDivisionError, matching the old code).
    :param length: number of leading weights actually used.
    :return: the original list, or a new list of ``length`` rescaled weights.
    """
    if length >= len(weights):
        return weights
    prefix = weights[:length]
    total = sum(prefix)
    # Single pass instead of the old accumulate-then-scale double loop.
    return [w / total for w in prefix]
21,832 | import numpy as np
from typing import Dict, Any
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from gptcache.manager.vector_data.base import VectorBase
def euclidean_distance_calculate(vec_l: np.array, vec_r: np.array):
    """Squared L2 distance between two vectors."""
    delta = vec_l - vec_r
    return np.sum(delta * delta)
21,833 |
The provided code snippet includes necessary dependencies for implementing the `to_embeddings` function. Write a Python function `def to_embeddings(data, **_)` to solve the following problem:
Nothing to do, return the origin data
Here is the function:
def to_embeddings(data, **_):
    """Identity embedding function: return the request data unchanged."""
    return data
21,834 | import pickle
from abc import abstractmethod, ABCMeta
from typing import List, Any, Optional, Union
import cachetools
import numpy as np
import requests
from gptcache.manager.eviction import EvictionBase
from gptcache.manager.eviction.distributed_cache import NoOpEviction
from gptcache.manager.eviction_manager import EvictionManager
from gptcache.manager.object_data.base import ObjectBase
from gptcache.manager.scalar_data.base import (
CacheStorage,
CacheData,
DataType,
Answer,
Question,
)
from gptcache.manager.vector_data.base import VectorBase, VectorData
from gptcache.utils.error import CacheError, ParamError
from gptcache.utils.log import gptcache_log
def normalize(vec):
    """Return ``vec`` scaled to unit L2 norm.

    A zero vector divides by zero and yields nan/inf, exactly as numpy's
    element-wise division does.
    """
    return vec / np.linalg.norm(vec)
21,835 | from typing import List
import numpy as np
from gptcache.manager.vector_data.base import VectorBase, VectorData
from gptcache.utils import import_sqlalchemy
from sqlalchemy import create_engine, Column, Index, text
from sqlalchemy.types import ( # pylint: disable=C0413
Integer,
UserDefinedType
)
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class _VectorType(UserDefinedType):
    """
    pgvector type mapping for sqlalchemy

    Renders as the PostgreSQL ``vector(<precision>)`` column type; values are
    passed through unchanged in both directions (no driver-side conversion).
    """

    # Safe to cache: the rendered type depends only on `precision`.
    cache_ok = True

    def __init__(self, precision=8):
        # `precision` is the vector dimension used in the DDL column spec.
        self.precision = precision

    def get_col_spec(self, **_):
        # Column DDL fragment, e.g. "vector(128)".
        return f"vector({self.precision})"

    # pylint: disable=unused-argument
    def bind_processor(self, dialect):
        # Identity: embeddings are handed to the driver as-is.
        return lambda value: value

    # pylint: disable=unused-argument
    def result_processor(self, dialect, coltype):
        # Identity: rows come back from the driver as-is.
        return lambda value: value
def _get_model_and_index(table_prefix, vector_dimension, index_type, lists):
    """Build the ORM model and ivfflat index for a pgvector store.

    :param table_prefix: prefix for the table/index names so multiple caches
        can share one database.
    :param vector_dimension: dimension of the stored embeddings.
    :param index_type: pgvector operator spec appended after the column in the
        index expression (e.g. an ops class such as "vector_l2_ops").
    :param lists: number of ivfflat lists (clustering buckets) for the index.
    :return: ``(model class, index)`` tuple.
    """

    class VectorStoreTable(Base):
        """
        vector store table
        """

        __tablename__ = table_prefix + "_pg_vector_store"
        # extend_existing: allow re-declaration when this factory runs more
        # than once in the same process.
        __table_args__ = {"extend_existing": True}
        # id is supplied by the caller, never auto-generated.
        id = Column(Integer, primary_key=True, autoincrement=False)
        embedding = Column(_VectorType(vector_dimension), nullable=False)

    vector_store_index = Index(
        f"idx_{table_prefix}_pg_vector_store_embedding",
        text(f"embedding {index_type}"),
        postgresql_using="ivfflat",
        postgresql_with={"lists": lists}
    )
    # The index is declared outside the model body, so attach it to the table
    # explicitly.
    vector_store_index.table = VectorStoreTable.__table__

    return VectorStoreTable, vector_store_index
21,836 | from typing import Any, Callable, List
import cachetools
from gptcache.manager.eviction.base import EvictionBase
def popitem_wrapper(func, wrapper_func, clean_size):
    """Wrap an eviction ``popitem`` so each call evicts up to ``clean_size`` entries.

    :param func: callable returning a ``(key, value)`` pair and raising
        ``KeyError`` when the underlying store is empty (e.g.
        ``cachetools.Cache.popitem``).
    :param wrapper_func: callback receiving the list of evicted keys so the
        backing stores can delete the corresponding records.
    :param clean_size: maximum number of entries to evict per call.
    :return: the wrapping function (its own return value is ``None``).
    """

    def wrapper(*args, **kwargs):
        keys = []
        try:
            for _ in range(clean_size):
                # Append one key at a time so keys popped before the store runs
                # dry are still reported. The old list comprehension discarded
                # *all* popped keys whenever KeyError interrupted it, so those
                # evictions were never propagated to wrapper_func.
                keys.append(func(*args, **kwargs)[0])
        except KeyError:
            pass
        wrapper_func(keys)

    return wrapper
21,837 | import os
from pathlib import Path
from typing import Union, Callable
from gptcache.manager import CacheBase, VectorBase, ObjectBase
from gptcache.manager.data_manager import SSDataManager, MapDataManager
from gptcache.manager.eviction import EvictionBase
from gptcache.utils.log import gptcache_log
def get_data_manager(
    cache_base: Union[CacheBase, str] = None,
    vector_base: Union[VectorBase, str] = None,
    object_base: Union[ObjectBase, str] = None,
    eviction_base: Union[EvictionBase, str] = None,
    max_size: int = 1000,
    clean_size=None,
    eviction: str = "LRU",
    data_path: str = "data_map.txt",
    get_data_container: Callable = None,
):
    """Build the data manager used by the cache.

    When neither ``cache_base`` nor ``vector_base`` is given, a lightweight
    :class:`MapDataManager` persisted at ``data_path`` is returned. Otherwise a
    :class:`SSDataManager` is assembled from the scalar storage, vector storage
    and the optional object-storage / eviction backends.

    :param cache_base: scalar storage backend or its name — 'sqlite', 'duckdb',
        'postgresql', 'mysql', 'mariadb', 'sqlserver' or 'oracle'.
    :param vector_base: vector storage backend or its name — 'milvus', 'faiss'
        or 'chromadb'.
    :param object_base: object storage backend or its name (local path or s3).
    :param eviction_base: eviction backend or its name — 'memory', 'redis' or
        'no_op_eviction'.
    :param max_size: max size of the LRU cache used by :class:`MapDataManager`.
    :param clean_size: number of entries cleaned out when the cache is full.
    :param eviction: eviction policy, defaults to 'LRU'.
    :param data_path: file used to persist the map data.
    :param get_data_container: optional factory for the map container.
    :return: a :class:`SSDataManager` or :class:`MapDataManager` instance.

    Example:
        .. code-block:: python

            from gptcache.manager import get_data_manager, CacheBase, VectorBase

            data_manager = get_data_manager(CacheBase('sqlite'), VectorBase('faiss', dimension=128))
    """
    # No storage backends at all -> plain in-memory map persisted to disk.
    if not cache_base and not vector_base:
        return MapDataManager(data_path, max_size, get_data_container)

    # Accept plain backend names as shorthand for backend objects.
    if isinstance(cache_base, str):
        cache_base = CacheBase(name=cache_base)
    if isinstance(vector_base, str):
        vector_base = VectorBase(name=vector_base)
    if isinstance(object_base, str):
        object_base = ObjectBase(name=object_base)
    if isinstance(eviction_base, str):
        eviction_base = EvictionBase(name=eviction_base)

    assert cache_base and vector_base
    return SSDataManager(
        cache_base,
        vector_base,
        object_base,
        eviction_base,
        max_size,
        clean_size,
        eviction,
    )
class MapDataManager(DataManager):
"""MapDataManager, store all data in a map data structure.
:param data_path: the path to save the map data, defaults to 'data_map.txt'.
:type data_path: str
:param max_size: the max size for the cache, defaults to 1000.
:type max_size: int
:param get_data_container: a Callable to get the data container, defaults to None.
:type get_data_container: Callable
Example:
.. code-block:: python
from gptcache.manager import get_data_manager
data_manager = get_data_manager("data_map.txt", 1000)
"""
def __init__(self, data_path, max_size, get_data_container=None):
if get_data_container is None:
self.data = cachetools.LRUCache(max_size)
else:
self.data = get_data_container(max_size)
self.data_path = data_path
self.init()
def init(self):
try:
with open(self.data_path, "rb") as f:
self.data = pickle.load(f)
except FileNotFoundError:
return
except PermissionError:
raise CacheError( # pylint: disable=W0707
f"You don't have permission to access this file <{self.data_path}>."
)
def save(self, question, answer, embedding_data, **kwargs):
if isinstance(question, Question):
question = question.content
session = kwargs.get("session", None)
session_id = {session.name} if session else set()
self.data[embedding_data] = (question, answer, embedding_data, session_id)
def import_data(
self,
questions: List[Any],
answers: List[Any],
embedding_datas: List[Any],
session_ids: List[Optional[str]],
):
if (
len(questions) != len(answers)
or len(questions) != len(embedding_datas)
or len(questions) != len(session_ids)
):
raise ParamError("Make sure that all parameters have the same length")
for i, embedding_data in enumerate(embedding_datas):
self.data[embedding_data] = (
questions[i],
answers[i],
embedding_datas[i],
{session_ids[i]} if session_ids[i] else set(),
)
def get_scalar_data(self, res_data, **kwargs) -> CacheData:
session = kwargs.get("session", None)
if session:
answer = (
res_data[1].answer if isinstance(res_data[1], Answer) else res_data[1]
)
if not session.check_hit_func(
session.name, list(res_data[3]), [res_data[0]], answer
):
return None
return CacheData(question=res_data[0], answers=res_data[1])
def search(self, embedding_data, **kwargs):
try:
return [self.data[embedding_data]]
except KeyError:
return []
def flush(self):
try:
with open(self.data_path, "wb") as f:
pickle.dump(self.data, f)
except PermissionError:
gptcache_log.error(
"You don't have permission to access this file %s.", self.data_path
)
def add_session(self, res_data, session_id, pre_embedding_data):
res_data[3].add(session_id)
def list_sessions(self, session_id=None, key=None):
session_ids = set()
for k in self.data:
if session_id and session_id in self.data[k][3]:
session_ids.add(k)
elif len(self.data[k][3]) > 0:
session_ids.update(self.data[k][3])
return list(session_ids)
def delete_session(self, session_id):
keys = self.list_sessions(session_id=session_id)
for k in keys:
self.data[k][3].remove(session_id)
if len(self.data[k][3]) == 0:
del self.data[k]
    def close(self):
        """Persist the cache to disk before shutting down."""
        self.flush()
def EvictionBase(name: str, **kwargs):
    """Generate a specific eviction strategy with the configuration.

    :param name: the name of the eviction, like: memory
    :type name: str
    :param policy: eviction strategy
    :type policy: str
    :param maxsize: the maxsize of cache data
    :type maxsize: int
    :param clean_size: will clean the size of data when the size of cache data reaches the max size
    :type clean_size: int
    :param on_evict: the function for cleaning the data in the store
    :type on_evict: Callable[[List[Any]], None]
    Example:
        .. code-block:: python
            from gptcache.manager import EvictionBase
            cache_base = EvictionBase('memory', policy='lru', maxsize=10, clean_size=2, on_evict=lambda x: print(x))
    """
    # Thin factory: all keyword arguments are forwarded unchanged to the
    # eviction backend registered under `name`.
    return eviction_manager.EvictionBase.get(name, **kwargs)
gptcache_log = logging.getLogger(f'gptcache:{gptcache.__version__}')
The provided code snippet includes necessary dependencies for implementing the `manager_factory` function. Write a Python function `def manager_factory(manager="map", data_dir="./", max_size=1000, eviction_manager: str = "memory", get_data_container: Callable = None, scalar_params=None, vector_params=None, object_params=None, eviction_params=None )` to solve the following problem:
Factory of DataManager. By using this factory method, you only need to specify the root directory of the data, and it can automatically manage all the local files. :param manager: Type of DataManager. Supports: Map, or {scalar_name},{vector_name} or {scalar_name},{vector_name},{object_name} :type manager: str :param data_dir: Root path for data storage. :type data_dir: str :param max_size: the max size for the LRU cache in MapDataManager, defaults to 1000. :type max_size: int :param eviction_manager: The eviction manager, defaults to "memory". It supports "memory" and "redis" and 'no_op_eviction'. :type eviction_manager: str :param get_data_container: a Callable to get the data container, defaults to None. :type get_data_container: Callable :param scalar_params: Params of scalar storage. :type scalar_params: dict :param vector_params: Params of vector storage. :type vector_params: dict :param object_params: Params of object storage. :type object_params: dict :param eviction_params: Params of eviction. :type eviction_params: dict :return: SSDataManager or MapDataManager. Example: .. code-block:: python from gptcache.manager import manager_factory data_manager = manager_factory("sqlite,faiss", data_dir="./workspace", vector_params={"dimension": 128}) # or using manager factory enabled with redis cache instead of in-memory cache from gptcache.manager import manager_factory data_manager = manager_factory("redis,faiss", eviction_manager="redis", scalar_params={"maxmemory": "2mb", "policy": "allkeys-lru" }, vector_params={"dimension": 128}, eviction_params=dict(url="redis://localhost:6379") )
Here is the function:
def manager_factory(manager="map",
                    data_dir="./",
                    max_size=1000,
                    eviction_manager: str = "memory",
                    get_data_container: Callable = None,
                    scalar_params=None,
                    vector_params=None,
                    object_params=None,
                    eviction_params=None
                    ):
    """Factory of DataManager.

    By using this factory method, you only need to specify the root directory of the data,
    and it can automatically manage all the local files.

    :param manager: Type of DataManager. Supports: Map, or {scalar_name},{vector_name}
                    or {scalar_name},{vector_name},{object_name}
    :type manager: str
    :param data_dir: Root path for data storage.
    :type data_dir: str
    :param max_size: the max size for the LRU cache in MapDataManager, defaults to 1000.
    :type max_size: int
    :param eviction_manager: The eviction manager, defaults to "memory".
                             It supports "memory" and "redis" and 'no_op_eviction'.
    :type eviction_manager: str
    :param get_data_container: a Callable to get the data container, defaults to None.
    :type get_data_container: Callable
    :param scalar_params: Params of scalar storage.
    :type scalar_params: dict
    :param vector_params: Params of vector storage.
    :type vector_params: dict
    :param object_params: Params of object storage.
    :type object_params: dict
    :param eviction_params: Params of eviction.
    :type eviction_params: dict
    :return: SSDataManager or MapDataManager.

    Example:
        .. code-block:: python

            from gptcache.manager import manager_factory

            data_manager = manager_factory("sqlite,faiss", data_dir="./workspace", vector_params={"dimension": 128})
    """
    # Make sure the workspace exists before any backend writes files into it.
    Path(data_dir).mkdir(parents=True, exist_ok=True)
    manager = manager.lower()
    if manager == "map":
        return MapDataManager(os.path.join(data_dir, "data_map.txt"), max_size, get_data_container)
    # Expected shape: "{scalar},{vector}" or "{scalar},{vector},{object}".
    db_infos = manager.split(",")
    if len(db_infos) not in [2, 3]:
        raise RuntimeError(
            "Error manager format: %s, the correct is \"{scalar},{vector},{object}\", object is optional" % manager)
    if len(db_infos) == 2:
        db_infos.append("")
    scalar, vector, obj = db_infos
    if scalar_params is None:
        scalar_params = {}
    if scalar == "sqlite":
        # Local scalar store lives inside the workspace directory.
        scalar_params["sql_url"] = "sqlite:///" + os.path.join(data_dir, "sqlite.db")
    s = CacheBase(name=scalar, **scalar_params)
    if vector_params is None:
        vector_params = {}
    local_vector_type = ["faiss", "hnswlib", "docarray"]
    if vector in local_vector_type:
        # File-backed vector indexes are placed inside the workspace as well.
        vector_params["index_path"] = os.path.join(data_dir, f"{vector}.index")
    elif vector == "milvus" and vector_params.get("local_mode", False) is True:
        vector_params["local_data"] = os.path.join(data_dir, "milvus_data")
    v = VectorBase(name=vector, **vector_params)
    if object_params is None:
        object_params = {}
    if obj == "local":
        object_params["path"] = os.path.join(data_dir, "local_obj")
    o = ObjectBase(name=obj, **object_params) if obj else None
    if eviction_params is None:
        eviction_params = {}
    if scalar == "redis" and eviction_manager == "redis":
        # if cache manager and eviction manager are both redis, we use no op redis to avoid redundant operations
        eviction_manager = "no_op_eviction"
        gptcache_log.info("Since Scalar Storage and Eviction manager are both redis, "
                          "no_op_eviction will be used to avoid redundant operations.")
        # Redis handles eviction natively, so pass the tuning knobs to the
        # scalar store (eviction_params win over scalar_params).
        s.init_eviction_params(
            maxmemory=eviction_params.get("maxmemory", scalar_params.get("maxmemory")),
            policy=eviction_params.get("policy", scalar_params.get("policy")),
            ttl=eviction_params.get("ttl", scalar_params.get("ttl")),
            maxmemory_samples=eviction_params.get("maxmemory_samples", scalar_params.get("maxmemory_samples")),
        )
    e = EvictionBase(
        name=eviction_manager,
        **eviction_params
    )
    return get_data_manager(s, v, o, e)
21,838 | from datetime import datetime
from typing import List, Optional, Dict
import numpy as np
from gptcache.manager.scalar_data.base import (
CacheStorage,
CacheData,
Question,
QuestionDep,
)
from gptcache.utils import import_sqlalchemy
import sqlalchemy
from sqlalchemy import func, create_engine, Column, Sequence
from sqlalchemy.types import (
String,
DateTime,
LargeBinary,
Integer,
Float,
)
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
def _get_table_len(config: Dict, column_alias: str) -> int:
if config and column_alias in config and config[column_alias] > 0:
return config[column_alias]
return DEFAULT_LEN_DOCT.get(column_alias, 1000)
def get_models(table_prefix, db_type, table_len_config):
    """Construct the SQLAlchemy ORM models backing the SQL cache storage.

    :param table_prefix: prefix prepended to every table name so several
        caches can share one database.
    :param db_type: database flavor; ``oracle``/``duckdb`` need an explicit
        ``Sequence`` to autoincrement primary keys.
    :param table_len_config: optional per-column VARCHAR length overrides,
        resolved via ``_get_table_len``.
    :return: ``(QuestionTable, AnswerTable, QuestionDepTable, SessionTable, ReportTable)``
    """
    # Fresh class registry so repeated calls (different prefixes) don't collide.
    DynamicBase = declarative_base(class_registry={})  # pylint: disable=C0103
    class QuestionTable(DynamicBase):
        """
        question table: cached question text, its embedding, soft-delete flag
        """
        __tablename__ = table_prefix + "_question"
        __table_args__ = {"extend_existing": True}
        if db_type in ("oracle", "duckdb"):
            # These engines require an explicit sequence for autoincrement ids.
            question_id_seq = Sequence(f"{__tablename__}_id_seq", start=1)
            id = Column(Integer, question_id_seq, primary_key=True, autoincrement=True)
        else:
            id = Column(Integer, primary_key=True, autoincrement=True)
        question = Column(
            String(_get_table_len(table_len_config, "question_question")),
            nullable=False,
        )
        create_on = Column(DateTime, default=datetime.now)
        last_access = Column(DateTime, default=datetime.now)
        embedding_data = Column(LargeBinary, nullable=True)
        deleted = Column(Integer, default=0)  # soft-delete marker (0 = live)
    class AnswerTable(DynamicBase):
        """
        answer table: answers linked to a question row via question_id
        """
        __tablename__ = table_prefix + "_answer"
        __table_args__ = {"extend_existing": True}
        if db_type in ("oracle", "duckdb"):
            answer_id_seq = Sequence(f"{__tablename__}_id_seq")
            id = Column(Integer, answer_id_seq, primary_key=True, autoincrement=True)
        else:
            id = Column(Integer, primary_key=True, autoincrement=True)
        question_id = Column(Integer, nullable=False)
        answer = Column(
            String(_get_table_len(table_len_config, "answer_answer")), nullable=False
        )
        answer_type = Column(Integer, nullable=False)
    class SessionTable(DynamicBase):
        """
        session table: maps session ids onto cached questions
        """
        __tablename__ = table_prefix + "_session"
        __table_args__ = {"extend_existing": True}
        if db_type in ("oracle", "duckdb"):
            session_id_seq = Sequence(f"{__tablename__}_id_seq", start=1)
            id = Column(
                Integer,
                session_id_seq,
                primary_key=True,
                autoincrement=True,
            )
        else:
            id = Column(Integer, primary_key=True, autoincrement=True)
        question_id = Column(Integer, nullable=False)
        session_id = Column(
            String(_get_table_len(table_len_config, "session_id")), nullable=False
        )
        session_question = Column(
            String(_get_table_len(table_len_config, "question_question")),
            nullable=False,
        )
    class QuestionDepTable(DynamicBase):
        """
        question dependency table: auxiliary inputs attached to a question
        """
        __tablename__ = table_prefix + "_question_dep"
        __table_args__ = {"extend_existing": True}
        if db_type in ("oracle", "duckdb"):
            question_dep_id_seq = Sequence(f"{__tablename__}_id_seq", start=1)
            id = Column(
                Integer, question_dep_id_seq, primary_key=True, autoincrement=True
            )
        else:
            id = Column(Integer, primary_key=True, autoincrement=True)
        question_id = Column(Integer, nullable=False)
        dep_name = Column(
            String(_get_table_len(table_len_config, "dep_name")), nullable=False
        )
        dep_data = Column(
            String(_get_table_len(table_len_config, "dep_data")), nullable=False
        )
        dep_type = Column(Integer, nullable=False)
    class ReportTable(DynamicBase):
        """
        report table: one row per cache hit, for observability/auditing
        """
        __tablename__ = table_prefix + "_report"
        __table_args__ = {"extend_existing": True}
        if db_type in ("oracle", "duckdb"):
            question_dep_id_seq = Sequence(f"{__tablename__}_id_seq", start=1)
            id = Column(
                Integer, question_dep_id_seq, primary_key=True, autoincrement=True
            )
        else:
            id = Column(Integer, primary_key=True, autoincrement=True)
        user_question = Column(
            String(_get_table_len(table_len_config, "question_question")),
            nullable=False,
        )
        cache_question_id = Column(
            Integer,
            nullable=False,
        )
        cache_question = Column(
            String(_get_table_len(table_len_config, "question_question")),
            nullable=False,
        )
        cache_answer = Column(
            String(_get_table_len(table_len_config, "answer_answer")), nullable=False
        )
        similarity = Column(Float, nullable=False)
        cache_delta_time = Column(Float, nullable=False)
        cache_time = Column(DateTime, default=datetime.now)
        extra = Column(
            String(_get_table_len(table_len_config, "question_question")),
            nullable=True,
        )
    return QuestionTable, AnswerTable, QuestionDepTable, SessionTable, ReportTable
21,839 | import datetime
from typing import List, Optional
import numpy as np
from gptcache.manager.scalar_data.base import (
CacheStorage,
CacheData,
Question,
QuestionDep,
)
from gptcache.utils import import_redis
from redis import Redis
from redis.client import Pipeline
from redis_om import get_redis_connection
from redis_om import JsonModel, EmbeddedJsonModel, NotFoundError, Field, Migrator
The provided code snippet includes necessary dependencies for implementing the `get_models` function. Write a Python function `def get_models(global_key: str, redis_connection: Redis)` to solve the following problem:
Get all the models for the given global key and redis connection. :param global_key: Global key will be used as a prefix for all the keys :type global_key: str :param redis_connection: Redis connection to use for all the models. Note: This needs to be explicitly mentioned in `Meta` class for each Object Model, otherwise it will use the default connection from the pool. :type redis_connection: Redis
Here is the function:
def get_models(global_key: str, redis_connection: Redis):
    """
    Get all the models for the given global key and redis connection.

    :param global_key: Global key will be used as a prefix for all the keys
    :type global_key: str
    :param redis_connection: Redis connection to use for all the models.
        Note: This needs to be explicitly mentioned in `Meta` class for each Object Model,
        otherwise it will use the default connection from the pool.
    :type redis_connection: Redis
    :return: ``(Questions, Answers, QuestionDeps, Sessions, Counter, Report)``
    """
    class Counter:
        """
        Monotonic id counter stored under ``<global_key>:counter``.
        """
        key_name = global_key + ":counter"
        database = redis_connection
        @classmethod
        def incr(cls):
            cls.database.incr(cls.key_name)
        @classmethod
        def get(cls):
            return cls.database.get(cls.key_name)
    class EmbeddingType:
        """
        Directly using bytes for embedding data is not supported by redis-om as of now.
        Custom type for embedding data. This will be stored as bytes in redis.
        Latin-1 encoding is used to convert the bytes to string and vice versa.
        """
        def __init__(self, data: bytes):
            self.data = data
        @classmethod
        def __get_validators__(cls):
            # pydantic v1 hook: route all assignments through `validate`.
            yield cls.validate
        @classmethod
        def validate(cls, v: [np.array, bytes]):
            if isinstance(v, np.ndarray):
                return cls(v.astype(np.float32).tobytes())
            elif isinstance(v, bytes):
                return cls(v)
            return cls(v)
        def to_numpy(self) -> np.ndarray:
            # NOTE(review): assumes `data` is a latin-1 str (as produced when a
            # model is loaded back from JSON); raw bytes would make `.encode`
            # fail — confirm against the save/load path.
            return np.frombuffer(self.data.encode("latin-1"), dtype=np.float32)
        def __repr__(self):
            return f"{self.data}"
    class Answers(EmbeddedJsonModel):
        """
        answer collection
        """
        answer: str
        answer_type: int
        class Meta:
            database = redis_connection
    class QuestionDeps(EmbeddedJsonModel):
        """
        Question Dep collection
        """
        dep_name: str
        dep_data: str
        dep_type: int
    class Questions(JsonModel):
        """
        questions collection
        """
        question: str = Field(index=True)
        create_on: datetime.datetime
        last_access: datetime.datetime
        deleted: int = Field(index=True)  # soft-delete marker, indexed for filtering
        answers: List[Answers]
        deps: List[QuestionDeps]
        embedding: EmbeddingType
        class Meta:
            global_key_prefix = global_key
            model_key_prefix = "questions"
            database = redis_connection
        class Config:
            # Serialize embeddings as latin-1 text so they survive the JSON round-trip.
            json_encoders = {
                EmbeddingType: lambda n: n.data.decode("latin-1")
                if isinstance(n.data, bytes) else n.data
            }
    class Sessions(JsonModel):
        """
        session collection
        """
        class Meta:
            global_key_prefix = global_key
            model_key_prefix = "sessions"
            database = redis_connection
        session_id: str = Field(index=True)
        session_question: str
        question_id: str = Field(index=True)
    class Report(JsonModel):
        """
        Report collection
        """
        class Meta:
            global_key_prefix = global_key
            model_key_prefix = "report"
            database = redis_connection
        user_question: str
        cache_question_id: int = Field(index=True)
        cache_question: str
        cache_answer: str
        similarity: float = Field(index=True)
        cache_delta_time: float = Field(index=True)
        cache_time: datetime.datetime = Field(index=True)
        extra: Optional[str]
    return Questions, Answers, QuestionDeps, Sessions, Counter, Report
21,840 | from datetime import datetime
from typing import List, Optional
import numpy as np
from gptcache.manager.scalar_data.base import (
CacheStorage,
CacheData,
Question,
QuestionDep,
)
from gptcache.utils import import_mongodb
from mongoengine import Document
from mongoengine import fields
import mongoengine as me
def get_models():
    """Build the MongoEngine document models for the mongo cache storage.

    Fix: ``DateTimeField`` defaults are now the *callable* ``datetime.now``
    instead of ``datetime.now()``, which froze a single model-creation-time
    timestamp into every new document (the SQLAlchemy storage already
    passes the callable).

    :return: ``(Questions, Answers, QuestionDeps, Sessions, Report)``
    """
    class Questions(Document):
        """
        questions collection
        """
        meta = {"collection": "questions", "indexes": ["deleted"]}
        _id = fields.SequenceField()
        question = fields.StringField()
        create_on = fields.DateTimeField(default=datetime.now)
        last_access = fields.DateTimeField(default=datetime.now)
        embedding_data = fields.BinaryField()
        deleted = fields.IntField(default=0)  # soft-delete marker (0 = live)
        @property
        def oid(self):
            return self._id
    class Answers(Document):
        """
        answer collection
        """
        _id = fields.SequenceField()
        meta = {"collection": "answers", "indexes": ["question_id"]}
        answer = fields.StringField()
        answer_type = fields.IntField()
        question_id = fields.IntField()
        @property
        def oid(self):
            return self._id
    class Sessions(Document):
        """
        session collection
        """
        meta = {"collection": "sessions", "indexes": ["question_id"]}
        _id = fields.SequenceField()
        session_id = fields.StringField()
        session_question = fields.StringField()
        question_id = fields.IntField()
        @property
        def oid(self):
            return self._id
    class QuestionDeps(Document):
        """
        Question Dep collection
        """
        meta = {"collection": "question_deps", "indexes": ["question_id"]}
        _id = fields.SequenceField()
        question_id = fields.IntField()
        dep_name = fields.StringField()
        dep_data = fields.StringField()
        dep_type = fields.IntField()
        @property
        def oid(self):
            return self._id
    class Report(Document):
        """
        Report
        """
        meta = {
            "collection": "report",
            "indexes": ["cache_question_id", "similarity", "cache_delta_time"],
        }
        _id = fields.SequenceField()
        user_question = fields.StringField()
        cache_question_id = fields.IntField()
        cache_question = fields.StringField()
        cache_answer = fields.StringField()
        similarity = fields.FloatField()
        cache_delta_time = fields.FloatField()
        cache_time = fields.DateTimeField(default=datetime.now)
        extra = fields.StringField()
        @property
        def oid(self):
            return self._id
    return Questions, Answers, QuestionDeps, Sessions, Report
21,841 |
def cache_all(*_, **__):
    """Always-on cache predicate: ignores every argument and returns True."""
    return True
21,842 | import subprocess
from gptcache.utils.error import PipInstallError
from gptcache.utils.log import gptcache_log
class PipInstallError(CacheError):
"""Raise when failed to install package."""
def __init__(self, package):
super().__init__(f"Ran into error installing {package}.")
gptcache_log = logging.getLogger(f'gptcache:{gptcache.__version__}')
The provided code snippet includes necessary dependencies for implementing the `prompt_install` function. Write a Python function `def prompt_install(package: str, warn: bool = False)` to solve the following problem:
Function used to prompt user to install a package.
Here is the function:
def prompt_install(package: str, warn: bool = False):  # pragma: no cover
    """
    Function used to prompt user to install a package.

    :param package: pip requirement specifier(s), e.g. ``"faiss-cpu"``.
    :param warn: when True, ask for confirmation before installing.
    :raises ModuleNotFoundError: if the user declines the install prompt.
    :raises PipInstallError: if pip exits with a non-zero status.
    """
    import sys  # local import keeps the module's import surface unchanged
    # Security/correctness fix: build an argv list and run pip through the
    # current interpreter instead of `shell=True` with an interpolated string
    # (which was shell-injectable via the package name and could pick up a
    # `pip` belonging to a different Python environment).
    cmd = [sys.executable, "-m", "pip", "install", "-q", *package.split()]
    try:
        if warn and input(f"Install {package}? Y/n: ") != "Y":
            raise ModuleNotFoundError(f"No module named {package}")
        print(f"start to install package: {package}")
        subprocess.check_call(cmd)
        print(f"successfully installed package: {package}")
        gptcache_log.info("%s installed successfully!", package)
    except subprocess.CalledProcessError as e:
        raise PipInstallError(package) from e
21,843 | import base64
import requests
def get_message_from_openai_answer(openai_resp):
    """Pull the assistant message text out of a non-streaming chat completion response."""
    first_choice = openai_resp["choices"][0]
    return first_choice["message"]["content"]
21,844 | import base64
import requests
def get_stream_message_from_openai_answer(openai_data):
    """Extract the incremental text of one streaming chunk ('' when the delta has no content)."""
    delta = openai_data["choices"][0]["delta"]
    return delta.get("content", "")
21,845 | import base64
import requests
def get_text_from_openai_answer(openai_resp):
    """Return the generated text of the first completion choice."""
    choices = openai_resp["choices"]
    return choices[0]["text"]
21,846 | import base64
import requests
def get_image_from_openai_b64(openai_resp):
    """Return the base64 payload of the first generated image."""
    first_image = openai_resp["data"][0]
    return first_image["b64_json"]
21,847 | import base64
import requests
def get_image_from_openai_url(openai_resp):
    """Download the first generated image by URL and return it base64-encoded.

    Fix: the HTTP request now carries a timeout, so a stalled download can no
    longer hang the caller indefinitely.
    """
    url = openai_resp["data"][0]["url"]
    img_content = requests.get(url, timeout=60).content
    img_data = base64.b64encode(img_content)
    return img_data
21,848 | import base64
import requests
def get_image_from_path(openai_resp):
    """Read the image file referenced by the response and return its bytes base64-encoded."""
    img_path = openai_resp["data"][0]["url"]
    with open(img_path, "rb") as image_file:
        return base64.b64encode(image_file.read())
21,849 | import base64
import requests
def get_audio_text_from_openai_answer(openai_resp):
    """Return the transcribed text from an audio transcription response."""
    transcription = openai_resp["text"]
    return transcription
class CacheError(Exception):
    """Root of the GPTCache exception hierarchy; catch this to handle any cache failure."""
The provided code snippet includes necessary dependencies for implementing the `wrap_error` function. Write a Python function `def wrap_error(e: Exception) -> Exception` to solve the following problem:
Add a type to exception `e` while ensuring that the original type is not changed Example: .. code-block:: python import openai from gptcache.utils.error import wrap_error def raise_error(): try: raise openai.error.OpenAIError(message="test") except openai.error.OpenAIError as e: raise wrap_error(e) try: raise_error() except openai.error.OpenAIError as e: print("exception:") print(e) print("over")
Here is the function:
def wrap_error(e: Exception) -> Exception:
    """Graft ``CacheError`` onto an exception without losing its original type.

    The exception instance is mutated in place: its class is replaced by a
    dynamically created subclass of both ``CacheError`` and the original
    class, so existing ``except OriginalError`` handlers keep working while
    callers can also catch ``CacheError``.

    Example:
        .. code-block:: python

            import openai
            from gptcache.utils.error import wrap_error

            try:
                raise openai.error.OpenAIError(message="test")
            except openai.error.OpenAIError as e:
                raise wrap_error(e)  # still an OpenAIError, now also a CacheError
    """
    original_cls = e.__class__
    hybrid_cls = type(original_cls.__name__, (CacheError, original_cls), {})
    e.__class__ = hybrid_cls
    return e
21,851 | import argparse
import json
import os
import zipfile
from typing import Optional
from gptcache import cache, Cache
from gptcache.adapter import openai
from gptcache.adapter.api import (
get,
put,
init_similar_cache,
init_similar_cache_from_config,
)
from gptcache.processor.pre import last_content
from gptcache.utils import import_fastapi, import_pydantic, import_starlette
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import FileResponse
import uvicorn
from pydantic import BaseModel
async def hello():
    """Liveness probe for the GPTCache server root route."""
    return "hello gptcache server"
21,852 | import argparse
import json
import os
import zipfile
from typing import Optional
from gptcache import cache, Cache
from gptcache.adapter import openai
from gptcache.adapter.api import (
get,
put,
init_similar_cache,
init_similar_cache_from_config,
)
from gptcache.processor.pre import last_content
from gptcache.utils import import_fastapi, import_pydantic, import_starlette
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import FileResponse
import uvicorn
from pydantic import BaseModel
class CacheData(BaseModel):
    """Request/response body for the cache HTTP API."""
    prompt: str  # cache key, usually the question text
    answer: Optional[str] = ""  # cached value; empty when only querying
def put(prompt: str, data: Any, **kwargs) -> None:
    """Write a question/answer pair into GPTCache.

    Requires the cache to be initialized with ``pre_embedding_func=get_prompt``.

    :param prompt: the cache data key, usually question text
    :type prompt: str
    :param data: the cache data value, usually answer text
    :type data: Any
    :param kwargs: list of user-defined parameters
    :type kwargs: Dict

    Example:
        .. code-block:: python

            from gptcache.adapter.api import put
            from gptcache.processor.pre import get_prompt

            cache.init(pre_embedding_func=get_prompt)
            put("hello", "foo")
    """
    def constant_llm(*_args, **_kwargs):
        # Stand-in "model" that always answers with the provided data.
        return data
    adapt(
        constant_llm,
        _cache_data_converter,
        _update_cache_callback,
        cache_skip=True,  # always write through; never serve from cache here
        prompt=prompt,
        **kwargs,
    )
async def put_cache(cache_data: CacheData) -> str:
    """HTTP handler: store the submitted prompt/answer pair in the cache."""
    put(cache_data.prompt, cache_data.answer)
    return "successfully update the cache"
21,853 | import argparse
import json
import os
import zipfile
from typing import Optional
from gptcache import cache, Cache
from gptcache.adapter import openai
from gptcache.adapter.api import (
get,
put,
init_similar_cache,
init_similar_cache_from_config,
)
from gptcache.processor.pre import last_content
from gptcache.utils import import_fastapi, import_pydantic, import_starlette
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import FileResponse
import uvicorn
from pydantic import BaseModel
class CacheData(BaseModel):
prompt: str
answer: Optional[str] = ""
def get(prompt: str, **kwargs) -> Any:
    """Look up the cached answer for ``prompt``.

    Requires the cache to be initialized with ``pre_embedding_func=get_prompt``.

    :param prompt: the cache data key, usually question text
    :type prompt: str
    :param kwargs: list of user-defined parameters
    :type kwargs: Dict

    Example:
        .. code-block:: python

            from gptcache.adapter.api import put, get
            from gptcache.processor.pre import get_prompt

            cache.init(pre_embedding_func=get_prompt)
            put("hello", "foo")
            print(get("hello"))
    """
    # A no-op LLM handle plus no-op update callback means only the cache is
    # consulted and nothing new is written on a miss.
    return adapt(
        _llm_handle_none,
        _cache_data_converter,
        _update_cache_callback_none,
        prompt=prompt,
        **kwargs,
    )
async def get_cache(cache_data: CacheData) -> CacheData:
    """HTTP handler: fetch the cached answer for the submitted prompt."""
    answer = get(cache_data.prompt)
    return CacheData(prompt=cache_data.prompt, answer=answer)
21,854 | import argparse
import json
import os
import zipfile
from typing import Optional
from gptcache import cache, Cache
from gptcache.adapter import openai
from gptcache.adapter.api import (
get,
put,
init_similar_cache,
init_similar_cache_from_config,
)
from gptcache.processor.pre import last_content
from gptcache.utils import import_fastapi, import_pydantic, import_starlette
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import FileResponse
import uvicorn
from pydantic import BaseModel
async def flush_cache() -> str:
    """HTTP handler: persist in-memory cache state to its backing store."""
    cache.flush()
    return "successfully flush the cache"
21,855 | import argparse
import json
import os
import zipfile
from typing import Optional
from gptcache import cache, Cache
from gptcache.adapter import openai
from gptcache.adapter.api import (
get,
put,
init_similar_cache,
init_similar_cache_from_config,
)
from gptcache.processor.pre import last_content
from gptcache.utils import import_fastapi, import_pydantic, import_starlette
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import FileResponse
import uvicorn
from pydantic import BaseModel
cache_dir = ""
cache_file_key = ""
async def get_cache_file(key: str = "") -> FileResponse:
    """HTTP handler: zip the configured cache directory and return the archive.

    Download is allowed only when both ``cache_dir`` and ``cache_file_key``
    were configured at server start and the caller supplies the exact key.

    :raises HTTPException: 403 on missing configuration or a wrong key.
    """
    global cache_dir
    global cache_file_key
    if cache_dir == "":
        raise HTTPException(
            status_code=403,
            detail="the cache_dir was not specified when the service was initialized",
        )
    if cache_file_key == "":
        raise HTTPException(
            status_code=403,
            detail="the cache file can't be downloaded because the cache-file-key was not specified",
        )
    if cache_file_key != key:
        raise HTTPException(status_code=403, detail="the cache file key is wrong")
    zip_filename = cache_dir + ".zip"
    # Re-create the archive on every request so it reflects the current cache state.
    with zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED) as zipf:
        for root, _, files in os.walk(cache_dir):
            for file in files:
                zipf.write(os.path.join(root, file))
    return FileResponse(zip_filename)
21,856 | import argparse
import json
import os
import zipfile
from typing import Optional
from gptcache import cache, Cache
from gptcache.adapter import openai
from gptcache.adapter.api import (
get,
put,
init_similar_cache,
init_similar_cache_from_config,
)
from gptcache.processor.pre import last_content
from gptcache.utils import import_fastapi, import_pydantic, import_starlette
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import FileResponse
import uvicorn
from pydantic import BaseModel
openai_cache: Optional[Cache] = None
import openai
def get(prompt: str, **kwargs) -> Any:
    """get api, get the cache data according to the `prompt`
    Please make sure that the `pre_embedding_func` param is `get_prompt` when initializing the cache
    :param prompt: the cache data key, usually question text
    :type prompt: str
    :param kwargs: list of user-defined parameters
    :type kwargs: Dict
    Example:
        .. code-block:: python
            from gptcache.adapter.api import put, get
            from gptcache.processor.pre import get_prompt
            cache.init(pre_embedding_func=get_prompt)
            put("hello", "foo")
            print(get("hello"))
    """
    # A no-op LLM handle plus no-op update callback means only the cache is
    # consulted and nothing new is written on a miss.
    res = adapt(
        _llm_handle_none,
        _cache_data_converter,
        _update_cache_callback_none,
        prompt=prompt,
        **kwargs,
    )
    return res
def import_starlette():
    """Ensure the ``starlette`` dependency is available (delegates to ``_check_library``)."""
    _check_library("starlette")
async def chat(request: Request):
    """HTTP proxy for OpenAI chat completions with GPTCache in front.

    Honors a ``cache_skip`` flag either as a JSON field or as a
    ``/cache_skip `` marker inside the first/last message, forwards the
    caller's bearer token as the OpenAI API key, and supports both
    streaming (SSE) and plain JSON responses.

    :raises HTTPException: 500 when the proxy is disabled or OpenAI errors.
    """
    if openai_cache is None:
        raise HTTPException(
            status_code=500,
            detail=f"the gptcache server doesn't open the openai completes proxy",
        )
    import_starlette()
    from starlette.responses import StreamingResponse, JSONResponse
    openai_params = await request.json()
    is_stream = openai_params.get("stream", False)
    headers = request.headers
    auth_header = headers.get("authorization", None)
    # "Bearer <key>" -> "<key>"; empty string when no auth header was sent.
    openai_key = auth_header.split(" ")[1] if auth_header else ""
    cache_skip = openai_params.pop("cache_skip", False)
    if cache_skip is False:
        # Fallback: clients may request a bypass inline via a "/cache_skip "
        # marker in the first or last message; strip it before forwarding so
        # the model never sees it.
        messages = openai_params.get("messages")
        if "/cache_skip " in messages[0]["content"]:
            cache_skip = True
            content0 = openai_params.get("messages")[0]["content"]
            openai_params.get("messages")[0]["content"] = str(content0).replace("/cache_skip ", "")
        elif "/cache_skip " in messages[-1]["content"]:
            cache_skip = True
            content0 = openai_params.get("messages")[-1]["content"]
            openai_params.get("messages")[-1]["content"] = str(content0).replace("/cache_skip ", "")
    print("cache_skip:", cache_skip)
    print("messages:", openai_params.get("messages"))
    try:
        if is_stream:
            def generate():
                # Re-emit each upstream chunk as a server-sent event.
                for stream_response in openai.ChatCompletion.create(
                    cache_obj=openai_cache,
                    cache_skip=cache_skip,
                    api_key=openai_key,
                    **openai_params,
                ):
                    if stream_response == "[DONE]":
                        yield "data: [DONE]\n\n"
                        break
                    yield f"data: {json.dumps(stream_response)}\n\n"
            return StreamingResponse(generate(), media_type="text/event-stream")
        else:
            openai_response = openai.ChatCompletion.create(
                cache_obj=openai_cache,
                cache_skip=cache_skip,
                api_key=openai_key,
                **openai_params,
            )
            return JSONResponse(content=openai_response)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"openai error: {e}")
21,857 | import cv2
import random
import numpy as np
import argparse
from DRL.evaluator import Evaluator
from utils.util import *
from utils.tensorboard import TensorBoard
import time
writer = TensorBoard('../train_log/{}'.format(exp))
def train(agent, env, evaluate):
    """DDPG-style training loop for the painting agent.

    Interacts with ``env`` for up to ``args.train_times`` steps; at each
    episode end (``episode_steps >= max_step``) and after the warm-up period,
    optionally evaluates, checkpoints, and runs ``episode_train_times`` policy
    updates with a step-scheduled learning rate.

    :param agent: the DDPG agent (select_action / observe / update_policy / save_model)
    :param env: the painting environment
    :param evaluate: callable running validation episodes, returns (reward, dist)
    """
    train_times = args.train_times
    env_batch = args.env_batch
    validate_interval = args.validate_interval
    max_step = args.max_step
    debug = args.debug
    episode_train_times = args.episode_train_times
    resume = args.resume
    output = args.output
    time_stamp = time.time()
    step = episode = episode_steps = 0
    tot_reward = 0.
    observation = None
    noise_factor = args.noise_factor
    while step <= train_times:
        step += 1
        episode_steps += 1
        # reset if it is the start of episode
        if observation is None:
            observation = env.reset()
            agent.reset(observation, noise_factor)
        action = agent.select_action(observation, noise_factor=noise_factor)
        observation, reward, done, _ = env.step(action)
        agent.observe(reward, observation, done, step)
        # Episode boundary: episode_steps hit max_step (and max_step is nonzero).
        if (episode_steps >= max_step and max_step):
            if step > args.warmup:
                # [optional] evaluate
                if episode > 0 and validate_interval > 0 and episode % validate_interval == 0:
                    reward, dist = evaluate(env, agent.select_action, debug=debug)
                    if debug: prRed('Step_{:07d}: mean_reward:{:.3f} mean_dist:{:.3f} var_dist:{:.3f}'.format(step - 1, np.mean(reward), np.mean(dist), np.var(dist)))
                    writer.add_scalar('validate/mean_reward', np.mean(reward), step)
                    writer.add_scalar('validate/mean_dist', np.mean(dist), step)
                    writer.add_scalar('validate/var_dist', np.var(dist), step)
                    agent.save_model(output)
            train_time_interval = time.time() - time_stamp
            time_stamp = time.time()
            tot_Q = 0.
            tot_value_loss = 0.
            if step > args.warmup:
                # Piecewise-constant LR decay: (critic_lr, actor_lr) by step count.
                if step < 10000 * max_step:
                    lr = (3e-4, 1e-3)
                elif step < 20000 * max_step:
                    lr = (1e-4, 3e-4)
                else:
                    lr = (3e-5, 1e-4)
                for i in range(episode_train_times):
                    Q, value_loss = agent.update_policy(lr)
                    tot_Q += Q.data.cpu().numpy()
                    tot_value_loss += value_loss.data.cpu().numpy()
                writer.add_scalar('train/critic_lr', lr[0], step)
                writer.add_scalar('train/actor_lr', lr[1], step)
                writer.add_scalar('train/Q', tot_Q / episode_train_times, step)
                writer.add_scalar('train/critic_loss', tot_value_loss / episode_train_times, step)
            if debug: prBlack('#{}: steps:{} interval_time:{:.2f} train_time:{:.2f}' \
                .format(episode, step, train_time_interval, time.time()-time_stamp))
            time_stamp = time.time()
            # reset
            observation = None
            episode_steps = 0
            episode += 1
21,858 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, SGD
from Renderer.model import *
from DRL.rpm import rpm
from DRL.actor import *
from DRL.critic import *
from DRL.wgan import *
from utils.util import *
# Fill the coordinate tensor with normalized (row, col) positions in [0, 1].
# NOTE(review): `coord` is allocated outside this view (presumably a
# (1, 2, 128, 128) tensor) -- confirm against the full module.
for i in range(128):
    for j in range(128):
        coord[0, 0, i, j] = i / 127.
        coord[0, 1, i, j] = j / 127.

# Neural stroke renderer, loaded from a pre-trained checkpoint.
Decoder = FCN()
Decoder.load_state_dict(torch.load('../renderer.pkl'))
def decode(x, canvas):  # b * (10 + 3)
    """Render a 5-stroke action bundle onto ``canvas`` via the neural renderer.

    Each 13-dim action is 10 stroke-shape params plus an RGB color; the
    module-level ``Decoder`` rasterizes the shape at 128x128.
    """
    params = x.view(-1, 10 + 3)
    # Decoder outputs background=1; invert so stroke pixels are high.
    alpha = 1 - Decoder(params[:, :10])
    alpha = alpha.view(-1, 128, 128, 1)
    colored = alpha * params[:, -3:].view(-1, 1, 1, 3)
    # NHWC -> NCHW, then regroup as 5 strokes per canvas.
    alpha = alpha.permute(0, 3, 1, 2).view(-1, 5, 1, 128, 128)
    colored = colored.permute(0, 3, 1, 2).view(-1, 5, 3, 128, 128)
    for k in range(5):
        # Alpha-composite stroke k over the running canvas.
        canvas = canvas * (1 - alpha[:, k]) + colored[:, k]
    return canvas
21,859 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, SGD
from Renderer.model import *
from DRL.rpm import rpm
from DRL.actor import *
from DRL.critic import *
from DRL.wgan import *
from utils.util import *
def cal_trans(s, t):
    """Scale tensor ``s`` along its first dimension by per-entry factors ``t``.

    Swaps dims 0 and 3 so broadcasting multiplies each slice s[i] by t[i],
    then swaps back.
    """
    swapped = s.transpose(0, 3)
    return (swapped * t).transpose(0, 3)
21,860 | import torch
import torch.nn as nn
import numpy as np
from torch.optim import Adam, SGD
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
import torch.nn.utils.weight_norm as weightNorm
from utils.util import *
target_netD = Discriminator()
target_netD = target_netD.to(device)
def cal_reward(fake_data, real_data):
    """Score a (real, fake) pair with the target discriminator (used as reward)."""
    pair = torch.cat([real_data, fake_data], 1)
    return target_netD(pair)
21,861 | import torch
import torch.nn as nn
import numpy as np
from torch.optim import Adam, SGD
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
import torch.nn.utils.weight_norm as weightNorm
from utils.util import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
netD = Discriminator()
netD = netD.to(device)
def save_gan(path):
    """Checkpoint the discriminator weights to ``<path>/wgan.pkl``."""
    # Move to CPU first so the checkpoint loads on CPU-only hosts.
    netD.cpu()
    torch.save(netD.state_dict(), '{}/wgan.pkl'.format(path))
    # Restore the training device.
    netD.to(device)
21,862 | import torch
import torch.nn as nn
import numpy as np
from torch.optim import Adam, SGD
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
import torch.nn.utils.weight_norm as weightNorm
from utils.util import *
netD = Discriminator()
netD = netD.to(device)
def load_gan(path):
    """Restore discriminator weights previously saved by ``save_gan``."""
    state = torch.load('{}/wgan.pkl'.format(path))
    netD.load_state_dict(state)
21,863 | import torch
import torch.nn as nn
import numpy as np
from torch.optim import Adam, SGD
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
import torch.nn.utils.weight_norm as weightNorm
from utils.util import *
netD = Discriminator()
target_netD = Discriminator()
netD = netD.to(device)
target_netD = target_netD.to(device)
optimizerD = Adam(netD.parameters(), lr=3e-4, betas=(0.5, 0.999))
def cal_gradient_penalty(netD, real_data, fake_data, batch_size):
    """WGAN-GP gradient penalty: ((||grad D(x_hat)||_2 - 1)^2).mean() * LAMBDA
    evaluated on random interpolates x_hat between real and fake batches."""
    # One interpolation coefficient per sample, broadcast over every element.
    alpha = torch.rand(batch_size, 1)
    alpha = alpha.expand(batch_size, int(real_data.nelement()/batch_size)).contiguous()
    # NOTE(review): `dim` and `LAMBDA` are module-level globals not visible in
    # this view -- presumably the image width and the penalty weight; confirm.
    alpha = alpha.view(batch_size, 6, dim, dim)
    alpha = alpha.to(device)
    fake_data = fake_data.view(batch_size, 6, dim, dim)
    # Interpolated samples require grad so we can differentiate D w.r.t. input.
    interpolates = Variable(alpha * real_data.data + ((1 - alpha) * fake_data.data), requires_grad=True)
    disc_interpolates = netD(interpolates)
    # Gradient of the critic score with respect to the interpolated input.
    gradients = autograd.grad(disc_interpolates, interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size()).to(device),
                              create_graph=True, retain_graph=True)[0]
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gradient_penalty
def update(fake_data, real_data):
    """One WGAN-GP critic update; returns (mean D_fake, mean D_real, penalty)."""
    # Detach so the critic step does not backprop into the generator/actor.
    fake_data = fake_data.detach()
    real_data = real_data.detach()
    # Critic input is (reference, candidate) concatenated on the channel axis.
    fake = torch.cat([real_data, fake_data], 1)
    real = torch.cat([real_data, real_data], 1)
    D_real = netD(real)
    D_fake = netD(fake)
    gradient_penalty = cal_gradient_penalty(netD, real, fake, real.shape[0])
    optimizerD.zero_grad()
    # Wasserstein critic loss: lower fake scores, raise real scores.
    D_cost = D_fake.mean() - D_real.mean() + gradient_penalty
    D_cost.backward()
    optimizerD.step()
    # Slowly track the online critic with the target critic used for rewards.
    soft_update(target_netD, netD, 0.001)
    return D_fake.mean(), D_real.mean(), gradient_penalty
21,864 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.weight_norm as weightNorm
from torch.autograd import Variable
import sys
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias (ResNet building block)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
21,865 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.weight_norm as weightNorm
from torch.autograd import Variable
import sys
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style) with BatchNorm."""
    # Output channels = planes * expansion.
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut, replaced by a 1x1 projection when the spatial
        # size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                (nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Residual connection, then final activation.
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50+ style)."""
    # Output channels = planes * expansion.
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = (nn.Conv2d(in_planes, planes, kernel_size=1, bias=False))
        self.conv2 = (nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False))
        self.conv3 = (nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False))
        self.bn1 = nn.BatchNorm2d(planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        # Projection shortcut when the residual path changes shape.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                (nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)),
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Residual connection, then final activation.
        out += self.shortcut(x)
        out = F.relu(out)
        return out
def cfg(depth):
    """Map a supported ResNet depth to its (block class, stage sizes) config."""
    configs = {
        18: (BasicBlock, [2, 2, 2, 2]),
        34: (BasicBlock, [3, 4, 6, 3]),
        50: (Bottleneck, [3, 4, 6, 3]),
        101: (Bottleneck, [3, 4, 23, 3]),
        152: (Bottleneck, [3, 8, 36, 3]),
    }
    assert (depth in configs), "Error : Resnet depth should be either 18, 34, 50, 101, 152"
    return configs[depth]
21,866 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.weight_norm as weightNorm
from torch.autograd import Variable
import sys
def conv3x3(in_planes, out_planes, stride=1):
    """Weight-normalized 3x3 convolution with bias and padding 1."""
    conv = nn.Conv2d(
        in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True
    )
    return weightNorm(conv)
21,867 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.weight_norm as weightNorm
from torch.autograd import Variable
import sys
class BasicBlock(nn.Module):
    """Two-conv residual block using weight normalization instead of BatchNorm.

    NOTE(review): ``TReLU`` is defined elsewhere in the project (appears to be
    a learnable-threshold ReLU) -- confirm against the full source.
    """
    # Output channels = planes * expansion.
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.conv2 = conv3x3(planes, planes)
        # Projection shortcut when the residual path changes shape.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                weightNorm(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=True)),
            )
        # Separate activation instances (TReLU presumably carries parameters).
        self.relu_1 = TReLU()
        self.relu_2 = TReLU()

    def forward(self, x):
        out = self.relu_1(self.conv1(x))
        out = self.conv2(out)
        # Residual connection, then final activation.
        out += self.shortcut(x)
        out = self.relu_2(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block with weight normalization.

    NOTE(review): ``TReLU`` is defined elsewhere in the project -- confirm.
    """
    # Output channels = planes * expansion.
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = weightNorm(nn.Conv2d(in_planes, planes, kernel_size=1, bias=True))
        self.conv2 = weightNorm(nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True))
        self.conv3 = weightNorm(nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=True))
        # Separate activation instances (TReLU presumably carries parameters).
        self.relu_1 = TReLU()
        self.relu_2 = TReLU()
        self.relu_3 = TReLU()
        # Projection shortcut when the residual path changes shape.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                weightNorm(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=True)),
            )

    def forward(self, x):
        out = self.relu_1(self.conv1(x))
        out = self.relu_2(self.conv2(out))
        out = self.conv3(out)
        # Residual connection, then final activation.
        out += self.shortcut(x)
        out = self.relu_3(out)
        return out
def cfg(depth):
    """Return the (block type, per-stage layer counts) for a ResNet depth."""
    depth_lst = [18, 34, 50, 101, 152]
    assert (depth in depth_lst), "Error : Resnet depth should be either 18, 34, 50, 101, 152"
    blocks = {
        18: BasicBlock,
        34: BasicBlock,
        50: Bottleneck,
        101: Bottleneck,
        152: Bottleneck,
    }
    layers = {
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }
    return (blocks[depth], layers[depth])
21,868 | import cv2
import numpy as np
def normal(x, width):
    """Map a coordinate x in [0, 1] to the nearest pixel index in [0, width-1]."""
    return int(x * (width - 1) + 0.5)
def draw(f, width=128):
    """Rasterize one quadratic-Bezier brush stroke to a (width, width) image.

    ``f`` packs ten stroke parameters in [0, 1]:
    (x0, y0, x1, y1, x2, y2, z0, z2, w0, w2) -- start, control, and end
    points, radii z at the two ends, and gray intensities w at the two ends.
    Returns a float array where 1.0 is background and strokes darken it.
    """
    x0, y0, x1, y1, x2, y2, z0, z2, w0, w2 = f
    # The control point is expressed relative to the start->end segment.
    x1 = x0 + (x2 - x0) * x1
    y1 = y0 + (y2 - y0) * y1
    # Render at 2x resolution for smoother edges, then downsample at the end.
    x0 = normal(x0, width * 2)
    x1 = normal(x1, width * 2)
    x2 = normal(x2, width * 2)
    y0 = normal(y0, width * 2)
    y1 = normal(y1, width * 2)
    y2 = normal(y2, width * 2)
    z0 = (int)(1 + z0 * width // 2)
    z2 = (int)(1 + z2 * width // 2)
    canvas = np.zeros([width * 2, width * 2]).astype('float32')
    tmp = 1. / 100
    # Sample 100 points along the curve and stamp a filled circle at each,
    # linearly interpolating radius and intensity between the two ends.
    for i in range(100):
        t = i * tmp
        x = (int)((1-t) * (1-t) * x0 + 2 * t * (1-t) * x1 + t * t * x2)
        y = (int)((1-t) * (1-t) * y0 + 2 * t * (1-t) * y1 + t * t * y2)
        z = (int)((1-t) * z0 + t * z2)
        w = (1-t) * w0 + t * w2
        cv2.circle(canvas, (y, x), z, w, -1)
    # Invert so the background is 1 and the stroke subtracts from it.
    return 1 - cv2.resize(canvas, dsize=(width, width))
21,869 | import cv2
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from utils.tensorboard import TensorBoard
from Renderer.model import FCN
from Renderer.stroke_gen import *
import torch.optim as optim
net = FCN()
use_cuda = torch.cuda.is_available()
def save_model():
    """Save the renderer weights to ``../renderer.pkl`` as a CPU checkpoint."""
    if use_cuda:
        # Detach from the GPU so the pickle loads on CPU-only machines.
        net.cpu()
    torch.save(net.state_dict(), "../renderer.pkl")
    if use_cuda:
        # Put the model back on the GPU to continue training.
        net.cuda()
21,870 | import cv2
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from utils.tensorboard import TensorBoard
from Renderer.model import FCN
from Renderer.stroke_gen import *
import torch.optim as optim
net = FCN()
def load_weights():
    """Load ``../renderer.pkl`` into ``net``, keeping only matching keys."""
    checkpoint = torch.load("../renderer.pkl")
    current = net.state_dict()
    # Drop checkpoint entries the current model doesn't have.
    compatible = {k: v for k, v in checkpoint.items() if k in current}
    current.update(compatible)
    net.load_state_dict(current)
21,871 | import os
import torch
from torch.autograd import Variable
def prRed(prt):
    """Print *prt* in red via ANSI escape codes."""
    print(f"\033[91m {prt}\033[00m")
21,872 | import os
import torch
from torch.autograd import Variable
def prGreen(prt):
    """Print *prt* in green via ANSI escape codes."""
    print(f"\033[92m {prt}\033[00m")
21,873 | import os
import torch
from torch.autograd import Variable
def prYellow(prt):
    """Print *prt* in yellow via ANSI escape codes."""
    print(f"\033[93m {prt}\033[00m")
21,874 | import os
import torch
from torch.autograd import Variable
def prLightPurple(prt):
    """Print *prt* in light purple via ANSI escape codes."""
    print(f"\033[94m {prt}\033[00m")
21,875 | import os
import torch
from torch.autograd import Variable
def prPurple(prt):
    """Print *prt* in purple via ANSI escape codes."""
    print(f"\033[95m {prt}\033[00m")
21,876 | import os
import torch
from torch.autograd import Variable
def prCyan(prt):
    """Print *prt* in cyan via ANSI escape codes."""
    print(f"\033[96m {prt}\033[00m")
21,877 | import os
import torch
from torch.autograd import Variable
def prLightGray(prt):
    """Print *prt* in light gray via ANSI escape codes."""
    print(f"\033[97m {prt}\033[00m")
21,878 | import os
import torch
from torch.autograd import Variable
def prBlack(prt):
    """Print *prt* using ANSI code 98 (default/black on most terminals)."""
    print(f"\033[98m {prt}\033[00m")
21,879 | import os
import torch
from torch.autograd import Variable
# Detect GPU availability once at import time.
USE_CUDA = torch.cuda.is_available()

def to_numpy(var):
    """Return the tensor's contents as a numpy array (copied off the GPU if needed)."""
    data = var.cpu().data if USE_CUDA else var.data
    return data.numpy()
21,880 | import os
import torch
from torch.autograd import Variable
def to_tensor(ndarray, device):
    """Wrap array-like data as a float32 tensor placed on ``device``."""
    return torch.tensor(ndarray, dtype=torch.float, device=device)
21,881 | import os
import torch
from torch.autograd import Variable
def soft_update(target, source, tau):
    """Polyak-average source parameters into target: t <- (1-tau)*t + tau*s."""
    for t_param, s_param in zip(target.parameters(), source.parameters()):
        blended = t_param.data * (1.0 - tau) + s_param.data * tau
        t_param.data.copy_(blended)
21,882 | import os
import torch
from torch.autograd import Variable
def hard_update(target, source):
    """Copy source's buffers and parameters into target verbatim."""
    # Copy non-parameter state (e.g. batch-norm running stats) module by module.
    for t_mod, s_mod in zip(target.modules(), source.modules()):
        t_mod._buffers = s_mod._buffers.copy()
    for t_param, s_param in zip(target.parameters(), source.parameters()):
        t_param.data.copy_(s_param.data)
21,883 | import os
import torch
from torch.autograd import Variable
The provided code snippet includes necessary dependencies for implementing the `get_output_folder` function. Write a Python function `def get_output_folder(parent_dir, env_name)` to solve the following problem:
Return save folder. Assumes folders in the parent_dir have suffix -run{run number}. Finds the highest run number and sets the output folder to that number + 1. This is just convenient so that if you run the same script multiple times tensorboard can plot all of the results on the same plots with different names. Parameters ---------- parent_dir: str Path of the directory containing all experiment runs. Returns ------- parent_dir/run_dir Path to this run's save directory.
Here is the function:
def get_output_folder(parent_dir, env_name):
    """Return save folder.

    Assumes folders in the parent_dir have suffix -run{run
    number}. Finds the highest run number and sets the output folder
    to that number + 1. This is just convenient so that if you run the
    same script multiple times tensorboard can plot all of the results
    on the same plots with different names.

    Parameters
    ----------
    parent_dir: str
        Path of the directory containing all experiment runs.

    Returns
    -------
    parent_dir/run_dir
        Path to this run's save directory.
    """
    os.makedirs(parent_dir, exist_ok=True)
    experiment_id = 0
    for folder_name in os.listdir(parent_dir):
        if not os.path.isdir(os.path.join(parent_dir, folder_name)):
            continue
        try:
            folder_id = int(folder_name.split('-run')[-1])
        except ValueError:
            # Skip folders without a numeric -run suffix.  (The original code
            # used a bare `except: pass`, which also swallowed KeyboardInterrupt
            # and other unrelated errors.)
            continue
        experiment_id = max(experiment_id, folder_id)
    experiment_id += 1
    parent_dir = os.path.join(parent_dir, env_name)
    parent_dir = parent_dir + '-run{}'.format(experiment_id)
    os.makedirs(parent_dir, exist_ok=True)
    return parent_dir
21,890 | import torch
import torch.nn as nn
import numpy as np
from torch.optim import Adam, SGD
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
import torch.nn.utils.weight_norm as weightNorm
from utils.util import *
netD = Discriminator()
target_netD = Discriminator()
netD = netD.to(device)
target_netD = target_netD.to(device)
optimizerD = Adam(netD.parameters(), lr=3e-4, betas=(0.5, 0.999))
def cal_gradient_penalty(netD, real_data, fake_data, batch_size):
    # NOTE(review): the body of cal_gradient_penalty appears to have been
    # elided in this copy -- as the lines stand, `update` can only parse as a
    # nested function.  Confirm against the original source, where update is
    # a module-level function and this one computes the WGAN-GP penalty.
    def update(fake_data, real_data):
        """One WGAN-GP critic step; returns (mean D_fake, mean D_real, penalty)."""
        # Detach so the critic step does not backprop into the generator.
        fake_data = fake_data.detach()
        real_data = real_data.detach()
        # Critic input is (reference, candidate) concatenated on channels.
        fake = torch.cat([real_data, fake_data], 1)
        real = torch.cat([real_data, real_data], 1)
        D_real = netD(real)
        D_fake = netD(fake)
        gradient_penalty = cal_gradient_penalty(netD, real, fake, real.shape[0])
        optimizerD.zero_grad()
        # Wasserstein critic loss: lower fake scores, raise real scores.
        D_cost = D_fake.mean() - D_real.mean() + gradient_penalty
        D_cost.backward()
        optimizerD.step()
        # Slowly track the online critic with the target critic.
        soft_update(target_netD, netD, 0.001)
        return D_fake.mean(), D_real.mean(), gradient_penalty
21,911 | import argparse
import tempfile
from pathlib import Path
import cog
import cv2
import imageio
from baseline.DRL.actor import *
from baseline.Renderer.model import *
def decode(x, canvas, Decoder, width):  # b * (10 + 3)
    """Render a 5-stroke action bundle onto ``canvas``.

    Returns the final canvas and the list of 5 intermediate canvases
    (one per stroke), useful for building an animation.
    """
    params = x.view(-1, 10 + 3)
    # Decoder outputs background=1; invert so stroke pixels are high.
    alpha = 1 - Decoder(params[:, :10])
    alpha = alpha.view(-1, width, width, 1)
    colored = alpha * params[:, -3:].view(-1, 1, 1, 3)
    # NHWC -> NCHW, then regroup as 5 strokes per canvas.
    alpha = alpha.permute(0, 3, 1, 2).view(-1, 5, 1, width, width)
    colored = colored.permute(0, 3, 1, 2).view(-1, 5, 3, width, width)
    frames = []
    for k in range(5):
        # Alpha-composite stroke k over the running canvas.
        canvas = canvas * (1 - alpha[:, k]) + colored[:, k]
        frames.append(canvas)
    return canvas, frames
21,912 | import argparse
import tempfile
from pathlib import Path
import cog
import cv2
import imageio
from baseline.DRL.actor import *
from baseline.Renderer.model import *
def large2small(x, canvas_cnt, args, width):
    """Split a (d*width, d*width, 3) image into d*d tiles of (width, width, 3)."""
    d = args.divide
    tiles = x.reshape(d, width, d, width, 3)
    # Bring the two tile indices together before the pixel indices.
    tiles = np.transpose(tiles, (0, 2, 1, 3, 4))
    return tiles.reshape(canvas_cnt, width, width, 3)
21,913 | import argparse
import tempfile
from pathlib import Path
import cog
import cv2
import imageio
from baseline.DRL.actor import *
from baseline.Renderer.model import *
def small2large(x, args, width):
    """Reassemble d*d tiles of (width, width) into one (d*width, d*width) image."""
    d = args.divide
    img = x.reshape(d, d, width, width, -1)
    # Interleave tile rows with pixel rows (and columns likewise).
    img = np.transpose(img, (0, 2, 1, 3, 4))
    return img.reshape(d * width, d * width, -1)
def smooth(img, args, width):
    """Blur the seams between tiles by 3x3-averaging pixels along tile borders."""
    def smooth_pix(img, tx, ty):
        # Pixels on the outer border of the full image have no complete 3x3
        # neighborhood -- leave them untouched.
        if (
            tx == args.divide * width - 1
            or ty == args.divide * width - 1
            or tx == 0
            or ty == 0
        ):
            return img
        # Replace the pixel with the mean of its 3x3 neighborhood.
        img[tx, ty] = (
            img[tx, ty]
            + img[tx + 1, ty]
            + img[tx, ty + 1]
            + img[tx - 1, ty]
            + img[tx, ty - 1]
            + img[tx + 1, ty - 1]
            + img[tx - 1, ty + 1]
            + img[tx - 1, ty - 1]
            + img[tx + 1, ty + 1]
        ) / 9
        return img

    # For each tile, smooth the pixels on its bottom and right edges, plus the
    # first row/column of the adjacent tile when one exists.
    for p in range(args.divide):
        for q in range(args.divide):
            x = p * width
            y = q * width
            for k in range(width):
                img = smooth_pix(img, x + k, y + width - 1)
                if q != args.divide - 1:
                    img = smooth_pix(img, x + k, y + width)
            for k in range(width):
                img = smooth_pix(img, x + width - 1, y + k)
                if p != args.divide - 1:
                    img = smooth_pix(img, x + width, y + k)
    return img
def save_img(res, origin_shape, args, width, divide=False):
    """Convert a torch canvas batch to a BGR uint8 image at the original size.

    :param res: canvas tensor in NCHW layout with values in [0, 1]
    :param origin_shape: (w, h) target size for cv2.resize
    :param divide: if True, ``res`` holds per-tile canvases that are stitched
        back together and seam-smoothed; otherwise the first canvas is used.
    """
    output = res.detach().cpu().numpy()
    # NCHW -> NHWC for image processing.
    output = np.transpose(output, (0, 2, 3, 1))
    if divide:
        output = small2large(output, args, width)
        output = smooth(output, args, width)
    else:
        output = output[0]
    output = (output * 255).astype("uint8")
    output = cv2.resize(output, origin_shape)
    # The model works in RGB; OpenCV writers expect BGR.
    output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
    return output
21,914 | import csv
import collections
The provided code snippet includes necessary dependencies for implementing the `read_rides_as_tuples` function. Write a Python function `def read_rides_as_tuples(filename)` to solve the following problem:
Read the bus ride data as a list of tuples
Here is the function:
def read_rides_as_tuples(filename):
    '''
    Read the bus ride data as a list of tuples
    '''
    records = []
    with open(filename) as f:
        reader = csv.reader(f)
        next(reader)  # discard the header row
        for row in reader:
            # (route, date, daytype, rides) with rides converted to int
            records.append((row[0], row[1], row[2], int(row[3])))
    return records
21,915 | import csv
import collections
The provided code snippet includes necessary dependencies for implementing the `read_rides_as_dicts` function. Write a Python function `def read_rides_as_dicts(filename)` to solve the following problem:
Read the bus ride data as a list of dicts
Here is the function:
def read_rides_as_dicts(filename):
    '''
    Read the bus ride data as a list of dicts
    '''
    records = []
    with open(filename) as f:
        reader = csv.reader(f)
        next(reader)  # discard the header row
        for row in reader:
            records.append({
                'route': row[0],
                'date': row[1],
                'daytype': row[2],
                'rides': int(row[3]),
            })
    return records
21,916 | import csv
class Row:
    """One bus-ride record; __slots__ keeps per-instance memory small."""
    __slots__ = ('route', 'date', 'daytype', 'rides')

    def __init__(self, route, date, daytype, rides):
        self.route = route
        self.date = date
        self.daytype = daytype
        self.rides = rides
import collections
The provided code snippet includes necessary dependencies for implementing the `read_rides_as_instances` function. Write a Python function `def read_rides_as_instances(filename)` to solve the following problem:
Read the bus ride data as a list of instances
Here is the function:
def read_rides_as_instances(filename):
    '''
    Read the bus ride data as a list of instances
    '''
    records = []
    with open(filename) as f:
        reader = csv.reader(f)
        next(reader)  # discard the header row
        for row in reader:
            records.append(Row(row[0], row[1], row[2], int(row[3])))
    return records
21,917 | import csv
import collections
The provided code snippet includes necessary dependencies for implementing the `read_rides_as_columns` function. Write a Python function `def read_rides_as_columns(filename)` to solve the following problem:
Read the bus ride data into 4 lists, representing columns
Here is the function:
def read_rides_as_columns(filename):
    '''
    Read the bus ride data into 4 lists, representing columns
    '''
    routes, dates, daytypes, numrides = [], [], [], []
    with open(filename) as f:
        reader = csv.reader(f)
        next(reader)  # discard the header row
        for row in reader:
            routes.append(row[0])
            dates.append(row[1])
            daytypes.append(row[2])
            numrides.append(int(row[3]))
    return dict(routes=routes, dates=dates, daytypes=daytypes, numrides=numrides)
21,918 | import csv
import collections
class RideData(collections.abc.Sequence):
    """Column-oriented ride storage that still presents rows as dicts."""

    def __init__(self):
        # One list per column; all columns stay the same length.
        self.routes = []
        self.dates = []
        self.daytypes = []
        self.numrides = []

    def __len__(self):
        # Any column works; routes is representative.
        return len(self.routes)

    def append(self, d):
        """Append one row given as a dict with route/date/daytype/rides keys."""
        self.routes.append(d['route'])
        self.dates.append(d['date'])
        self.daytypes.append(d['daytype'])
        self.numrides.append(d['rides'])

    def __getitem__(self, index):
        # Reconstruct a row dict on demand from the column lists.
        return {
            'route': self.routes[index],
            'date': self.dates[index],
            'daytype': self.daytypes[index],
            'rides': self.numrides[index],
        }
The provided code snippet includes necessary dependencies for implementing the `read_rides_as_dicts` function. Write a Python function `def read_rides_as_dicts(filename)` to solve the following problem:
Read the bus ride data as a list of dicts
Here is the function:
def read_rides_as_dicts(filename):
    '''
    Read the bus ride data as a list of dicts
    '''
    # RideData stores columns internally but behaves like a list of dicts.
    records = RideData()
    with open(filename) as f:
        reader = csv.reader(f)
        next(reader)  # discard the header row
        for row in reader:
            records.append({
                'route': row[0],
                'date': row[1],
                'daytype': row[2],
                'rides': int(row[3]),
            })
    return records
class Integer(Typed):
    # NOTE(review): `Typed` is defined elsewhere -- presumably it supplies a
    # `check` method driven by `expected_type`; confirm against the full source.
    expected_type = int
def add(x, y):
    """Return x + y after validating both operands via Integer.check."""
    Integer.check(x)
    Integer.check(y)
    return x + y
21,920 | import os
import time
import csv
from functools import wraps
def follow(filename, target):
    """Tail ``filename`` forever, sending each newly appended line to the
    coroutine ``target`` (which must already be primed).  Never returns."""
    with open(filename, "r") as f:
        # Start at EOF so only lines written after startup are reported.
        f.seek(0, os.SEEK_END)
        while True:
            line = f.readline()
            if line != '':
                target.send(line)
            else:
                # No new data yet; sleep briefly to avoid a busy wait.
                time.sleep(0.1)
21,921 | import os
import time
import csv
from functools import wraps
def consumer(func):
    """Decorator that primes a coroutine by advancing it to its first yield."""
    @wraps(func)
    def start(*args, **kwargs):
        coro = func(*args, **kwargs)
        # Advance to the first yield so callers can .send() immediately.
        coro.send(None)
        return coro
    return start
21,922 | import os
import time
import csv
from functools import wraps
def printer():
    """Coroutine that prints every item sent in; exceptions thrown into it
    are reported and the coroutine keeps running."""
    while True:
        try:
            item = yield
            print(item)
        except Exception as e:
            print(f'ERROR: {e!r}')
21,923 | import os
import time
The provided code snippet includes necessary dependencies for implementing the `follow` function. Write a Python function `def follow(filename)` to solve the following problem:
Generator that produces a sequence of lines being written at the end of a file.
Here is the function:
def follow(filename):
    '''
    Generator that produces a sequence of lines being written at the end of a file.
    '''
    try:
        with open(filename,'r') as f:
            # Start at EOF so only lines appended after startup are yielded.
            f.seek(0,os.SEEK_END)
            while True:
                line = f.readline()
                if line == '':
                    time.sleep(0.1)    # Sleep briefly to avoid busy wait
                    continue
                yield line
    except GeneratorExit:
        # The consumer closed the generator (e.g. broke out of its loop).
        print('Following Done')
21,924 | import os
import time
def splitter(lines):
    """Yield each input line split on commas."""
    for raw in lines:
        yield raw.split(',')
def make_records(rows, names):
    """Yield one dict per row, pairing ``names`` with the row's fields."""
    for fields in rows:
        yield dict(zip(names, fields))
def unquote(records, keylist):
    """Strip surrounding double quotes from the listed keys, in place, and yield."""
    for rec in records:
        for key in keylist:
            rec[key] = rec[key].strip('"')
        yield rec
def convert(records, converter, keylist):
    """Apply ``converter`` to the listed keys of each record, in place, and yield."""
    for rec in records:
        for key in keylist:
            rec[key] = converter(rec[key])
        yield rec
def parse_stock_data(lines):
    """Generator pipeline: CSV lines -> dict records -> unquoted -> typed fields."""
    field_names = ['name', 'price', 'date', 'time',
                   'change', 'open', 'high', 'low', 'volume']
    records = make_records(splitter(lines), field_names)
    records = unquote(records, ["name", "date", "time"])
    records = convert(records, float, ['price', 'change', 'open', 'high', 'low'])
    records = convert(records, int, ['volume'])
    return records
class Integer(Typed):
    # NOTE(review): `Typed` is defined elsewhere -- presumably the validator
    # base driving `check` via `expected_type`; confirm against the full source.
    expected_type = int
from inspect import signature
def add(x:Integer, y:Integer) -> Integer:
    """Return x + y; the Integer annotations drive external validation
    (e.g. a `validated` decorator inspecting __annotations__)."""
    return x + y
21,926 | from .validate import Validator, validated
from collections import ChainMap
class Validator:
    """Descriptor base class: validates values on attribute assignment and
    keeps a registry of every Validator subclass."""

    def __init__(self, name=None):
        self.name = name

    def __set_name__(self, cls, name):
        # Record the attribute name this descriptor was assigned to.
        self.name = name

    # NOTE(review): takes `cls` yet has no @classmethod decorator -- the
    # decorator may have been lost in extraction; confirm against the original.
    def check(cls, value):
        return value

    def __set__(self, instance, value):
        # Validate, then store directly in the instance dict.
        instance.__dict__[self.name] = self.check(value)

    # Collect all derived classes into a dict
    validators = { }

    def __init_subclass__(cls):
        cls.validators[cls.__name__] = cls
def validated(func):
    """Decorator that enforces Validator annotations on arguments and, if
    present, on the return value; raises TypeError listing every failure."""
    sig = signature(func)
    # Gather the function annotations
    # NOTE(review): `isvalidator` is not defined in this view -- presumably it
    # tests whether the annotation is a Validator; confirm.
    annotations = { name:val for name, val in func.__annotations__.items()
                    if isvalidator(val) }
    # Get the return annotation (if any)
    retcheck = annotations.pop('return', None)

    def wrapper(*args, **kwargs):
        bound = sig.bind(*args, **kwargs)
        errors = []
        # Enforce argument checks
        for name, validator in annotations.items():
            try:
                validator.check(bound.arguments[name])
            except Exception as e:
                errors.append(f' {name}: {e}')
        if errors:
            # Report all bad arguments at once rather than failing on the first.
            raise TypeError('Bad Arguments\n' + '\n'.join(errors))
        result = func(*args, **kwargs)
        # Enforce return check (if any)
        if retcheck:
            try:
                retcheck.check(result)
            except Exception as e:
                raise TypeError(f'Bad return: {e}') from None
        return result
    return wrapper
The provided code snippet includes necessary dependencies for implementing the `validate_attributes` function. Write a Python function `def validate_attributes(cls)` to solve the following problem:
Class decorator that scans a class definition for Validators and builds a _fields variable that captures their definition order.
Here is the function:
def validate_attributes(cls):
    '''
    Class decorator that scans a class definition for Validators
    and builds a _fields variable that captures their definition order.
    '''
    validators = []
    for name, val in vars(cls).items():
        if isinstance(val, Validator):
            validators.append(val)
        # Apply validated decorator to any callable with annotations
        elif callable(val) and val.__annotations__:
            setattr(cls, name, validated(val))
    # Collect all of the field names
    cls._fields = tuple([v.name for v in validators])
    # Collect type conversions. The lambda x:x is an identity
    # function that's used in case no expected_type is found.
    cls._types = tuple([ getattr(v, 'expected_type', lambda x: x)
                         for v in validators ])
    # Create the __init__ method
    # NOTE(review): `create_init` is defined elsewhere on the class -- it
    # presumably synthesizes __init__ from _fields; confirm.
    if cls._fields:
        cls.create_init()
    return cls
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.