Columns:
id: int64 (0 to 190k)
prompt: string (length 21 to 13.4M)
docstring: string (length 1 to 12k)
179,372
import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from pathlib import Path import datasets import torch from build_dataset import build_instruction_dataset, DataCollatorForSupervisedDataset import transformers from transformers import ( CONFIG_MAPPI...
r""" This method wraps the entire protocol for preparing a model before running a training. This includes: 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm head to fp32 Args: model, (`transformers.PreTrainedModel`): The loaded model from `transformers`
179,373
import logging import os from dataclasses import dataclass from typing import Dict, Sequence, Union, List import datasets import torch from datasets import load_dataset, concatenate_datasets import transformers IGNORE_INDEX = -100 logger = logging.getLogger('__name__') PROMPT_TEMPLATE = ( "[INST] <<SYS>>\n" ...
null
179,374
from datasets import load_dataset import torch import random import numpy as np import json from transformers import LlamaTokenizer, AutoModelForCausalLM from transformers import BitsAndBytesConfig from tqdm import tqdm import os import argparse import sys from attn_and_long_ctx_patches import apply_attention_patch, ap...
null
179,375
from datasets import load_dataset import torch import random import numpy as np import json from transformers import LlamaTokenizer, AutoModelForCausalLM from transformers import BitsAndBytesConfig from tqdm import tqdm import os import argparse import sys from attn_and_long_ctx_patches import apply_attention_patch, ap...
null
179,376
import os import json import argparse import numpy as np from metrics import ( qa_f1_score, rouge_zh_score, qa_f1_zh_score, rouge_score, classification_score, retrieval_score, retrieval_zh_score, count_score, code_sim_score, ) def parse_args(args=None): parser = argparse.Argumen...
null
179,377
import os import json import argparse import numpy as np from metrics import ( qa_f1_score, rouge_zh_score, qa_f1_zh_score, rouge_score, classification_score, retrieval_score, retrieval_zh_score, count_score, code_sim_score, ) dataset2metric = { "narrativeqa": qa_f1_score, "q...
null
179,378
import os import json import argparse import numpy as np from metrics import ( qa_f1_score, rouge_zh_score, qa_f1_zh_score, rouge_score, classification_score, retrieval_score, retrieval_zh_score, count_score, code_sim_score, ) dataset2metric = { "narrativeqa": qa_f1_score, "q...
null
179,379
import re import string import jieba from fuzzywuzzy import fuzz import difflib from collections import Counter from rouge import Rouge def count_score(prediction, ground_truth, **kwargs): numbers = re.findall(r"\d+", prediction) right_num = 0 for number in numbers: if str(number) == str(ground_tru...
null
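The prompt above is truncated mid-function. A plausible completion of `count_score`, sketched from the visible prefix (counting how many numbers extracted from the prediction equal the ground truth); the final normalization step is an assumption:

```python
import re

def count_score(prediction, ground_truth, **kwargs):
    numbers = re.findall(r"\d+", prediction)
    right_num = 0
    for number in numbers:
        if str(number) == str(ground_truth):
            right_num += 1
    # Assumed scoring rule: fraction of extracted numbers that match the ground truth.
    final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers)
    return float(final_score)
```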
179,380
import re import string import jieba from fuzzywuzzy import fuzz import difflib from collections import Counter from rouge import Rouge def retrieval_score(prediction, ground_truth, **kwargs): pattern = r'Paragraph (\d+)' matches = re.findall(pattern, ground_truth) ground_truth_id = matches[0] numbers ...
null
179,381
import re import string import jieba from fuzzywuzzy import fuzz import difflib from collections import Counter from rouge import Rouge def retrieval_zh_score(prediction, ground_truth, **kwargs): pattern = r'段落(\d+)' matches = re.findall(pattern, ground_truth) ground_truth_id = matches[0] numbers = re....
null
179,382
import re import string import jieba from fuzzywuzzy import fuzz import difflib from collections import Counter from rouge import Rouge def code_sim_score(prediction, ground_truth, **kwargs): all_lines = prediction.lstrip('\n').split('\n') prediction = "" for line in all_lines: if ('`' not in line)...
null
179,383
import re import string import jieba from fuzzywuzzy import fuzz import difflib from collections import Counter from rouge import Rouge def classification_score(prediction, ground_truth, **kwargs): em_match_list = [] all_classes = kwargs["all_classes"] for class_name in all_classes: if class_name i...
null
179,384
import re import string import jieba from fuzzywuzzy import fuzz import difflib from collections import Counter from rouge import Rouge def rouge_score(prediction, ground_truth, **kwargs): def rouge_zh_score(prediction, ground_truth, **kwargs): prediction = " ".join(list(jieba.cut(prediction, cut_all=False))) ...
null
179,385
import re import string import jieba from fuzzywuzzy import fuzz import difflib from collections import Counter from rouge import Rouge def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): return re.sub(r"\b(a|an|the)\b", " ", text) ...
null
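The visible prefix matches the standard SQuAD answer normalization; a sketch of the likely remainder (the helper names beyond `remove_articles` are assumptions):

```python
import re
import string

def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
```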
179,386
import re import string import jieba from fuzzywuzzy import fuzz import difflib from collections import Counter from rouge import Rouge def normalize_zh_answer(s): """Lower text and remove punctuation, extra whitespace.""" def white_space_fix(text): return "".join(text.split()) def remove_punc(text)...
null
179,387
import argparse import json import os import gc import torch import peft from transformers import LlamaTokenizer from transformers.modeling_utils import dtype_byte_size from huggingface_hub import snapshot_download import re import shutil def jsonload(filename): with open(filename, "r") as file: d = json.l...
null
179,388
import argparse import json import os import gc import torch import peft from transformers import LlamaTokenizer from transformers.modeling_utils import dtype_byte_size from huggingface_hub import snapshot_download import re import shutil def translate_state_dict_key(k): k = k.replace("base_model.model.", "") i...
Convert and save the HF format weights to PTH format weights
179,389
import argparse import json import os import gc import torch import peft from transformers import LlamaTokenizer from transformers.modeling_utils import dtype_byte_size from huggingface_hub import snapshot_download import re import shutil def merge_shards(output_dir, num_shards: int): ckpt_filenames = sorted([f fo...
null
179,390
from __future__ import annotations import datetime as dt import logging from typing import Dict, List, Optional from dateutil.parser import parse from dbt_semantic_interfaces.protocols.semantic_manifest import SemanticManifest from dbt_semantic_interfaces.validations.semantic_manifest_validator import SemanticManifestV...
Callback to convert a string given as an ISO 8601 timestamp to a datetime.
179,391
from __future__ import annotations import logging import pprint from collections.abc import Mapping from dataclasses import fields, is_dataclass from enum import Enum from typing import Any, Dict, List, Optional, Sized, Union from pydantic import BaseModel from metricflow.mf_logging.formatting import indent def mf_pfor...
Prints many objects in an indented form.
179,392
from __future__ import annotations import functools import logging import time from contextlib import contextmanager from typing import Callable, Iterator, TypeVar from typing_extensions import ParamSpec logger = logging.getLogger(__name__) ReturnType = TypeVar("ReturnType") ParametersType = ParamSpec("ParametersType")...
Logs how long a function took to run. If the runtime exceeds runtime_warning_threshold, then a warning is logged.
179,393
from __future__ import annotations import functools import logging import time from contextlib import contextmanager from typing import Callable, Iterator, TypeVar from typing_extensions import ParamSpec logger = logging.getLogger(__name__) The provided code snippet includes necessary dependencies for implementing the...
Logs the runtime of the enclosed code block.
179,394
from __future__ import annotations from typing import List from dbt_semantic_interfaces.call_parameter_sets import ParseWhereFilterException from dbt_semantic_interfaces.implementations.filters.where_filter import PydanticWhereFilter from metricflow.naming.linkable_spec_name import StructuredLinkableSpecName from metri...
Parses a string following the object-builder naming scheme into the corresponding GroupByParameter. The implementation of the query parameter classes seems incomplete; follow-up is needed with the author of the query interface classes on the best approach. Right now, it seems like using the where filter is ...
179,395
from __future__ import annotations import itertools import logging from abc import ABC, abstractmethod from dataclasses import dataclass from enum import Enum from hashlib import sha1 from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, Sequence, Tuple, TypeVar, Union from dbt_semantic_interfaces.datac...
Produces a hash from a list of strings.
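A minimal sketch of such a helper, assuming the name `hash_items` and a simple fold of each string into one SHA-1 digest (the import of `sha1` appears in the prompt):

```python
from hashlib import sha1
from typing import Sequence

def hash_items(items: Sequence[str]) -> str:
    # Feed each string into a single SHA-1 digest; the result depends on order.
    hash_builder = sha1()
    for item in items:
        hash_builder.update(item.encode("utf-8"))
    return hash_builder.hexdigest()
```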
179,396
from __future__ import annotations import contextlib from abc import ABC, abstractmethod from dataclasses import InitVar, dataclass, field from datetime import date, datetime from enum import Enum from typing import Callable, ContextManager, Dict, Generic, Iterator, List, Optional, TypeVar from metricflow.dataflow.sql_...
null
179,397
from __future__ import annotations from enum import Enum from dbt_semantic_interfaces.enum_extension import assert_values_exhausted from dbt_semantic_interfaces.type_enums.aggregation_type import AggregationType The provided code snippet includes necessary dependencies for implementing the `is_expansive` function. Wri...
Expansive ≝ Op( X ∪ Y ∪ ...) = Op( Op(X) ∪ Op(Y) ∪ ...). NOTE: COUNT is only expansive because it's transformed into a SUM agg during model transformation
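To make the property concrete, here is an illustrative sketch with a hypothetical stand-in enum (not dbt's actual `AggregationType`):

```python
from enum import Enum

class Agg(Enum):  # hypothetical stand-in for AggregationType
    SUM = "sum"
    MIN = "min"
    MAX = "max"
    COUNT = "count"
    AVERAGE = "average"

def is_expansive(agg: Agg) -> bool:
    # SUM, MIN and MAX satisfy Op(X ∪ Y) = Op(Op(X) ∪ Op(Y)); COUNT qualifies
    # only because it is rewritten into a SUM during model transformation.
    # AVERAGE does not: avg(avg(X), avg(Y)) != avg(X ∪ Y) in general.
    return agg in (Agg.SUM, Agg.MIN, Agg.MAX, Agg.COUNT)
```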
179,398
from __future__ import annotations from enum import Enum from dbt_semantic_interfaces.enum_extension import assert_values_exhausted from dbt_semantic_interfaces.type_enums.aggregation_type import AggregationType The provided code snippet includes necessary dependencies for implementing the `is_additive` function. Writ...
Indicates that if you sum values over a dimension grouping, you will still get an accurate result for this metric.
179,399
from __future__ import annotations from enum import Enum from dbt_semantic_interfaces.enum_extension import assert_values_exhausted from dbt_semantic_interfaces.type_enums.aggregation_type import AggregationType The provided code snippet includes necessary dependencies for implementing the `fill_nulls_with_0` function...
Indicates if charts should show 0 instead of null where there are gaps in data.
179,400
from __future__ import annotations from enum import Enum from dbt_semantic_interfaces.enum_extension import assert_values_exhausted from dbt_semantic_interfaces.type_enums.aggregation_type import AggregationType The provided code snippet includes necessary dependencies for implementing the `can_limit_dimension_values`...
Indicates if we can limit dimension values in charts. Currently, this means: 1. The dimensions we care about most are the ones with the highest numeric values 2. We can calculate the "other" column in the postprocessor (meaning the metric is expansive)
179,401
from __future__ import annotations The provided code snippet includes necessary dependencies for implementing the `assert_exactly_one_arg_set` function. Write a Python function `def assert_exactly_one_arg_set(**kwargs) -> None` to solve the following problem: Throws an assertion error if 0 or more than 1 argument is n...
Throws an assertion error if 0 or more than 1 argument is not None.
179,402
from __future__ import annotations The provided code snippet includes necessary dependencies for implementing the `assert_at_most_one_arg_set` function. Write a Python function `def assert_at_most_one_arg_set(**kwargs) -> None` to solve the following problem: Throws an assertion error if more than 1 argument is not No...
Throws an assertion error if more than 1 argument is not None.
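Both helpers follow directly from the stated signatures; a sketch (the assertion messages are assumptions):

```python
def assert_exactly_one_arg_set(**kwargs) -> None:
    """Throws an assertion error if 0 or more than 1 argument is not None."""
    num_set = sum(1 for value in kwargs.values() if value is not None)
    assert num_set == 1, f"Exactly one of {list(kwargs)} should be set, but {num_set} were."

def assert_at_most_one_arg_set(**kwargs) -> None:
    """Throws an assertion error if more than 1 argument is not None."""
    num_set = sum(1 for value in kwargs.values() if value is not None)
    assert num_set <= 1, f"At most one of {list(kwargs)} should be set, but {num_set} were."
```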
179,403
from __future__ import annotations import html import logging import textwrap from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Any, Generic, Sequence, TypeVar import jinja2 from metricflow.dag.dag_to_text import MetricFlowDagTextFormatter from metricflow.dag.id_prefix import IdPr...
Make a graphviz label that can be used for rendering to an image. The title will be in a large font, while the properties will be listed in a table in a smaller font.
179,404
from __future__ import annotations from typing import List, Sequence from metricflow.sql.sql_exprs import ( SqlAggregateFunctionExpression, SqlColumnReference, SqlColumnReferenceExpression, SqlExpressionNode, SqlFunction, ) class SqlExpressionNode(DagNode, Visitable, ABC): """An SQL expression ...
Makes a coalesced expression of the given column from the given table aliases. e.g. table_aliases = ["a", "b"] column_alias = "is_instant" -> COALESCE(a.is_instant, b.is_instant)
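MetricFlow builds this as a tree of `SqlExpressionNode` objects; rendered as plain SQL text, the transformation amounts to the following (function name assumed):

```python
def make_coalesced_expr(table_aliases: list[str], column_alias: str) -> str:
    # ["a", "b"], "is_instant" -> "COALESCE(a.is_instant, b.is_instant)"
    columns = ", ".join(f"{alias}.{column_alias}" for alias in table_aliases)
    return f"COALESCE({columns})"
```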
179,405
from __future__ import annotations import logging from collections import OrderedDict from dataclasses import dataclass from itertools import chain from typing import Dict, List, Optional, Sequence, Tuple from dbt_semantic_interfaces.references import MetricReference, SemanticModelReference from dbt_semantic_interfaces...
Creates select columns for instance sets coming from multiple table as defined in table_alias_to_instance_set. Used in cases where you join multiple tables and need to render select columns to access all of those.
179,406
from __future__ import annotations import logging from collections import OrderedDict from typing import List, Optional, Sequence, Tuple, Union from dbt_semantic_interfaces.enum_extension import assert_values_exhausted from dbt_semantic_interfaces.naming.keywords import METRIC_TIME_ELEMENT_NAME from dbt_semantic_interf...
Build an expression like "ds BETWEEN CAST('2020-01-01' AS TIMESTAMP) AND CAST('2020-01-02' AS TIMESTAMP)".
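Rendered as plain SQL text rather than expression nodes, the construction is roughly (name and signature assumed):

```python
def time_range_expr(column_alias: str, start_date: str, end_date: str) -> str:
    # "ds", "2020-01-01", "2020-01-02" ->
    # "ds BETWEEN CAST('2020-01-01' AS TIMESTAMP) AND CAST('2020-01-02' AS TIMESTAMP)"
    return (
        f"{column_alias} BETWEEN CAST('{start_date}' AS TIMESTAMP) "
        f"AND CAST('{end_date}' AS TIMESTAMP)"
    )
```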
179,407
from __future__ import annotations import collections from dataclasses import dataclass from typing import Dict, Optional, Sequence, Tuple from metricflow.specs.specs import MeasureSpec, NonAdditiveDimensionSpec class GroupedMeasureSpecsByAdditiveness: """Results after grouping measures by their additive properties...
Bucket the provided measure specs by: additive measures, and semi-additive measures containing the same non-additive dimension attributes.
179,408
from __future__ import annotations import datetime as dt import logging import pathlib import traceback from functools import update_wrapper, wraps from typing import Any, Callable, List, Optional import click from dateutil.parser import parse import metricflow.cli.custom_click_types as click_custom from metricflow.cli...
Common options for a query.
179,409
from __future__ import annotations import datetime as dt import logging import pathlib import traceback from functools import update_wrapper, wraps from typing import Any, Callable, List, Optional import click from dateutil.parser import parse import metricflow.cli.custom_click_types as click_custom from metricflow.cli...
null
179,410
from __future__ import annotations import datetime as dt import logging import pathlib import traceback from functools import update_wrapper, wraps from typing import Any, Callable, List, Optional import click from dateutil.parser import parse import metricflow.cli.custom_click_types as click_custom from metricflow.cli...
Decorator to handle exceptions.
179,411
from __future__ import annotations import datetime as dt import logging import pathlib import traceback from functools import update_wrapper, wraps from typing import Any, Callable, List, Optional import click from dateutil.parser import parse import metricflow.cli.custom_click_types as click_custom from metricflow.cli...
Decorator to output an error message and exit if caller is not in a root directory of a dbt project.
179,412
from __future__ import annotations import datetime as dt import logging import pathlib import signal import sys import tempfile import textwrap import time import warnings from importlib.metadata import version as pkg_version from typing import Callable, List, Optional, Sequence import click import jinja2 import pandas...
null
179,413
from __future__ import annotations import datetime as dt import logging import pathlib import signal import sys import tempfile import textwrap import time import warnings from importlib.metadata import version as pkg_version from typing import Callable, List, Optional, Sequence import click import jinja2 import pandas...
Run user through a tutorial.
179,414
from __future__ import annotations import datetime as dt import logging import pathlib import signal import sys import tempfile import textwrap import time import warnings from importlib.metadata import version as pkg_version from typing import Callable, List, Optional, Sequence import click import jinja2 import pandas...
Creates a new query with MetricFlow and assembles a MetricFlowQueryResult.
179,415
from __future__ import annotations import datetime as dt import logging import pathlib import signal import sys import tempfile import textwrap import time import warnings from importlib.metadata import version as pkg_version from typing import Callable, List, Optional, Sequence import click import jinja2 import pandas...
Retrieve metadata values about metrics/dimensions/entities/dimension values.
179,416
from __future__ import annotations import datetime as dt import logging import pathlib import signal import sys import tempfile import textwrap import time import warnings from importlib.metadata import version as pkg_version from typing import Callable, List, Optional, Sequence import click import jinja2 import pandas...
List the metrics with their available dimensions. Long dimension lists are automatically truncated; pass --show-all-dims to see all.
179,417
from __future__ import annotations import datetime as dt import logging import pathlib import signal import sys import tempfile import textwrap import time import warnings from importlib.metadata import version as pkg_version from typing import Callable, List, Optional, Sequence import click import jinja2 import pandas...
List all unique entities.
179,418
from __future__ import annotations import datetime as dt import logging import pathlib import signal import sys import tempfile import textwrap import time import warnings from importlib.metadata import version as pkg_version from typing import Callable, List, Optional, Sequence import click import jinja2 import pandas...
Performs a health check against the DW provided in the configs.
179,419
from __future__ import annotations import datetime as dt import logging import pathlib import signal import sys import tempfile import textwrap import time import warnings from importlib.metadata import version as pkg_version from typing import Callable, List, Optional, Sequence import click import jinja2 import pandas...
List all dimension values with the corresponding metrics.
179,420
from __future__ import annotations import datetime as dt import logging import pathlib import signal import sys import tempfile import textwrap import time import warnings from importlib.metadata import version as pkg_version from typing import Callable, List, Optional, Sequence import click import jinja2 import pandas...
Perform validations against the defined model configurations.
179,421
from __future__ import annotations from typing import Optional from dbt_semantic_interfaces.implementations.filters.where_filter import PydanticWhereFilter from dbt_semantic_interfaces.protocols import WhereFilter, WhereFilterIntersection The provided code snippet includes necessary dependencies for implementing the `...
Returns a single where filter that is equivalent to the given intersection.
179,422
from __future__ import annotations from datetime import date from typing import Union import pandas as pd from dbt_semantic_interfaces.enum_extension import ExtendedEnum, assert_values_exhausted from dbt_semantic_interfaces.type_enums.time_granularity import TimeGranularity The provided code snippet includes necessary...
Offset object to use for adjusting by one granularity period.
179,423
from __future__ import annotations from datetime import date from typing import Union import pandas as pd from dbt_semantic_interfaces.enum_extension import ExtendedEnum, assert_values_exhausted from dbt_semantic_interfaces.type_enums.time_granularity import TimeGranularity The provided code snippet includes necessary...
Indicates that this can only be calculated if query results display the first or last date of the period.
179,424
from __future__ import annotations from datetime import date from typing import Union import pandas as pd from dbt_semantic_interfaces.enum_extension import ExtendedEnum, assert_values_exhausted from dbt_semantic_interfaces.type_enums.time_granularity import TimeGranularity def is_period_start(time_granularity: TimeGra...
Adjust date_to_adjust to the start or end of the period, based on whether date_to_match is at the start or end of its period.
179,425
from __future__ import annotations from datetime import date from typing import Union import pandas as pd from dbt_semantic_interfaces.enum_extension import ExtendedEnum, assert_values_exhausted from dbt_semantic_interfaces.type_enums.time_granularity import TimeGranularity def string_to_time_granularity(s: str) -> Ti...
null
179,426
from __future__ import annotations from dataclasses import dataclass from typing import Sequence import rapidfuzz.fuzz import rapidfuzz.process class ScoredItem: # noqa: D item_str: str # fuzz scores from 0..100, and the higher the score, the better the match. score: float The provided code snippet includ...
Return the top items (by edit distance) in candidate_items that fuzzy-match the given item. Scores range from -1 to 0 inclusive.
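A sketch using rapidfuzz's `process.extract` (the function name and exact rescaling are assumptions; rapidfuzz scores run 0..100, so dividing by 100 and subtracting 1 maps them into -1..0):

```python
import rapidfuzz.fuzz
import rapidfuzz.process

def top_fuzzy_matches(item: str, candidate_items: list[str], max_matches: int = 3):
    # rapidfuzz scores run 0..100 (higher is better); rescale into -1..0.
    results = rapidfuzz.process.extract(
        item, candidate_items, scorer=rapidfuzz.fuzz.ratio, limit=max_matches
    )
    return [(candidate, score / 100.0 - 1.0) for candidate, score, _ in results]
```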
179,427
from __future__ import annotations from dbt_semantic_interfaces.implementations.semantic_manifest import ( PydanticSemanticManifest, ) from dbt_semantic_interfaces.transformations.boolean_measure import ( BooleanMeasureAggregationRule, ) from dbt_semantic_interfaces.transformations.convert_count import ConvertC...
Parse a PydanticSemanticManifest given the generated semantic_manifest json from dbt.
179,428
from __future__ import annotations import logging import time from collections import defaultdict from dataclasses import dataclass from typing import Dict, FrozenSet, List, Optional, Sequence, Set, Tuple from dbt_semantic_interfaces.enum_extension import assert_values_exhausted from dbt_semantic_interfaces.protocols.d...
Generates different versions of the given dimension, but at other valid time granularities.
179,429
from __future__ import annotations import datetime import functools import logging import os import platform import sys import time import traceback import uuid from hashlib import sha256 from typing import Callable, List, Optional, TypeVar from typing_extensions import ParamSpec from metricflow.random_id import random...
Decorator to make it easier to log telemetry for function calls. Using module_name instead of introspection since it seems more robust. Example call: @log_call(telemetry_reporter=telemetry_reporter, module_name=__name__) def test_function() -> str: return "foo"
179,430
import torch import dataclasses from dataclasses import dataclass import logging import os import io import json from typing import Sequence, Dict, List, Any import copy from EdgeGPT import Chatbot, ConversationStyle import transformers from torch.utils.data import Dataset from enum import auto, Enum async def edgegpt...
null
179,431
import torch import dataclasses from dataclasses import dataclass import logging import os import io import json from typing import Sequence, Dict, List, Any import copy from EdgeGPT import Chatbot, ConversationStyle import transformers from torch.utils.data import Dataset from enum import auto, Enum def _make_w_io_bas...
Dump a str or dictionary to a file in json format. Args: obj: An object to be written. f: A string path to the location on disk. mode: Mode for opening the file. indent: Indent for storing json dictionaries. default: A function to handle non-serializable entries; defaults to `str`.
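A sketch implementing the documented behavior (this mirrors the widely copied Stanford Alpaca utility; `_make_w_io_base` comes from the prompt itself):

```python
import io
import json
import os

def _make_w_io_base(f, mode: str):
    # Open the path for writing (creating parent dirs) unless f is already a file object.
    if not isinstance(f, io.IOBase):
        f_dirname = os.path.dirname(f)
        if f_dirname != "":
            os.makedirs(f_dirname, exist_ok=True)
        f = open(f, mode=mode)
    return f

def jdump(obj, f, mode="w", indent=4, default=str):
    """Dump a str or dictionary to a file in json format."""
    f = _make_w_io_base(f, mode)
    if isinstance(obj, (dict, list)):
        json.dump(obj, f, indent=indent, default=default)
    elif isinstance(obj, str):
        f.write(obj)
    else:
        raise ValueError(f"Unexpected type: {type(obj)}")
    f.close()
```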
179,432
import torch import dataclasses from dataclasses import dataclass import logging import os import io import json from typing import Sequence, Dict, List, Any import copy from EdgeGPT import Chatbot, ConversationStyle import transformers from torch.utils.data import Dataset from enum import auto, Enum def _make_r_io_bas...
Load a .json file into a dictionary.
179,433
import torch import dataclasses from dataclasses import dataclass import logging import os import io import json from typing import Sequence, Dict, List, Any import copy from EdgeGPT import Chatbot, ConversationStyle import transformers from torch.utils.data import Dataset from enum import auto, Enum The provided code...
Collects the state dict and dumps it to disk.
179,434
import torch import dataclasses from dataclasses import dataclass import logging import os import io import json from typing import Sequence, Dict, List, Any import copy from EdgeGPT import Chatbot, ConversationStyle import transformers from torch.utils.data import Dataset from enum import auto, Enum The provided code...
Resize tokenizer and embedding. Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
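A sketch of the usual approach (as in Stanford Alpaca's `smart_tokenizer_and_embedding_resize`): resize to the exact new vocabulary size, which is why the result may not be divisible by 64, and initialize new rows to the mean of the existing embeddings:

```python
def smart_tokenizer_and_embedding_resize(special_tokens_dict, tokenizer, model):
    # Resize to len(tokenizer) exactly; no padding up to a multiple of 64.
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    model.resize_token_embeddings(len(tokenizer))
    if num_new_tokens > 0:
        input_embeddings = model.get_input_embeddings().weight.data
        output_embeddings = model.get_output_embeddings().weight.data
        # Initialize the new rows to the mean of the pre-existing embeddings.
        input_embeddings[-num_new_tokens:] = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
        output_embeddings[-num_new_tokens:] = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
```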
179,435
import torch import dataclasses from dataclasses import dataclass import logging import os import io import json from typing import Sequence, Dict, List, Any import copy from EdgeGPT import Chatbot, ConversationStyle import transformers from torch.utils.data import Dataset from enum import auto, Enum default_conversati...
Given a list of sources, each of which is a conversation list, this transform: 1. adds the signal '### ' at the beginning of each sentence, with end signal '\n'; 2. concatenates conversations together; 3. tokenizes the concatenated conversation; 4. makes a deepcopy as the target, masking human words with IGNORE_INDEX.
179,436
import torch import dataclasses from dataclasses import dataclass import logging import os import io import json from typing import Sequence, Dict, List, Any import copy from EdgeGPT import Chatbot, ConversationStyle import transformers from torch.utils.data import Dataset from enum import auto, Enum class SupervisedDa...
Make dataset and collator for supervised fine-tuning.
179,437
import torch import dataclasses from dataclasses import dataclass import logging import os import io import json from typing import Sequence, Dict, List, Any import copy from EdgeGPT import Chatbot, ConversationStyle import transformers from torch.utils.data import Dataset from enum import auto, Enum def convert_vicun...
null
179,438
import torch import dataclasses from dataclasses import dataclass import logging import os import io import json from typing import Sequence, Dict, List, Any import copy from EdgeGPT import Chatbot, ConversationStyle import transformers from torch.utils.data import Dataset from enum import auto, Enum def generate_stre...
null
179,439
import os import logging import transformers from transformers import LlamaForCausalLM, LlamaTokenizer from peft import ( LoraConfig, get_peft_model, get_peft_model_state_dict, ) from omegaconf import OmegaConf from ingest_docs import ingest_docs from data_gen import launch_data_generation from langchain.em...
null
179,440
import os import time import utils import json import random import string import regex as re import pickle import openai import tqdm import asyncio import tiktoken from langchain.docstore.document import Document from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores.faiss import FAISS def find...
null
179,441
import os import time import utils import json import random import string import regex as re import pickle import openai import tqdm import asyncio import tiktoken from langchain.docstore.document import Document from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores.faiss import FAISS def laun...
null
179,442
import os import time import utils import json import random import string import regex as re import pickle import openai import tqdm import asyncio import tiktoken from langchain.docstore.document import Document from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores.faiss import FAISS def launc...
null
179,443
import pickle as pkl from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig import torch from utils import conv_v1_2, SeparatorStyle from utils import generate_stream as generate_stream_func import argparse import os.path as osp def args_parse(): parser = argparse.ArgumentParser(description='I...
null
179,444
import pickle as pkl from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig import torch from utils import conv_v1_2, SeparatorStyle from utils import generate_stream as generate_stream_func import argparse import os.path as osp class SimpleChatIO: def prompt_for_input(self, role) -> str: ...
null
179,445
from typing import List, Optional, Tuple import torch import transformers from transformers.models.llama.modeling_llama import apply_rotary_pos_emb from einops import rearrange from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func from flash_attn.bert_padding import unpad_input, pad_input def f...
null
179,446
import os from collections import deque from typing import Dict, List, Optional, Any from langchain import LLMChain, OpenAI, PromptTemplate from langchain.embeddings import OpenAIEmbeddings from langchain.llms import BaseLLM from langchain.vectorstores.base import VectorStore from pydantic import BaseModel, Field from ...
Get the next task.
179,447
import os from collections import deque from typing import Dict, List, Optional, Any from langchain import LLMChain, OpenAI, PromptTemplate from langchain.embeddings import OpenAIEmbeddings from langchain.llms import BaseLLM from langchain.vectorstores.base import VectorStore from pydantic import BaseModel, Field from ...
Prioritize tasks.
179,448
import os from collections import deque from typing import Dict, List, Optional, Any from langchain import LLMChain, OpenAI, PromptTemplate from langchain.embeddings import OpenAIEmbeddings from langchain.llms import BaseLLM from langchain.vectorstores.base import VectorStore from pydantic import BaseModel, Field from ...
Execute a task.
179,449
from memory_store import MemoryStorage from disk_store import DiskStorage class MemoryStorage: def __init__(self) -> None: self.data: dict[str, str] = {} def set(self, key: str, value: str) -> None: self.data[key] = value def get(self, key: str) -> str: return self.data.get(key, "...
null
179,450
from memory_store import MemoryStorage from disk_store import DiskStorage class DiskStorage: """ Implements the KV store on the disk Args: file_name (str): name of the file where all the data will be written. Just passing the file name will save the data in the current directory. You m...
null
179,451
from memory_store import MemoryStorage from disk_store import DiskStorage class DiskStorage: """ Implements the KV store on the disk Args: file_name (str): name of the file where all the data will be written. Just passing the file name will save the data in the current directory. You m...
null
179,452
import struct import typing HEADER_SIZE: typing.Final[int] = 12 def encode_header(timestamp: int, key_size: int, value_size: int) -> bytes: """ encode_header encodes the data into bytes using the `HEADER_FORMAT` format string Args: timestamp (int): Timestamp at which we wrote the KV pair to the ...
encode_kv encodes the KV pair into bytes Args: timestamp (int): Timestamp at which we wrote the KV pair to the disk. The value is current time in seconds since the epoch. key (str): the key (cannot exceed the maximum size) value (str): the value (cannot exceed the maximum size) Returns: tuple containing the size of enc...
179,453
import struct import typing HEADER_FORMAT: typing.Final[str] = "<LLL" HEADER_SIZE: typing.Final[int] = 12 The provided code snippet includes necessary dependencies for implementing the `decode_kv` function. Write a Python function `def decode_kv(data: bytes) -> tuple[int, str, str]` to solve the following problem: dec...
decode_kv decodes the data bytes into appropriate KV pair Args: data (bytes): byte object containing the encoded KV data Returns: A tuple containing: timestamp (int): timestamp in epoch seconds key (str): the key value (str): the value Raises: struct.error: when parameters don't match the specific type / size IndexErro...
179,454
import struct import typing HEADER_FORMAT: typing.Final[str] = "<LLL" The provided code snippet includes necessary dependencies for implementing the `decode_header` function. Write a Python function `def decode_header(data: bytes) -> tuple[int, int, int]` to solve the following problem: decode_header decodes the bytes...
decode_header decodes the bytes into header using the `HEADER_FORMAT` format string Args: data (bytes): byte object containing the encoded header data Returns: A tuple containing: timestamp (int): timestamp in epoch seconds key_size (int): size of the key value_size (int): size of the value Raises: struct.error: when p...
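The three rows above pin down the on-disk format: a 12-byte `"<LLL"` header (timestamp, key size, value size as little-endian uint32) followed by the raw key and value bytes. A self-contained sketch consistent with those docstrings:

```python
import struct
import typing

HEADER_FORMAT: typing.Final[str] = "<LLL"  # three little-endian unsigned 32-bit ints
HEADER_SIZE: typing.Final[int] = 12

def encode_header(timestamp: int, key_size: int, value_size: int) -> bytes:
    return struct.pack(HEADER_FORMAT, timestamp, key_size, value_size)

def encode_kv(timestamp: int, key: str, value: str) -> tuple[int, bytes]:
    # Returns (size of the encoded record, the record bytes).
    key_bytes, value_bytes = key.encode("utf-8"), value.encode("utf-8")
    data = encode_header(timestamp, len(key_bytes), len(value_bytes)) + key_bytes + value_bytes
    return len(data), data

def decode_header(data: bytes) -> tuple[int, int, int]:
    timestamp, key_size, value_size = struct.unpack(HEADER_FORMAT, data)
    return timestamp, key_size, value_size

def decode_kv(data: bytes) -> tuple[int, str, str]:
    timestamp, key_size, value_size = decode_header(data[:HEADER_SIZE])
    key = data[HEADER_SIZE:HEADER_SIZE + key_size].decode("utf-8")
    value = data[HEADER_SIZE + key_size:HEADER_SIZE + key_size + value_size].decode("utf-8")
    return timestamp, key, value
```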
179,455
import os.path import pathlib import json from datetime import date def year_month(date_str): # extract string of year-month from date, eg: '2023-03' return str(date_str)[:7]
null
179,456
from __future__ import annotations import asyncio import itertools import json import logging import os import base64 import telegram from telegram import Message, MessageEntity, Update, ChatMember, constants from telegram.ext import CallbackContext, ContextTypes from usage_tracker import UsageTracker The provided cod...
Returns the text of a message, excluding any bot commands.
179,457
from __future__ import annotations import asyncio import itertools import json import logging import os import base64 import telegram from telegram import Message, MessageEntity, Update, ChatMember, constants from telegram.ext import CallbackContext, ContextTypes from usage_tracker import UsageTracker def is_group_chat...
Gets the stream cutoff values for the message length
179,458
from __future__ import annotations import asyncio import itertools import json import logging import os import base64 import telegram from telegram import Message, MessageEntity, Update, ChatMember, constants from telegram.ext import CallbackContext, ContextTypes from usage_tracker import UsageTracker The provided cod...
Splits a string into chunks of a given size.
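A minimal sketch (name assumed; Telegram caps messages at 4096 characters, hence the default):

```python
def split_into_chunks(text: str, chunk_size: int = 4096) -> list[str]:
    return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
```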
179,459
from __future__ import annotations import asyncio import itertools import json import logging import os import base64 import telegram from telegram import Message, MessageEntity, Update, ChatMember, constants from telegram.ext import CallbackContext, ContextTypes from usage_tracker import UsageTracker def get_thread_id...
Wraps a coroutine while repeatedly sending a chat action to the user.
179,460
from __future__ import annotations import asyncio import itertools import json import logging import os import base64 import telegram from telegram import Message, MessageEntity, Update, ChatMember, constants from telegram.ext import CallbackContext, ContextTypes from usage_tracker import UsageTracker The provided cod...
Edit a message with retry logic in case of failure (e.g. broken markdown) :param context: The context to use :param chat_id: The chat id to edit the message in :param message_id: The message id to edit :param text: The text to edit the message with :param markdown: Whether to use markdown parse mode :param is_inline: W...
179,461
from __future__ import annotations import asyncio import itertools import json import logging import os import base64 import telegram from telegram import Message, MessageEntity, Update, ChatMember, constants from telegram.ext import CallbackContext, ContextTypes from usage_tracker import UsageTracker The provided cod...
Handles errors in the telegram-python-bot library.
179,462
from __future__ import annotations import asyncio import itertools import json import logging import os import base64 import telegram from telegram import Message, MessageEntity, Update, ChatMember, constants from telegram.ext import CallbackContext, ContextTypes from usage_tracker import UsageTracker async def is_user...
Checks if the user is allowed to use the bot.
179,463
from __future__ import annotations import asyncio import itertools import json import logging import os import base64 import telegram from telegram import Message, MessageEntity, Update, ChatMember, constants from telegram.ext import CallbackContext, ContextTypes from usage_tracker import UsageTracker def get_remaining...
Checks if the user reached their usage limit. Initializes UsageTracker for user and guest when needed. :param config: The bot configuration object :param usage: The usage tracker object :param update: Telegram update object :param is_inline: Boolean flag for inline queries :return: Boolean indicating if the user has a ...
179,464
from __future__ import annotations import asyncio import itertools import json import logging import os import base64 import telegram from telegram import Message, MessageEntity, Update, ChatMember, constants from telegram.ext import CallbackContext, ContextTypes from usage_tracker import UsageTracker The provided cod...
Add chat request to usage tracker :param usage: The usage tracker object :param config: The bot configuration object :param user_id: The user id :param used_tokens: The number of tokens used
179,465
from __future__ import annotations import asyncio import itertools import json import logging import os import base64 import telegram from telegram import Message, MessageEntity, Update, ChatMember, constants from telegram.ext import CallbackContext, ContextTypes from usage_tracker import UsageTracker The provided cod...
Checks if the dict contains a direct result that can be sent directly to the user :param response: The response value :return: Boolean indicating if the result is a direct result
179,466
from __future__ import annotations import asyncio import itertools import json import logging import os import base64 import telegram from telegram import Message, MessageEntity, Update, ChatMember, constants from telegram.ext import CallbackContext, ContextTypes from usage_tracker import UsageTracker def get_thread_id...
Handles a direct result from a plugin
179,467
from __future__ import annotations import asyncio import itertools import json import logging import os import base64 import telegram from telegram import Message, MessageEntity, Update, ChatMember, constants from telegram.ext import CallbackContext, ContextTypes from usage_tracker import UsageTracker def encode_image...
null
179,468
from __future__ import annotations import asyncio import itertools import json import logging import os import base64 import telegram from telegram import Message, MessageEntity, Update, ChatMember, constants from telegram.ext import CallbackContext, ContextTypes from usage_tracker import UsageTracker def decode_image...
null
179,469
from __future__ import annotations import datetime import logging import os import tiktoken import openai import requests import json import httpx import io from datetime import date from calendar import monthrange from PIL import Image from tenacity import retry, stop_after_attempt, wait_fixed, retry_if_exception_type...
Gets the default number of max tokens for the given model. :param model: The model name :return: The default number of max tokens
179,470
from __future__ import annotations import datetime import logging import os import tiktoken import openai import requests import json import httpx import io from datetime import date from calendar import monthrange from PIL import Image from tenacity import retry, stop_after_attempt, wait_fixed, retry_if_exception_type...
Whether the given model supports functions
179,471
from __future__ import annotations import datetime import logging import os import tiktoken import openai import requests import json import httpx import io from datetime import date from calendar import monthrange from PIL import Image from tenacity import retry, stop_after_attempt, wait_fixed, retry_if_exception_type...
Return the translated text for a key in the specified bot_language. Keys and translations can be found in translations.json.