id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
179,372 | import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from pathlib import Path
import datasets
import torch
from build_dataset import build_instruction_dataset, DataCollatorForSupervisedDataset
import transformers
from transformers import (
CONFIG_MAPPING,
AutoConfig,
BitsAndBytesConfig,
LlamaForCausalLM,
LlamaTokenizer,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import send_example_telemetry
from transformers.utils.versions import require_version
from peft import LoraConfig, TaskType, get_peft_model, PeftModel, get_peft_model_state_dict
from peft.tuners.lora import LoraLayer
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
The provided code snippet includes necessary dependencies for implementing the `prepare_model_for_kbit_training` function. Write a Python function `def prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)` to solve the following problem:
r""" This method wraps the entire protocol for preparing a model before running a training. This includes: 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm head to fp32 Args: model, (`transformers.PreTrainedModel`): The loaded model from `transformers`
Here is the function:
def prepare_model_for_kbit_training(model, use_gradient_checkpointing=True):
    r"""
    Prepare a ``transformers`` model for k-bit (INT8/INT4) training.

    This freezes the base model, upcasts any remaining half-precision
    parameters of a k-bit model to fp32, casts all ``*norm`` layers to fp32,
    and (optionally) enables gradient checkpointing plus input-embedding
    gradients.

    Args:
        model (`transformers.PreTrainedModel`):
            The loaded model from `transformers`.
        use_gradient_checkpointing (`bool`):
            Whether to turn on gradient checkpointing for k-bit models.

    Returns:
        The same model instance, modified in place.
    """
    is_kbit = getattr(model, "is_loaded_in_8bit", False) or getattr(
        model, "is_loaded_in_4bit", False
    )
    # Freeze every parameter of the base model.
    for _, param in model.named_parameters():
        param.requires_grad = False
    # For k-bit models, upcast leftover half-precision weights to fp32.
    if is_kbit:
        for param in model.parameters():
            if param.dtype in (torch.float16, torch.bfloat16):
                param.data = param.data.to(torch.float32)
    # Cast every *norm module (layernorm / rmsnorm) to fp32 for stability.
    for module_name, module in model.named_modules():
        if "norm" in module_name:
            module.to(torch.float32)
    if is_kbit and use_gradient_checkpointing:
        if hasattr(model, "enable_input_require_grads"):
            model.enable_input_require_grads()
        else:
            # Older transformers versions: hook the embedding output instead.
            def _require_grads_hook(module, _input, output):
                output.requires_grad_(True)

            model.get_input_embeddings().register_forward_hook(_require_grads_hook)
        # Trade compute for memory during backprop.
        model.gradient_checkpointing_enable()
    return model
179,373 | import logging
import os
from dataclasses import dataclass
from typing import Dict, Sequence, Union, List
import datasets
import torch
from datasets import load_dataset, concatenate_datasets
import transformers
# Label id ignored by the cross-entropy loss (masks prompt tokens).
IGNORE_INDEX = -100
# BUG FIX: was logging.getLogger('__name__'), which creates a logger literally
# named "__name__" instead of one named after this module.
logger = logging.getLogger(__name__)
# Llama-2 chat prompt template (bilingual system prompt).
PROMPT_TEMPLATE = (
    "[INST] <<SYS>>\n"
    "You are a helpful assistant. 你是一个乐于助人的助手。\n"
    "<</SYS>>\n\n{instruction} [/INST]"
)
def build_instruction_dataset(data_path: Union[List[str], str],
                              tokenizer: transformers.PreTrainedTokenizer,
                              max_seq_length: int, data_cache_dir=None,
                              preprocessing_num_workers=None,
                              ):
    """Build a tokenized instruction-tuning dataset from one or more JSON files.

    Each example is rendered with ``PROMPT_TEMPLATE``; prompt tokens are masked
    out of the labels with ``IGNORE_INDEX`` so the loss only covers the
    response. Tokenized results are cached on disk and reloaded on later runs.

    Args:
        data_path: a JSON file path or a list of paths.
        tokenizer: tokenizer used for both prompt and response.
        max_seq_length: inputs/labels are truncated to this length.
        data_cache_dir: where to store the tokenized cache; defaults to each
            data file's own directory.
        preprocessing_num_workers: forwarded to ``Dataset.map(num_proc=...)``.

    Returns:
        A single ``datasets.Dataset`` (the concatenated ``train`` splits).
    """

    def tokenization(examples):
        sources = []
        targets = []
        prompt = PROMPT_TEMPLATE
        for instruction, input, output in zip(examples['instruction'], examples['input'], examples['output']):
            if input is not None and input != "":
                instruction = instruction + '\n' + input
            source = prompt.format_map({'instruction': instruction})
            target = f"{output}{tokenizer.eos_token}"
            sources.append(source)
            targets.append(target)
        tokenized_sources = tokenizer(sources, return_attention_mask=False)
        # No BOS on the target: it is a continuation of the source.
        tokenized_targets = tokenizer(targets, return_attention_mask=False, add_special_tokens=False)
        all_input_ids = []
        all_labels = []
        for s, t in zip(tokenized_sources['input_ids'], tokenized_targets['input_ids']):
            input_ids = torch.LongTensor(s + t)[:max_seq_length]
            # Mask the prompt so loss is only computed on the response tokens.
            labels = torch.LongTensor([IGNORE_INDEX] * len(s) + t)[:max_seq_length]
            assert len(input_ids) == len(labels)
            all_input_ids.append(input_ids)
            all_labels.append(labels)
        return {'input_ids': all_input_ids, 'labels': all_labels}

    logger.info("building dataset...")
    all_datasets = []
    if not isinstance(data_path, (list, tuple)):
        data_path = [data_path]
    for file in data_path:
        # BUG FIX: previously the first file's directory was written back into
        # ``data_cache_dir``, so every later file was cached next to the first
        # one. Compute the cache directory per file instead.
        cache_dir = data_cache_dir if data_cache_dir is not None else str(os.path.dirname(file))
        cache_path = os.path.join(cache_dir, os.path.basename(file).split('.')[0] + f"_{max_seq_length}")
        os.makedirs(cache_path, exist_ok=True)
        try:
            processed_dataset = datasets.load_from_disk(cache_path)
            logger.info(f'training datasets-{file} has been loaded from disk')
        except Exception:
            raw_dataset = load_dataset("json", data_files=file, cache_dir=cache_path)
            tokenized_dataset = raw_dataset.map(
                tokenization,
                batched=True,
                num_proc=preprocessing_num_workers,
                remove_columns=["instruction", "input", "output"],
                keep_in_memory=False,
                desc="preprocessing on dataset",
            )
            processed_dataset = tokenized_dataset
            processed_dataset.save_to_disk(cache_path)
        processed_dataset.set_format('torch')
        all_datasets.append(processed_dataset['train'])
    return concatenate_datasets(all_datasets)
179,374 | from datasets import load_dataset
import torch
import random
import numpy as np
import json
from transformers import LlamaTokenizer, AutoModelForCausalLM
from transformers import BitsAndBytesConfig
from tqdm import tqdm
import os
import argparse
import sys
from attn_and_long_ctx_patches import apply_attention_patch, apply_ntk_scaling_patch
# Parsed command-line options; the argparse ``parser`` is built elsewhere in
# this script (not visible in this chunk).
args = parser.parse_args()
# Decoding hyper-parameters shared by every ``model.generate`` call below.
DO_SAMPLE =True
TEMPERATURE = 0.2
REPETITION_PENALTY = 1.1
TOP_P = 0.95
TOP_K = 40
# Optionally patch the model with NTK-aware RoPE scaling for long contexts.
if args.use_ntk:
    apply_ntk_scaling_patch(args.alpha)
def fill_llama2_prompt_template(instruction, with_inst=True, with_system_prompt=True, system_prompt=DEFAULT_SYSTEM_PROMPT):
    """Wrap *instruction* in the Llama-2 chat format.

    Args:
        instruction: the raw user prompt.
        with_inst: if False, return the instruction unchanged (base models).
        with_system_prompt: include the <<SYS>> system prompt block.
        system_prompt: system prompt used when ``with_system_prompt`` is True.

    Returns:
        The formatted prompt string.
    """
    if with_inst is False:
        return instruction
    if with_system_prompt is True:
        return TEMPLATE.format_map({'instruction': instruction, 'system_prompt': system_prompt})
    # BUG FIX: the template placeholder was previously returned verbatim
    # ("[INST] {instruction} [/INST]") instead of being filled in.
    return f"[INST] {instruction} [/INST]"
def get_pred(model, tokenizer, data, max_length, max_gen, prompt_format, dataset, device):
    """Generate predictions for one LongBench dataset split.

    Args:
        model: the causal LM used for generation.
        tokenizer: its tokenizer.
        data: iterable of LongBench JSON objects.
        max_length: max prompt length in tokens; longer prompts are truncated
            in the middle.
        max_gen: maximum number of new tokens to generate.
        prompt_format: dataset-specific format string filled from each item.
        dataset: dataset name (controls prompt wrapping and stop criteria).
        device: device the input tensors are moved to.

    Returns:
        List of dicts with ``pred``, ``answers``, ``all_classes``, ``length``.
    """
    preds = []
    for json_obj in tqdm(data):
        prompt = prompt_format.format(**json_obj)
        # truncate to fit max_length (we suggest truncate in the middle, since the left and right side may contain crucial instructions)
        tokenized_prompt = tokenizer(prompt, truncation=False, return_tensors="pt").input_ids[0]
        if len(tokenized_prompt) > max_length:
            half = int(max_length/2)
            # Keep the head and tail halves of the prompt; drop the middle.
            prompt = tokenizer.decode(tokenized_prompt[:half], skip_special_tokens=True)+tokenizer.decode(tokenized_prompt[-half:], skip_special_tokens=True)
        # ``args`` is the script-level argparse namespace (defined above).
        if args.with_inst == 'auto':
            if dataset not in ["trec", "triviaqa", "samsum", "lsht", "lcc", "repobench-p"]: # chat models are better off without build prompts on these tasks
                prompt = fill_llama2_prompt_template(instruction=prompt)
        elif args.with_inst == 'true':
            prompt = fill_llama2_prompt_template(instruction=prompt, with_inst = True)
        else:
            prompt = fill_llama2_prompt_template(instruction=prompt, with_inst = False)
        input_data = tokenizer(prompt, truncation=False, return_tensors="pt").to(device)
        context_length = input_data.input_ids.shape[-1]
        if dataset == "samsum": # prevent illegal output on samsum (model endlessly repeat "\nDialogue"), might be a prompting issue
            output = model.generate(
                **input_data,
                max_new_tokens=max_gen,
                num_beams=1,
                do_sample=DO_SAMPLE,
                repetition_penalty = REPETITION_PENALTY,
                top_p = TOP_P,
                top_k = TOP_K,
                temperature=TEMPERATURE,
                min_length=context_length+1,
                # Also stop at a newline, not only at EOS.
                eos_token_id=[tokenizer.eos_token_id, tokenizer.encode("\n", add_special_tokens=False)[-1]],
            )[0]
        else:
            output = model.generate(
                **input_data,
                max_new_tokens=max_gen,
                num_beams=1,
                do_sample=DO_SAMPLE,
                repetition_penalty = REPETITION_PENALTY,
                top_p = TOP_P,
                top_k = TOP_K,
                temperature=TEMPERATURE
            )[0]
        # Decode only the newly generated tokens (skip the prompt).
        pred = tokenizer.decode(output[context_length:], skip_special_tokens=True)
        preds.append({"pred": pred, "answers": json_obj["answers"], "all_classes": json_obj["all_classes"], "length": json_obj["length"]})
    return preds
179,375 | from datasets import load_dataset
import torch
import random
import numpy as np
import json
from transformers import LlamaTokenizer, AutoModelForCausalLM
from transformers import BitsAndBytesConfig
from tqdm import tqdm
import os
import argparse
import sys
from attn_and_long_ctx_patches import apply_attention_patch, apply_ntk_scaling_patch
def seed_everything(seed):
    """Seed Python, NumPy and PyTorch RNGs for reproducible runs.

    Also switches cuDNN into deterministic mode (at some speed cost).
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels; disable auto-tuner benchmarking.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
179,376 | import os
import json
import argparse
import numpy as np
from metrics import (
qa_f1_score,
rouge_zh_score,
qa_f1_zh_score,
rouge_score,
classification_score,
retrieval_score,
retrieval_zh_score,
count_score,
code_sim_score,
)
def parse_args(args=None):
    """Parse the evaluation command-line flags.

    Args:
        args: optional argument list; defaults to ``sys.argv[1:]``.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--output_dir')
    arg_parser.add_argument('--e', action='store_true', help="Evaluate on LongBench-E")
    return arg_parser.parse_args(args)
179,377 | import os
import json
import argparse
import numpy as np
from metrics import (
qa_f1_score,
rouge_zh_score,
qa_f1_zh_score,
rouge_score,
classification_score,
retrieval_score,
retrieval_zh_score,
count_score,
code_sim_score,
)
# Maps each LongBench dataset name to the metric function used to score it.
dataset2metric = {
    "narrativeqa": qa_f1_score,
    "qasper": qa_f1_score,
    "multifieldqa_en": qa_f1_score,
    "multifieldqa_zh": qa_f1_zh_score,
    "hotpotqa": qa_f1_score,
    "2wikimqa": qa_f1_score,
    "musique": qa_f1_score,
    "dureader": rouge_zh_score,
    "gov_report": rouge_score,
    "qmsum": rouge_score,
    "multi_news": rouge_score,
    "vcsum": rouge_zh_score,
    "trec": classification_score,
    "triviaqa": qa_f1_score,
    "samsum": rouge_score,
    "lsht": classification_score,
    "passage_retrieval_en": retrieval_score,
    "passage_count": count_score,
    "passage_retrieval_zh": retrieval_zh_score,
    "lcc": code_sim_score,
    "repobench-p": code_sim_score,
}
def scorer_e(dataset, predictions, answers, lengths, all_classes):
    """Score predictions on LongBench-E, bucketed by context length.

    Each prediction is scored against every reference answer and the best
    match is kept. Returns average scores (0-100) per length bucket.
    """
    bucketed = {"0-4k": [], "4-8k": [], "8k+": []}
    for prediction, ground_truths, length in zip(predictions, answers, lengths):
        # Classification-style tasks: only the first non-empty line counts.
        if dataset in ["trec", "triviaqa", "samsum", "lsht"]:
            prediction = prediction.lstrip('\n').split('\n')[0]
        best = 0.
        for ground_truth in ground_truths:
            best = max(best, dataset2metric[dataset](prediction, ground_truth, all_classes=all_classes))
        if length < 4000:
            bucket = "0-4k"
        elif length < 8000:
            bucket = "4-8k"
        else:
            bucket = "8k+"
        bucketed[bucket].append(best)
    return {key: round(100 * np.mean(values), 2) for key, values in bucketed.items()}
179,378 | import os
import json
import argparse
import numpy as np
from metrics import (
qa_f1_score,
rouge_zh_score,
qa_f1_zh_score,
rouge_score,
classification_score,
retrieval_score,
retrieval_zh_score,
count_score,
code_sim_score,
)
# Maps each LongBench dataset name to the metric function used to score it.
dataset2metric = {
    "narrativeqa": qa_f1_score,
    "qasper": qa_f1_score,
    "multifieldqa_en": qa_f1_score,
    "multifieldqa_zh": qa_f1_zh_score,
    "hotpotqa": qa_f1_score,
    "2wikimqa": qa_f1_score,
    "musique": qa_f1_score,
    "dureader": rouge_zh_score,
    "gov_report": rouge_score,
    "qmsum": rouge_score,
    "multi_news": rouge_score,
    "vcsum": rouge_zh_score,
    "trec": classification_score,
    "triviaqa": qa_f1_score,
    "samsum": rouge_score,
    "lsht": classification_score,
    "passage_retrieval_en": retrieval_score,
    "passage_count": count_score,
    "passage_retrieval_zh": retrieval_zh_score,
    "lcc": code_sim_score,
    "repobench-p": code_sim_score,
}
def scorer(dataset, predictions, answers, all_classes):
    """Compute the average metric score (scaled to 0-100) for *dataset*.

    Each prediction is scored against every reference answer; the best match
    is kept. Classification-style tasks are trimmed to their first line.
    """
    running_total = 0.
    for prediction, ground_truths in zip(predictions, answers):
        if dataset in ["trec", "triviaqa", "samsum", "lsht"]:
            prediction = prediction.lstrip('\n').split('\n')[0]
        best = 0.
        for ground_truth in ground_truths:
            best = max(best, dataset2metric[dataset](prediction, ground_truth, all_classes=all_classes))
        running_total += best
    return round(100 * running_total / len(predictions), 2)
179,379 | import re
import string
import jieba
from fuzzywuzzy import fuzz
import difflib
from collections import Counter
from rouge import Rouge
def count_score(prediction, ground_truth, **kwargs):
    """Fraction of numbers mentioned in *prediction* equal to *ground_truth*."""
    found = re.findall(r"\d+", prediction)
    if not found:
        return 0.0
    hits = sum(1 for num in found if str(num) == str(ground_truth))
    return float(hits / len(found))
179,380 | import re
import string
import jieba
from fuzzywuzzy import fuzz
import difflib
from collections import Counter
from rouge import Rouge
def retrieval_score(prediction, ground_truth, **kwargs):
    """Fraction of numbers in *prediction* matching the gold paragraph id.

    The gold id is taken from a ground truth of the form "Paragraph <n>".
    """
    gold_id = re.findall(r'Paragraph (\d+)', ground_truth)[0]
    mentioned = re.findall(r"\d+", prediction)
    if not mentioned:
        return 0.0
    hits = sum(1 for num in mentioned if str(num) == str(gold_id))
    return float(hits / len(mentioned))
179,381 | import re
import string
import jieba
from fuzzywuzzy import fuzz
import difflib
from collections import Counter
from rouge import Rouge
def retrieval_zh_score(prediction, ground_truth, **kwargs):
    """Fraction of numbers in *prediction* matching the gold paragraph id.

    The gold id is taken from a ground truth of the form "段落<n>".
    """
    gold_id = re.findall(r'段落(\d+)', ground_truth)[0]
    mentioned = re.findall(r"\d+", prediction)
    if not mentioned:
        return 0.0
    hits = sum(1 for num in mentioned if str(num) == str(gold_id))
    return float(hits / len(mentioned))
179,382 | import re
import string
import jieba
from fuzzywuzzy import fuzz
import difflib
from collections import Counter
from rouge import Rouge
def code_sim_score(prediction, ground_truth, **kwargs):
    """Fuzzy-match the first code-looking line of *prediction* to the truth.

    Lines containing backticks or comment markers are skipped so that only
    actual code is compared. Returns a similarity ratio in [0, 1].
    """
    candidate = ""
    for line in prediction.lstrip('\n').split('\n'):
        # Skip markdown fences and commented lines.
        if '`' in line or '#' in line or '//' in line:
            continue
        candidate = line
        break
    return fuzz.ratio(candidate, ground_truth) / 100
179,383 | import re
import string
import jieba
from fuzzywuzzy import fuzz
import difflib
from collections import Counter
from rouge import Rouge
def classification_score(prediction, ground_truth, **kwargs):
    """Score a classification answer against the gold label.

    Exact-match pass: collect every class name mentioned in the prediction,
    dropping names that are strict substrings of the gold label. If the gold
    label is among the matches the score is 1/len(matches), else 0. If no
    class name is mentioned at all, fall back to picking the class most
    similar to the prediction and score 1.0 only if it is the gold label.
    """
    all_classes = kwargs["all_classes"]
    em_match_list = [name for name in all_classes if name in prediction]
    # BUG FIX: iterate over a copy — removing from a list while iterating it
    # skips elements.
    for match_term in list(em_match_list):
        # Drop matches that are mere substrings of the gold label
        # (e.g. "cat" when the gold label is "catfish").
        if match_term in ground_truth and match_term != ground_truth:
            em_match_list.remove(match_term)
    # BUG FIX: the original tested ``em_match_list != 0``, which is always
    # True for a list, so the fuzzy fallback below was unreachable.
    if em_match_list:
        return 1.0 / len(em_match_list) if ground_truth in em_match_list else 0.0
    # Fallback: nearest class by sequence similarity.
    best_match = None
    highest_similarity = 0
    for candidate in all_classes:  # renamed from ``string`` (shadowed the stdlib module)
        similarity = difflib.SequenceMatcher(None, candidate, prediction).ratio()
        if similarity > highest_similarity:
            highest_similarity = similarity
            best_match = candidate
    return float(best_match == ground_truth)
179,384 | import re
import string
import jieba
from fuzzywuzzy import fuzz
import difflib
from collections import Counter
from rouge import Rouge
def rouge_score(prediction, ground_truth, **kwargs):
    # NOTE(review): the body of ``rouge_score`` appears to have been lost in
    # extraction — as written it only defines a nested helper and implicitly
    # returns None. Upstream LongBench computes a ROUGE-L F1 via ``Rouge``
    # here; confirm against the original source before relying on this.
    def rouge_zh_score(prediction, ground_truth, **kwargs):
        # Segment Chinese text with jieba, then delegate to the English scorer.
        prediction = " ".join(list(jieba.cut(prediction, cut_all=False)))
        ground_truth = " ".join(list(jieba.cut(ground_truth, cut_all=False)))
        score = rouge_score(prediction, ground_truth)
        return score
179,385 | import re
import string
import jieba
from fuzzywuzzy import fuzz
import difflib
from collections import Counter
from rouge import Rouge
def normalize_answer(s):
    """Lower-case *s*, strip punctuation, articles and extra whitespace."""
    text = s.lower()
    # Remove ASCII punctuation characters.
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    # Drop English articles.
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    # Collapse runs of whitespace.
    return " ".join(text.split())
def f1_score(prediction, ground_truth, **kwargs):
    """Token-level F1 between two token sequences (compared as bags of tokens)."""
    overlap = sum((Counter(prediction) & Counter(ground_truth)).values())
    if overlap == 0:
        return 0
    precision = overlap / len(prediction)
    recall = overlap / len(ground_truth)
    return 2 * precision * recall / (precision + recall)
def qa_f1_score(prediction, ground_truth, **kwargs):
    """QA F1: normalize both strings, split on whitespace, compute token F1."""
    pred_tokens = normalize_answer(prediction).split()
    gold_tokens = normalize_answer(ground_truth).split()
    return f1_score(pred_tokens, gold_tokens)
179,386 | import re
import string
import jieba
from fuzzywuzzy import fuzz
import difflib
from collections import Counter
from rouge import Rouge
def normalize_zh_answer(s):
    """Lower-case *s* and strip all (ASCII + Chinese) punctuation and spaces."""
    cn_punctuation = "!?。。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
    punctuation = set(string.punctuation + cn_punctuation)
    stripped = "".join(ch for ch in s.lower() if ch not in punctuation)
    # Remove all whitespace (Chinese text has no meaningful word spacing).
    return "".join(stripped.split())
def f1_score(prediction, ground_truth, **kwargs):
    """Bag-of-tokens F1 between *prediction* and *ground_truth* sequences."""
    shared = Counter(prediction) & Counter(ground_truth)
    num_same = sum(shared.values())
    if num_same == 0:
        return 0
    p = num_same / len(prediction)
    r = num_same / len(ground_truth)
    return (2 * p * r) / (p + r)
def qa_f1_zh_score(prediction, ground_truth, **kwargs):
    """Chinese QA F1: jieba-segment, normalize each token, drop empties, F1."""
    def _tokens(text):
        segmented = jieba.cut(text, cut_all=False)
        normalized = (normalize_zh_answer(tok) for tok in segmented)
        return [tok for tok in normalized if tok]
    return f1_score(_tokens(prediction), _tokens(ground_truth))
179,387 | import argparse
import json
import os
import gc
import torch
import peft
from transformers import LlamaTokenizer
from transformers.modeling_utils import dtype_byte_size
from huggingface_hub import snapshot_download
import re
import shutil
def jsonload(filename):
    """Load and return the JSON document stored at *filename*."""
    with open(filename, "r") as fp:
        return json.load(fp)
179,388 | import argparse
import json
import os
import gc
import torch
import peft
from transformers import LlamaTokenizer
from transformers.modeling_utils import dtype_byte_size
from huggingface_hub import snapshot_download
import re
import shutil
def translate_state_dict_key(k):
    """Map a HuggingFace Llama state-dict key to its original PTH name.

    Returns None for keys with no PTH counterpart (rotary ``inv_freq`` buffers
    and LoRA weights); raises NotImplementedError for unknown keys.
    """
    k = k.replace("base_model.model.", "")
    top_level = {
        "model.embed_tokens.weight": "tok_embeddings.weight",
        "model.norm.weight": "norm.weight",
        "lm_head.weight": "output.weight",
    }
    if k in top_level:
        return top_level[k]
    if k.startswith("model.layers."):
        layer = k.split(".")[2]
        # HF suffix -> PTH name within a layer.
        suffix_map = {
            ".self_attn.q_proj.weight": "attention.wq.weight",
            ".self_attn.k_proj.weight": "attention.wk.weight",
            ".self_attn.v_proj.weight": "attention.wv.weight",
            ".self_attn.o_proj.weight": "attention.wo.weight",
            ".mlp.gate_proj.weight": "feed_forward.w1.weight",
            ".mlp.down_proj.weight": "feed_forward.w2.weight",
            ".mlp.up_proj.weight": "feed_forward.w3.weight",
            ".input_layernorm.weight": "attention_norm.weight",
            ".post_attention_layernorm.weight": "ffn_norm.weight",
        }
        for suffix, pth_name in suffix_map.items():
            if k.endswith(suffix):
                return f"layers.{layer}.{pth_name}"
        # Rotary buffers and LoRA adapter weights have no PTH counterpart.
        if k.endswith("rotary_emb.inv_freq") or "lora" in k:
            return None
        print(layer, k)
        raise NotImplementedError
    print(k)
    raise NotImplementedError
def unpermute(w):
    # Reverse the head-wise permutation that HF's Llama conversion script
    # applies to the wq/wk weights, restoring the original PTH layout.
    # NOTE(review): relies on the module-level globals ``n_heads`` and ``dim``
    # being set elsewhere in this script before this is called — confirm.
    return (
        w.view(n_heads, 2, dim // n_heads // 2, dim).transpose(1, 2).reshape(dim, dim)
    )
The provided code snippet includes necessary dependencies for implementing the `save_shards` function. Write a Python function `def save_shards(model_sd, num_shards: int, prefix="", verbose=False)` to solve the following problem:
Convert and save the HF format weights to PTH format weights
Here is the function:
def save_shards(model_sd, num_shards: int, prefix="", verbose=False):
    """
    Convert and save the HF format weights to PTH format weights
    """
    # NOTE(review): relies on the module-level global ``output_dir`` and on
    # ``translate_state_dict_key`` / ``unpermute`` defined elsewhere in this
    # script — confirm they are in scope before calling.
    with torch.no_grad():
        if num_shards == 1:
            # Single shard: translate every key and write one file.
            new_state_dict = {}
            for k, v in model_sd.items():
                new_k = translate_state_dict_key(k)
                if new_k is not None:
                    if "wq" in new_k or "wk" in new_k:
                        # q/k projections must be un-permuted back to PTH layout.
                        new_state_dict[new_k] = unpermute(v)
                    else:
                        new_state_dict[new_k] = v
            os.makedirs(output_dir, exist_ok=True)
            print(f"Saving shard 1 of {num_shards} into {output_dir}/{prefix}consolidated.00.pth")
            torch.save(new_state_dict, output_dir + f"/{prefix}consolidated.00.pth")
        else:
            # Multiple shards: split each tensor along the dimension used by
            # the original model-parallel layout (or replicate norm weights).
            new_state_dicts = [dict() for _ in range(num_shards)]
            for k in list(model_sd.keys()):
                v = model_sd[k]
                new_k = translate_state_dict_key(k)
                if new_k is not None:
                    if new_k=='tok_embeddings.weight':
                        # Embeddings are split along the hidden dimension.
                        assert v.size(1)%num_shards==0
                        splits = v.split(v.size(1)//num_shards,dim=1)
                    elif new_k=='output.weight':
                        # Output head is split along the vocab dimension; the
                        # last shard absorbs any remainder.
                        if v.size(0)%num_shards==0:
                            splits = v.split(v.size(0)//num_shards,dim=0)
                        else:
                            size_list = [v.size(0)//num_shards] * num_shards
                            size_list[-1] += v.size(0)%num_shards
                            splits = v.split(size_list, dim=0) # 13B: size_list == [24976,24977]
                    elif new_k=='norm.weight':
                        # Norm weights are replicated on every shard.
                        splits = [v] * num_shards
                    elif 'ffn_norm.weight' in new_k:
                        splits = [v] * num_shards
                    elif 'attention_norm.weight' in new_k:
                        splits = [v] * num_shards
                    elif 'w1.weight' in new_k:
                        splits = v.split(v.size(0)//num_shards,dim=0)
                    elif 'w2.weight' in new_k:
                        splits = v.split(v.size(1)//num_shards,dim=1)
                    elif 'w3.weight' in new_k:
                        splits = v.split(v.size(0)//num_shards,dim=0)
                    elif 'wo.weight' in new_k:
                        splits = v.split(v.size(1)//num_shards,dim=1)
                    elif 'wv.weight' in new_k:
                        splits = v.split(v.size(0)//num_shards,dim=0)
                    elif "wq.weight" in new_k or "wk.weight" in new_k:
                        # Un-permute q/k back to PTH layout before splitting
                        # across attention heads.
                        v = unpermute(v)
                        splits = v.split(v.size(0)//num_shards,dim=0)
                    else:
                        print(f"Unexpected key {new_k}")
                        raise ValueError
                    if verbose:
                        print(f"Processing {new_k}")
                    for sd,split in zip(new_state_dicts,splits):
                        sd[new_k] = split.clone()
                        del split
                    del splits
                # Drop the source tensor eagerly to keep peak memory low.
                del model_sd[k],v
                gc.collect() # Effectively enforce garbage collection
            os.makedirs(output_dir, exist_ok=True)
            for i,new_state_dict in enumerate(new_state_dicts):
                print(f"Saving shard {i+1} of {num_shards} into {output_dir}/{prefix}consolidated.0{i}.pth")
                torch.save(new_state_dict, output_dir + f"/{prefix}consolidated.0{i}.pth")
179,389 | import argparse
import json
import os
import gc
import torch
import peft
from transformers import LlamaTokenizer
from transformers.modeling_utils import dtype_byte_size
from huggingface_hub import snapshot_download
import re
import shutil
def merge_shards(output_dir, num_shards: int):
    """Merge per-layer checkpoint shards into whole-model shard files.

    Looks for files named ``L<layer>-consolidated.<shard>.pth`` in
    *output_dir*, merges all layers belonging to shard index ``i`` into a
    single ``consolidated.0<i>.pth``, then deletes the per-layer files.

    Args:
        output_dir: directory containing the per-layer shard files.
        num_shards: number of final shard files to produce.
    """
    # BUG FIX: patterns are now raw strings with escaped dots. The originals
    # were non-raw ('\d' is an invalid escape on newer Pythons) and relied on
    # '.' matching the literal dot only by accident.
    ckpt_filenames = sorted([f for f in os.listdir(output_dir) if re.match(r'L(\d+)-consolidated\.(\d+)\.pth', f)])
    for i in range(num_shards):
        shards_filenames = sorted([f for f in ckpt_filenames if re.match(rf'L(\d+)-consolidated\.0{i}\.pth', f)])
        print(f"Loading {shards_filenames} ...")
        shards_dicts = [torch.load(os.path.join(output_dir, fn)) for fn in shards_filenames]
        shards_merged = {}
        for d in shards_dicts:
            shards_merged |= d
        print(f"Saving the merged shard to " + os.path.join(output_dir, f"consolidated.0{i}.pth"))
        torch.save(shards_merged, os.path.join(output_dir, f"consolidated.0{i}.pth"))
        print("Cleaning up...")
        del shards_merged
        for d in shards_dicts:
            del d
        del shards_dicts
        gc.collect()  # Effectively enforce garbage collection
        # Remove the now-merged per-layer files.
        for fn in shards_filenames:
            os.remove(os.path.join(output_dir, fn))
179,390 | from __future__ import annotations
import datetime as dt
import logging
from typing import Dict, List, Optional
from dateutil.parser import parse
from dbt_semantic_interfaces.protocols.semantic_manifest import SemanticManifest
from dbt_semantic_interfaces.validations.semantic_manifest_validator import SemanticManifestValidator
from dbt_semantic_interfaces.validations.validator_helpers import SemanticManifestValidationResults
from metricflow.engine.metricflow_engine import (
MetricFlowEngine,
MetricFlowExplainResult,
MetricFlowQueryRequest,
MetricFlowQueryResult,
)
from metricflow.engine.models import Dimension, Metric
from metricflow.model.semantic_manifest_lookup import SemanticManifestLookup
from metricflow.protocols.sql_client import SqlClient
from metricflow.sql.optimizer.optimization_levels import SqlQueryOptimizationLevel
The provided code snippet includes necessary dependencies for implementing the `_convert_to_datetime` function. Write a Python function `def _convert_to_datetime(datetime_str: Optional[str]) -> Optional[dt.datetime]` to solve the following problem:
Callback to convert string to datetime given as an iso8601 timestamp.
Here is the function:
def _convert_to_datetime(datetime_str: Optional[str]) -> Optional[dt.datetime]:
"""Callback to convert string to datetime given as an iso8601 timestamp."""
if datetime_str is None:
return None
try:
return parse(datetime_str)
except Exception:
raise ValueError(f"'{datetime_str}' is not a valid iso8601 timestamp") | Callback to convert string to datetime given as an iso8601 timestamp. |
179,391 | from __future__ import annotations
import logging
import pprint
from collections.abc import Mapping
from dataclasses import fields, is_dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Sized, Union
from pydantic import BaseModel
from metricflow.mf_logging.formatting import indent
def mf_pformat(  # type: ignore
    obj: Any,
    max_line_length: int = 120,
    indent_prefix: str = " ",
    include_object_field_names: bool = True,
    include_none_object_fields: bool = False,
    include_empty_object_fields: bool = False,
) -> str:
    """Print objects in a pretty way for logging / test snapshots.

    In Python 3.10, the pretty printer class will support dataclasses, so we can remove this once we're on
    3.10. Also tried the prettyprint package with dataclasses, but that prints full names for the classes
    e.g. a.b.MyClass and it also always added line breaks, even if an object could fit on one line, so
    preferring to not use that for compactness.

    e.g.
        metricflow.specs.DimensionSpec(
            element_name='country',
            entity_links=()
        ),

    Instead, the below will print something like:

        DimensionSpec(element_name='country', entity_links=())

    Also, this simplifies the object representation in some cases (e.g. Enums) and provides options for a more compact
    string. This is an improvement on pformat_big_objects() in dbt-semantic-interfaces to be more compact and easier
    to read.

    Args:
        obj: The object to convert to string.
        max_line_length: If the string representation is going to be longer than this, split into multiple lines.
        indent_prefix: The prefix to use for hierarchical indents.
        include_object_field_names: Include field names when printing objects - e.g. Foo(bar='baz') vs Foo('baz')
        include_none_object_fields: Include fields with a None value - e.g. Foo(bar=None) vs Foo()
        include_empty_object_fields: Include fields that are empty - e.g. Foo(bar=()) vs Foo()

    Returns:
        A string representation of the object that's useful for logging / debugging.
    """
    # Since this is used in logging calls, wrap with except so that a bug here doesn't result in something breaking.
    try:
        # NOTE(review): ``MetricFlowPrettyFormatter`` and ``logger`` are
        # defined elsewhere in this module.
        formatter = MetricFlowPrettyFormatter(
            indent_prefix=indent_prefix,
            max_line_length=max_line_length,
            include_object_field_names=include_object_field_names,
            include_none_object_fields=include_none_object_fields,
            include_empty_object_fields=include_empty_object_fields,
        )
        return formatter.pretty_format(obj)
    except Exception:
        # This automatically includes the call trace.
        logger.exception("Error pretty printing due to an exception - using str() instead.")
        return str(obj)
def indent(message: str, indent_level: int = 1, indent_prefix: str = " ") -> str:  # noqa: D
    """Prefix each line of *message* with ``indent_level`` copies of ``indent_prefix``."""
    prefix = indent_prefix * indent_level
    return textwrap.indent(message, prefix=prefix)
The provided code snippet includes necessary dependencies for implementing the `mf_pformat_many` function. Write a Python function `def mf_pformat_many( # type: ignore description: str, obj_dict: Dict[str, Any], max_line_length: int = 120, indent_prefix: str = " ", include_object_field_names: bool = True, include_none_object_fields: bool = False, include_empty_object_fields: bool = False, ) -> str` to solve the following problem:
Prints many objects in an indented form.
Here is the function:
def mf_pformat_many(  # type: ignore
    description: str,
    obj_dict: Dict[str, Any],
    max_line_length: int = 120,
    indent_prefix: str = " ",
    include_object_field_names: bool = True,
    include_none_object_fields: bool = False,
    include_empty_object_fields: bool = False,
) -> str:
    """Prints many objects in an indented form.

    Emits *description* as a heading, then one "key:\\n<indented value>"
    section per entry of *obj_dict*, sections separated by blank lines.
    """
    sections: List[str] = [description]
    for key, value in obj_dict.items():
        formatted_value = mf_pformat(
            obj=value,
            # Leave room for the indentation that will be applied below.
            max_line_length=max(0, max_line_length - len(indent_prefix)),
            indent_prefix=indent_prefix,
            include_object_field_names=include_object_field_names,
            include_none_object_fields=include_none_object_fields,
            include_empty_object_fields=include_empty_object_fields,
        )
        indented_value = indent(formatted_value, indent_prefix=indent_prefix)
        sections.append(f"{key}:\n{indented_value}")
    return "\n\n".join(sections)
179,392 | from __future__ import annotations
import functools
import logging
import time
from contextlib import contextmanager
from typing import Callable, Iterator, TypeVar
from typing_extensions import ParamSpec
logger = logging.getLogger(__name__)
ReturnType = TypeVar("ReturnType")
ParametersType = ParamSpec("ParametersType")
The provided code snippet includes necessary dependencies for implementing the `log_runtime` function. Write a Python function `def log_runtime( runtime_warning_threshold: float = 5.0, ) -> Callable[[Callable[ParametersType, ReturnType]], Callable[ParametersType, ReturnType]]` to solve the following problem:
Logs how long a function took to run. If the runtime exceeds runtime_warning_threshold, then a warning is logged.
Here is the function:
def log_runtime(
    runtime_warning_threshold: float = 5.0,
) -> Callable[[Callable[ParametersType, ReturnType]], Callable[ParametersType, ReturnType]]:
    """Decorator factory that logs how long the wrapped function took to run.

    A warning is logged when the runtime exceeds *runtime_warning_threshold*
    seconds.
    """

    def decorator(wrapped_function: Callable[ParametersType, ReturnType]) -> Callable[ParametersType, ReturnType]:
        # wraps() preserves attributes like __qualname__ and the docstring.
        @functools.wraps(wrapped_function)
        def _inner(*args: ParametersType.args, **kwargs: ParametersType.kwargs) -> ReturnType:
            # __qualname__ includes the path, e.g. MyClass.my_function.
            function_name = f"{wrapped_function.__qualname__}()"
            start_time = time.time()
            logger.info(f"Starting {function_name}")
            try:
                return wrapped_function(*args, **kwargs)
            finally:
                runtime = time.time() - start_time
                logger.info(f"Finished {function_name} in {runtime:.1f}s")
                if runtime > runtime_warning_threshold:
                    logger.warning(f"{function_name} is slow with a runtime of {runtime:.1f}s")

        return _inner

    return decorator
179,393 | from __future__ import annotations
import functools
import logging
import time
from contextlib import contextmanager
from typing import Callable, Iterator, TypeVar
from typing_extensions import ParamSpec
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `log_block_runtime` function. Write a Python function `def log_block_runtime(code_block_name: str, runtime_warning_threshold: float = 5.0) -> Iterator[None]` to solve the following problem:
Logs the runtime of the enclosed code block.
Here is the function:
def log_block_runtime(code_block_name: str, runtime_warning_threshold: float = 5.0) -> Iterator[None]:
    """Logs the runtime of the enclosed code block.

    Args:
        code_block_name: Human-readable label used in the emitted log lines.
        runtime_warning_threshold: Runtime (in seconds) above which a warning is logged.
    """
    # NOTE(review): this generator is intended as a `with` target; the `contextmanager`
    # import above suggests an `@contextmanager` decorator belongs here — confirm it was
    # dropped in this extract.
    start_time = time.time()
    description = f"code_block_name={repr(code_block_name)}"
    logger.info(f"Starting {description}")
    yield
    # No try/finally here: if the enclosed block raises, the finish/warning logs are skipped.
    runtime = time.time() - start_time
    logger.info(f"Finished {description} in {runtime:.1f}s")
    if runtime > runtime_warning_threshold:
logger.warning(f"{description} is slow with a runtime of {runtime:.1f}s") | Logs the runtime of the enclosed code block. |
179,394 | from __future__ import annotations
from typing import List
from dbt_semantic_interfaces.call_parameter_sets import ParseWhereFilterException
from dbt_semantic_interfaces.implementations.filters.where_filter import PydanticWhereFilter
from metricflow.naming.linkable_spec_name import StructuredLinkableSpecName
from metricflow.protocols.query_parameter import GroupByParameter
from metricflow.query.query_exceptions import InvalidQueryException
from metricflow.specs.query_param_implementations import DimensionOrEntityParameter, TimeDimensionParameter
class StructuredLinkableSpecName:
    """Parse a qualified name into different parts.
    e.g. listing__ds__week ->
    entity_links: ["listing"]
    element_name: "ds"
    granularity: TimeGranularity.WEEK
    """

    # NOTE(review): bare class-level annotations with no __init__, methods called without an
    # instance (from_name, date_part_suffix) and attribute-style access to qualified_name —
    # presumably @dataclass / @staticmethod / @property decorators were dropped in this
    # extract; confirm against the original module.
    entity_link_names: Tuple[str, ...]
    element_name: str
    time_granularity: Optional[TimeGranularity] = None
    date_part: Optional[DatePart] = None

    def from_name(qualified_name: str) -> StructuredLinkableSpecName:
        """Construct from a name e.g. listing__ds__month."""
        name_parts = qualified_name.split(DUNDER)
        # No dunder, e.g. "ds"
        if len(name_parts) == 1:
            return StructuredLinkableSpecName(entity_link_names=(), element_name=name_parts[0])
        # A trailing "extract_<date_part>" suffix is rejected outright.
        for date_part in DatePart:
            if name_parts[-1] == StructuredLinkableSpecName.date_part_suffix(date_part=date_part):
                raise ValueError(
                    "Dunder syntax not supported for querying date_part. Use `group_by` object syntax instead."
                )
        # Check whether the last dunder-separated part names a time granularity.
        associated_granularity = None
        for granularity in TimeGranularity:
            if name_parts[-1] == granularity.value:
                associated_granularity = granularity
        # Has a time granularity
        if associated_granularity:
            # e.g. "ds__month"
            if len(name_parts) == 2:
                return StructuredLinkableSpecName(
                    entity_link_names=(), element_name=name_parts[0], time_granularity=associated_granularity
                )
            # e.g. "messages__ds__month"
            return StructuredLinkableSpecName(
                entity_link_names=tuple(name_parts[:-2]),
                element_name=name_parts[-2],
                time_granularity=associated_granularity,
            )
        # e.g. "messages__ds"
        else:
            return StructuredLinkableSpecName(entity_link_names=tuple(name_parts[:-1]), element_name=name_parts[-1])

    def qualified_name(self) -> str:
        """Return the full name form. e.g. ds or listing__ds__month.
        If date_part is specified, don't include granularity in qualified_name since it will not impact the result.
        """
        items = list(self.entity_link_names) + [self.element_name]
        if self.date_part:
            items.append(self.date_part_suffix(date_part=self.date_part))
        elif self.time_granularity:
            items.append(self.time_granularity.value)
        return DUNDER.join(items)

    def entity_prefix(self) -> Optional[str]:
        """Return the entity prefix. e.g. listing__ds__month -> listing."""
        if len(self.entity_link_names) > 0:
            return DUNDER.join(self.entity_link_names)
        return None

    def date_part_suffix(date_part: DatePart) -> str:
        """Suffix used for names with a date_part."""
        return f"extract_{date_part.value}"

    def granularity_free_qualified_name(self) -> str:
        """Renders the qualified name without the granularity suffix.
        In the list metrics and list dimensions outputs we want to render the qualified name of the dimension, but
        without including the base granularity for time dimensions. This method is useful in those contexts.
        Note: in most cases you should be using the qualified_name - this is only useful in cases where the
        Dimension set has de-duplicated TimeDimensions such that you never have more than one granularity
        in your set for each TimeDimension.
        """
        # Rebuild without time_granularity / date_part so the suffix is never rendered.
        return StructuredLinkableSpecName(
            entity_link_names=self.entity_link_names, element_name=self.element_name
        ).qualified_name
# Type alias: a group-by item in a query is either a dimension/entity or a time dimension.
GroupByParameter = Union[DimensionOrEntityQueryParameter, TimeDimensionQueryParameter]
class InvalidQueryException(Exception):
    """Raised when the parameters supplied to a MetricFlow query are invalid."""
class TimeDimensionParameter(ProtocolHint[TimeDimensionQueryParameter]):
    """Time dimension requested in a query."""

    def _implements_protocol(self) -> TimeDimensionQueryParameter:
        return self

    # NOTE(review): bare annotations plus __post_init__ imply a @dataclass decorator was
    # dropped in this extract — confirm against the original module.
    name: str
    grain: Optional[TimeGranularity] = None
    date_part: Optional[DatePart] = None

    def __post_init__(self) -> None:  # noqa: D
        # NOTE(review): the condition rejects a granularity embedded in `name`, but the
        # message talks about `grain`/`date_part` — confirm the intended error text.
        parsed_name = StructuredLinkableSpecName.from_name(self.name)
        if parsed_name.time_granularity:
            raise ValueError("Must use object syntax for `grain` parameter if `date_part` is requested.")

    def query_resolver_input(self) -> ResolverInputForGroupByItem:  # noqa: D
        # Compare granularity only when the caller actually requested a grain.
        fields_to_compare = [
            ParameterSetField.ELEMENT_NAME,
            ParameterSetField.ENTITY_LINKS,
            ParameterSetField.DATE_PART,
        ]
        if self.grain is not None:
            fields_to_compare.append(ParameterSetField.TIME_GRANULARITY)
        name_structure = StructuredLinkableSpecName.from_name(self.name.lower())
        return ResolverInputForGroupByItem(
            input_obj=self,
            input_obj_naming_scheme=ObjectBuilderNamingScheme(),
            spec_pattern=EntityLinkPattern(
                EntityLinkPatternParameterSet.from_parameters(
                    fields_to_compare=tuple(fields_to_compare),
                    element_name=name_structure.element_name,
                    entity_links=tuple(EntityReference(link_name) for link_name in name_structure.entity_link_names),
                    time_granularity=self.grain,
                    date_part=self.date_part,
                )
            ),
        )
class DimensionOrEntityParameter(ProtocolHint[DimensionOrEntityQueryParameter]):
    """Group by parameter requested in a query.
    Might represent an entity or a dimension.
    """

    name: str

    def _implements_protocol(self) -> DimensionOrEntityQueryParameter:
        return self

    def query_resolver_input(self) -> ResolverInputForGroupByItem:  # noqa: D
        # Unlike TimeDimensionParameter, granularity/date_part are never part of the match.
        name_structure = StructuredLinkableSpecName.from_name(self.name.lower())
        return ResolverInputForGroupByItem(
            input_obj=self,
            input_obj_naming_scheme=ObjectBuilderNamingScheme(),
            spec_pattern=EntityLinkPattern(
                EntityLinkPatternParameterSet.from_parameters(
                    fields_to_compare=(
                        ParameterSetField.ELEMENT_NAME,
                        ParameterSetField.ENTITY_LINKS,
                        ParameterSetField.DATE_PART,
                    ),
                    element_name=name_structure.element_name,
                    entity_links=tuple(EntityReference(link_name) for link_name in name_structure.entity_link_names),
                    time_granularity=None,
                    date_part=None,
                )
            ),
        )
The provided code snippet includes necessary dependencies for implementing the `parse_object_builder_naming_scheme` function. Write a Python function `def parse_object_builder_naming_scheme(group_by_item_name: str) -> GroupByParameter` to solve the following problem:
Parses a string following the object-builder naming scheme into the corresponding GroupByParameter. The implementation of the query parameter classes seems incomplete and there needs to be follow up with the author of the query interface classes for the best approach. Right now, it seems like using the where filter is the only way to handle this conversion. However, it seems like this functionality should be abstracted into a module that handles operations related to the object-builder naming scheme. There is an additional issue where conversion from the element name / entity path to the name field in the query parameter objects requires going through StructuredLinkableSpecName. TODO: Replace this method once ideal implementations are in place.
Here is the function:
def parse_object_builder_naming_scheme(group_by_item_name: str) -> GroupByParameter:
    """Parses a string following the object-builder naming scheme into the corresponding GroupByParameter.

    The implementation of the query parameter classes seems incomplete and there needs to be follow up with the author
    of the query interface classes for the best approach. Right now, it seems like using the where filter is the only
    way to handle this conversion. However, it seems like this functionality should be abstracted into a module that
    handles operations related to the object-builder naming scheme. There is an additional issue where conversion
    from the element name / entity path to the name field in the query parameter objects requires going through
    StructuredLinkableSpecName.

    TODO: Replace this method once ideal implementations are in place.
    """
    # Piggy-back on the where-filter parser: wrap the name in a jinja expression and let it
    # extract the dimension / entity / time-dimension call parameter sets.
    try:
        call_parameter_sets = PydanticWhereFilter(
            where_sql_template="{{ " + group_by_item_name + " }}"
        ).call_parameter_sets
    except ParseWhereFilterException as e:
        raise InvalidQueryException(f"Error parsing `{group_by_item_name}`") from e
    group_by_parameters: List[GroupByParameter] = []
    for dimension_call_parameter_set in call_parameter_sets.dimension_call_parameter_sets:
        if len(dimension_call_parameter_set.entity_path) != 1:
            raise NotImplementedError(
                f"DimensionOrEntityParameter only supports a single item in the entity path. Got "
                f"{dimension_call_parameter_set} while handling `{group_by_item_name}`"
            )
        group_by_parameters.append(
            DimensionOrEntityParameter(
                name=StructuredLinkableSpecName(
                    element_name=dimension_call_parameter_set.dimension_reference.element_name,
                    entity_link_names=tuple(
                        entity_reference.element_name for entity_reference in dimension_call_parameter_set.entity_path
                    ),
                ).qualified_name
            )
        )
    for entity_call_parameter_set in call_parameter_sets.entity_call_parameter_sets:
        if len(entity_call_parameter_set.entity_path) != 1:
            raise NotImplementedError(
                f"DimensionOrEntityParameter only supports a single item in the entity path. Got "
                f"{entity_call_parameter_set} while handling `{group_by_item_name}`"
            )
        group_by_parameters.append(
            DimensionOrEntityParameter(
                name=StructuredLinkableSpecName(
                    element_name=entity_call_parameter_set.entity_reference.element_name,
                    entity_link_names=tuple(
                        entity_reference.element_name for entity_reference in entity_call_parameter_set.entity_path
                    ),
                ).qualified_name
            )
        )
    # Time dimensions additionally carry an optional granularity (grain).
    for time_dimension_parameter_set in call_parameter_sets.time_dimension_call_parameter_sets:
        group_by_parameters.append(
            TimeDimensionParameter(
                name=StructuredLinkableSpecName(
                    element_name=time_dimension_parameter_set.time_dimension_reference.element_name,
                    entity_link_names=tuple(
                        entity_reference.element_name for entity_reference in time_dimension_parameter_set.entity_path
                    ),
                ).qualified_name,
                grain=time_dimension_parameter_set.time_granularity,
            )
        )
    # A single group-by name must resolve to exactly one parameter.
    if len(group_by_parameters) != 1:
        raise InvalidQueryException(
            f"Did not get exactly 1 parameter while parsing `{group_by_item_name}`. Got: {group_by_parameters}"
        )
return group_by_parameters[0] | Parses a string following the object-builder naming scheme into the corresponding GroupByParameter. The implementation of the query parameter classes seems incomplete and there needs to be follow up with the author of the query interface classes for the best approach. Right now, it seems like using the where filter is the only way to handle this conversion. However, it seems like this functionality should be abstracted into a module that handles operations related to the object-builder naming scheme. There is an additional issue where conversion from the element name / entity path to the name field in the query parameter objects requires going through StructuredLinkableSpecName. TODO: Replace this method once ideal implementations are in place. |
179,395 | from __future__ import annotations
import itertools
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from hashlib import sha1
from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, Sequence, Tuple, TypeVar, Union
from dbt_semantic_interfaces.dataclass_serialization import SerializableDataclass
from dbt_semantic_interfaces.implementations.metric import PydanticMetricTimeWindow
from dbt_semantic_interfaces.naming.keywords import DUNDER, METRIC_TIME_ELEMENT_NAME
from dbt_semantic_interfaces.protocols import MetricTimeWindow, WhereFilterIntersection
from dbt_semantic_interfaces.references import (
DimensionReference,
EntityReference,
MeasureReference,
MetricReference,
TimeDimensionReference,
)
from dbt_semantic_interfaces.type_enums.aggregation_type import AggregationType
from dbt_semantic_interfaces.type_enums.date_part import DatePart
from dbt_semantic_interfaces.type_enums.time_granularity import TimeGranularity
from typing_extensions import override
from metricflow.aggregation_properties import AggregationState
from metricflow.collection_helpers.merger import Mergeable
from metricflow.filters.time_constraint import TimeRangeConstraint
from metricflow.naming.linkable_spec_name import StructuredLinkableSpecName
from metricflow.sql.sql_bind_parameters import SqlBindParameters
from metricflow.sql.sql_column_type import SqlColumnType
from metricflow.sql.sql_plan import SqlJoinType
from metricflow.visitor import VisitorOutputT
# Type alias: Python value types that may appear as a SQL column value / bind parameter.
SqlColumnType = Union[str, int, float, datetime.datetime, datetime.date, bool]
The provided code snippet includes necessary dependencies for implementing the `hash_items` function. Write a Python function `def hash_items(items: Sequence[SqlColumnType]) -> str` to solve the following problem:
Produces a hash from a list of strings.
Here is the function:
def hash_items(items: Sequence[SqlColumnType]) -> str:
    """Produces a hash from a list of strings.

    Computes a SHA-1 hex digest over the str() form of each item; intended for stable
    identification of value lists, not for any security-sensitive purpose.
    """
    hash_builder = sha1()
    for item in items:
        # NOTE(review): items are concatenated with no separator, so e.g. ["ab"] and
        # ["a", "b"] produce the same digest — confirm this collision is acceptable here.
        hash_builder.update(str(item).encode("utf-8"))
return hash_builder.hexdigest() | Produces a hash from a list of strings. |
179,396 | from __future__ import annotations
import contextlib
from abc import ABC, abstractmethod
from dataclasses import InitVar, dataclass, field
from datetime import date, datetime
from enum import Enum
from typing import Callable, ContextManager, Dict, Generic, Iterator, List, Optional, TypeVar
from metricflow.dataflow.sql_column import SqlColumn
from metricflow.dataflow.sql_table import SqlTable
from metricflow.inference.context.base import InferenceContext, InferenceContextProvider
from metricflow.protocols.sql_client import SqlClient
class SqlTable(PydanticCustomInputParser, FrozenBaseModel):
    """Represents a reference to a SQL table."""

    db_name: Optional[str] = None
    schema_name: str
    table_name: str

    # NOTE(review): `cls` parameter without a visible @classmethod, and `from_string`
    # without self/cls — decorators (@classmethod/@staticmethod/@property) appear to have
    # been dropped in this extract; confirm against the original module.
    def _from_yaml_value(cls, input: PydanticParseableValueType) -> SqlTable:
        """Parses a SqlTable from string input found in a user-provided model specification.
        Raises a ValueError on any non-string input, as all user-provided specifications of table entities
        should be strings conforming to the expectations defined in the from_string method.
        """
        if isinstance(input, str):
            return SqlTable.from_string(input)
        else:
            raise ValueError(
                f"SqlTable inputs from model configs are expected to always be of type string, but got type "
                f"{type(input)} with value: {input}"
            )

    def from_string(sql_str: str) -> SqlTable:  # noqa: D
        # Accepts "<schema>.<table>" or "<db>.<schema>.<table>"; anything else is an error.
        sql_str_split = sql_str.split(".")
        if len(sql_str_split) == 2:
            return SqlTable(schema_name=sql_str_split[0], table_name=sql_str_split[1])
        elif len(sql_str_split) == 3:
            return SqlTable(db_name=sql_str_split[0], schema_name=sql_str_split[1], table_name=sql_str_split[2])
        raise RuntimeError(
            f"Invalid input for a SQL table, expected form '<schema>.<table>' or '<db>.<schema>.<table>' "
            f"but got: {sql_str}"
        )

    def sql(self) -> str:
        """Return the snippet that can be used for use in SQL queries."""
        if self.db_name:
            return f"{self.db_name}.{self.schema_name}.{self.table_name}"
        return f"{self.schema_name}.{self.table_name}"

    def parts_tuple(self) -> Union[Tuple[str, str], Tuple[str, str, str]]:
        """Return a tuple of the sql table parts."""
        if self.db_name:
            return (self.db_name, self.schema_name, self.table_name)
        else:
            return (self.schema_name, self.table_name)
# Default progress callback: a generator that yields once per table and reports nothing.
def _default_table_progress(table: SqlTable, index: int, total: int) -> Iterator[None]:
yield | null |
179,397 | from __future__ import annotations
from enum import Enum
from dbt_semantic_interfaces.enum_extension import assert_values_exhausted
from dbt_semantic_interfaces.type_enums.aggregation_type import AggregationType
The provided code snippet includes necessary dependencies for implementing the `is_expansive` function. Write a Python function `def is_expansive(agg_type: AggregationType) -> bool` to solve the following problem:
Expansive ≝ Op( X ∪ Y ∪ ...) = Op( Op(X) ∪ Op(Y) ∪ ...). NOTE: COUNT is only expansive because it's transformed into a SUM agg during model transformation
Here is the function:
def is_expansive(agg_type: AggregationType) -> bool:
    """Expansive ≝ Op( X ∪ Y ∪ ...) = Op( Op(X) ∪ Op(Y) ∪ ...).

    NOTE: COUNT is only expansive because it's transformed into a SUM agg during model transformation
    """
    # Closed list of aggregation types for which re-aggregating partial aggregates is exact.
    return agg_type in (
        AggregationType.SUM,
        AggregationType.MIN,
        AggregationType.MAX,
        AggregationType.SUM_BOOLEAN,
        AggregationType.COUNT,
) | Expansive ≝ Op( X ∪ Y ∪ ...) = Op( Op(X) ∪ Op(Y) ∪ ...). NOTE: COUNT is only expansive because it's transformed into a SUM agg during model transformation |
179,398 | from __future__ import annotations
from enum import Enum
from dbt_semantic_interfaces.enum_extension import assert_values_exhausted
from dbt_semantic_interfaces.type_enums.aggregation_type import AggregationType
The provided code snippet includes necessary dependencies for implementing the `is_additive` function. Write a Python function `def is_additive(agg_type: AggregationType) -> bool` to solve the following problem:
Indicates that if you sum values over a dimension grouping, you will still get an accurate result for this metric.
Here is the function:
def is_additive(agg_type: AggregationType) -> bool:
    """Indicates that if you sum values over a dimension grouping, you will still get an accurate result for this metric."""
    if agg_type is AggregationType.SUM or agg_type is AggregationType.SUM_BOOLEAN or agg_type is AggregationType.COUNT:
        return True
    elif (
        agg_type is AggregationType.MIN
        or agg_type is AggregationType.MAX
        or agg_type is AggregationType.COUNT_DISTINCT
        or agg_type is AggregationType.AVERAGE
        or agg_type is AggregationType.PERCENTILE
        or agg_type is AggregationType.MEDIAN
    ):
        return False
    # Exhaustiveness guard: trips if a new AggregationType is added without updating this
    # if/elif chain.
    else:
assert_values_exhausted(agg_type) | Indicates that if you sum values over a dimension grouping, you will still get an accurate result for this metric. |
179,399 | from __future__ import annotations
from enum import Enum
from dbt_semantic_interfaces.enum_extension import assert_values_exhausted
from dbt_semantic_interfaces.type_enums.aggregation_type import AggregationType
The provided code snippet includes necessary dependencies for implementing the `fill_nulls_with_0` function. Write a Python function `def fill_nulls_with_0(agg_type: AggregationType) -> bool` to solve the following problem:
Indicates if charts should show 0 instead of null where there are gaps in data.
Here is the function:
def fill_nulls_with_0(agg_type: AggregationType) -> bool:
    """Indicates if charts should show 0 instead of null where there are gaps in data."""
    # For these aggregations an absent value genuinely means "zero", so charts may fill gaps.
    return agg_type in (
        AggregationType.SUM,
        AggregationType.COUNT_DISTINCT,
        AggregationType.SUM_BOOLEAN,
        AggregationType.COUNT,
) | Indicates if charts should show 0 instead of null where there are gaps in data. |
179,400 | from __future__ import annotations
from enum import Enum
from dbt_semantic_interfaces.enum_extension import assert_values_exhausted
from dbt_semantic_interfaces.type_enums.aggregation_type import AggregationType
The provided code snippet includes necessary dependencies for implementing the `can_limit_dimension_values` function. Write a Python function `def can_limit_dimension_values(agg_type: AggregationType) -> bool` to solve the following problem:
Indicates if we can limit dimension values in charts. Currently, this means: 1. The dimensions we care about most are the ones with the highest numeric values 2. We can calculate the "other" column in the postprocessor (meaning the metric is expansive)
Here is the function:
def can_limit_dimension_values(agg_type: AggregationType) -> bool:
    """Indicates if we can limit dimension values in charts.

    Currently, this means:
    1. The dimensions we care about most are the ones with the highest numeric values
    2. We can calculate the "other" column in the postprocessor (meaning the metric is expansive)
    """
return agg_type in (AggregationType.SUM, AggregationType.SUM_BOOLEAN, AggregationType.COUNT) | Indicates if we can limit dimension values in charts. Currently, this means: 1. The dimensions we care about most are the ones with the highest numeric values 2. We can calculate the "other" column in the postprocessor (meaning the metric is expansive) |
179,401 | from __future__ import annotations
The provided code snippet includes necessary dependencies for implementing the `assert_exactly_one_arg_set` function. Write a Python function `def assert_exactly_one_arg_set(**kwargs) -> None` to solve the following problem:
Throws an assertion error if 0 or more than 1 argument is not None.
Here is the function:
def assert_exactly_one_arg_set(**kwargs) -> None:  # type: ignore
    """Throws an assertion error if 0 or more than 1 argument is not None."""
    # Count how many of the keyword arguments were provided (i.e. are not None).
    num_set = 0
    for value in kwargs.values():
        if value is not None:
            num_set += 1
assert num_set == 1, f"{num_set} argument(s) set instead of 1 in arguments: {kwargs}" | Throws an assertion error if 0 or more than 1 argument is not None. |
179,402 | from __future__ import annotations
The provided code snippet includes necessary dependencies for implementing the `assert_at_most_one_arg_set` function. Write a Python function `def assert_at_most_one_arg_set(**kwargs) -> None` to solve the following problem:
Throws an assertion error if more than 1 argument is not None.
Here is the function:
def assert_at_most_one_arg_set(**kwargs) -> None:  # type: ignore
    """Throws an assertion error if more than 1 argument is not None."""
    # Count how many of the keyword arguments were provided (i.e. are not None).
    num_set = 0
    for value in kwargs.values():
        if value is not None:
            num_set += 1
assert num_set <= 1, f"{num_set} argument(s) set instead of <=1 in arguments: {kwargs}" | Throws an assertion error if more than 1 argument is not None. |
179,403 | from __future__ import annotations
import html
import logging
import textwrap
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Generic, Sequence, TypeVar
import jinja2
from metricflow.dag.dag_to_text import MetricFlowDagTextFormatter
from metricflow.dag.id_prefix import IdPrefix
from metricflow.dag.sequential_id import SequentialIdGenerator
from metricflow.visitor import VisitorOutputT
class DisplayedProperty: # type: ignore
"""When visualizing a node as text or a graphic, the properties to display with it.
This should be displayed in the form "{key} = {str(value)}"
"""
key: str
value: Any # type: ignore
The provided code snippet includes necessary dependencies for implementing the `make_graphviz_label` function. Write a Python function `def make_graphviz_label( title: str, properties: Sequence[DisplayedProperty], title_font_size: int = 12, property_font_size: int = 6 ) -> str` to solve the following problem:
Make a graphviz label that can be used for rendering to an image. The title will be in a large font, while the properties will be listed in a table in a smaller font.
Here is the function:
def make_graphviz_label(
    title: str, properties: Sequence[DisplayedProperty], title_font_size: int = 12, property_font_size: int = 6
) -> str:
    """Make a graphviz label that can be used for rendering to an image.

    The title will be in a large font, while the properties will be listed in a table in a smaller font.

    Args:
        title: Text rendered at the top of the label at title_font_size.
        properties: Key/value pairs rendered as table rows at property_font_size.
        title_font_size: Point size for the title row.
        property_font_size: Point size for the property rows.
    """
    # Convert all properties values into a HTML-safe string, then break the string into lines of 40 columns so that
    # the node boxes don't get so wide. Better to pretty-print the object, but unclear how to do so.
    formatted_properties = []
    for displayed_property in properties:
        # html.escape protects against values containing <, >, & which would break the
        # HTML-like graphviz label; wrapped lines are re-joined with literal <BR/> tags.
        lines = [html.escape(x) for x in textwrap.wrap(str(displayed_property.value), width=40)]
        formatted_properties.append(DisplayedProperty(displayed_property.key, "<BR/>".join(lines)))
return jinja2.Template(
# Formatting here: https://graphviz.org/doc/info/shapes.html#html
textwrap.dedent(
"""\
<<TABLE BORDER="0" CELLPADDING="1" CELLSPACING="0">
<TR>
<TD ALIGN="LEFT" BALIGN="LEFT" VALIGN="TOP" COLSPAN="2"><FONT point-size="{{ title_size }}">{{ title }}</FONT></TD>
</TR>
{%- for key, value in properties %}
<TR>
<TD ALIGN="LEFT" BALIGN="LEFT" VALIGN="TOP"><FONT point-size="{{ property_size }}">{{ key }}</FONT></TD>
<TD ALIGN="LEFT" BALIGN="LEFT" VALIGN="TOP"><FONT point-size="{{ property_size }}">{{ value }}</FONT></TD>
</TR>
{%- endfor %}
</TABLE>>
"""
),
undefined=jinja2.StrictUndefined,
).render(
title=title,
title_size=title_font_size,
property_size=property_font_size,
properties=[(displayed_property.key, displayed_property.value) for displayed_property in formatted_properties],
) | Make a graphviz label that can be used for rendering to an image. The title will be in a large font, while the properties will be listed in a table in a smaller font. |
179,404 | from __future__ import annotations
from typing import List, Sequence
from metricflow.sql.sql_exprs import (
SqlAggregateFunctionExpression,
SqlColumnReference,
SqlColumnReferenceExpression,
SqlExpressionNode,
SqlFunction,
)
class SqlExpressionNode(DagNode, Visitable, ABC):
    """An SQL expression like my_table.my_column, CONCAT(a, b) or 1 + 1 that evaluates to a value."""

    # NOTE(review): several methods below have only `pass` bodies and are accessed like
    # attributes in subclasses — presumably @property/@abstractmethod decorators were
    # dropped in this extract; confirm against the original module.
    def __init__(self, node_id: NodeId, parent_nodes: List[SqlExpressionNode]) -> None:  # noqa: D
        self._parent_nodes = parent_nodes
        super().__init__(node_id=node_id)

    def requires_parenthesis(self) -> bool:
        """Should expression needs be rendered with parenthesis when rendering inside other expressions.
        Useful for string expressions where we can't infer the structure. For example, in rendering
        SqlMathExpression(operator="*", left_expr=SqlStringExpression("a"), right_expr=SqlStringExpression("b + c")
        this can be used to differentiate between
        a * b + c vs. a * (b + c)
        """
        pass

    def accept(self, visitor: SqlExpressionNodeVisitor[VisitorOutputT]) -> VisitorOutputT:
        """Called when a visitor needs to visit this node."""
        pass

    def bind_parameters(self) -> SqlBindParameters:
        """Execution parameters when running a query containing this expression.
        * See: https://docs.sqlalchemy.org/en/14/core/tutorial.html#using-textual-sql
        * Generally only defined for string expressions.
        """
        return SqlBindParameters()

    def parent_nodes(self) -> Sequence[SqlExpressionNode]:  # noqa: D
        return self._parent_nodes

    def as_column_reference_expression(self) -> Optional[SqlColumnReferenceExpression]:
        """If this is a column reference expression, return self."""
        return None

    def as_string_expression(self) -> Optional[SqlStringExpression]:
        """If this is a string expression, return self."""
        return None

    def rewrite(
        self,
        column_replacements: Optional[SqlColumnReplacements] = None,
        should_render_table_alias: Optional[bool] = None,
    ) -> SqlExpressionNode:
        """Return the same semantic expression but with re-written according to the input.
        Args:
            column_replacements: Replaces column references according to this map.
            should_render_table_alias: Change if table aliases should be rendered for column reference expressions.
        """
        pass

    def lineage(self) -> SqlExpressionTreeLineage:
        """Returns all nodes in the paths from this node to the root nodes."""
        pass

    def _parents_match(self, other: SqlExpressionNode) -> bool:  # noqa: D
        # zip_longest (not zip) so differing parent counts compare unequal via the None fill.
        return all(x == y for x, y in itertools.zip_longest(self.parent_nodes, other.parent_nodes))

    def matches(self, other: SqlExpressionNode) -> bool:
        """Similar to equals - returns true if these expressions are equivalent."""
        pass
class SqlColumnReference:
    """Identifies the column (table alias + column name) that a string expression refers to."""

    table_alias: str
    column_name: str
class SqlColumnReferenceExpression(SqlExpressionNode):
    """An expression that evaluates to the value of a column in one of the sources in the select query.
    e.g. my_table.my_column
    """

    def __init__(self, col_ref: SqlColumnReference, should_render_table_alias: bool = True) -> None:
        """Constructor.
        Args:
            col_ref: the associated column reference.
            should_render_table_alias: When converting this to SQL text, whether the table alias needed to be included.
            e.g. "foo.bar" vs "bar".
        """
        self._col_ref = col_ref
        self._should_render_table_alias = should_render_table_alias
        super().__init__(node_id=self.create_unique_id(), parent_nodes=[])

    # NOTE(review): `cls` parameter and attribute-style access to col_ref /
    # should_render_table_alias elsewhere suggest @classmethod/@property decorators were
    # dropped in this extract; confirm against the original module.
    def id_prefix(cls) -> IdPrefix:  # noqa: D
        return StaticIdPrefix.SQL_EXPR_COLUMN_REFERENCE_ID_PREFIX

    def accept(self, visitor: SqlExpressionNodeVisitor[VisitorOutputT]) -> VisitorOutputT:  # noqa: D
        return visitor.visit_column_reference_expr(self)

    def col_ref(self) -> SqlColumnReference:  # noqa: D
        return self._col_ref

    def description(self) -> str:  # noqa: D
        return f"Column: {self.col_ref}"

    def displayed_properties(self) -> Sequence[DisplayedProperty]:  # noqa: D
        return tuple(super().displayed_properties) + (DisplayedProperty("col_ref", self.col_ref),)

    def requires_parenthesis(self) -> bool:  # noqa: D
        return False

    def as_column_reference_expression(self) -> Optional[SqlColumnReferenceExpression]:  # noqa:
        return self

    def rewrite(  # noqa: D
        self,
        column_replacements: Optional[SqlColumnReplacements] = None,
        should_render_table_alias: Optional[bool] = None,
    ) -> SqlExpressionNode:
        # TODO: Hack to work around the fact our test data set contains "user", which is a reserved keyword.
        # We should migrate "user" -> "user_id" in the test set.
        # This will force "user" to be rendered as "table_alias.user"
        if self.col_ref.column_name == "user":
            should_render_table_alias = True
        if column_replacements:
            replacement = column_replacements.get_replacement(self.col_ref)
            if replacement:
                # Replacement found: apply the alias override to it (if any) and return it.
                if should_render_table_alias is not None:
                    return replacement.rewrite(should_render_table_alias=should_render_table_alias)
                else:
                    return replacement
            else:
                # No replacement for this column: keep self, applying the alias override if set.
                if should_render_table_alias is not None:
                    return SqlColumnReferenceExpression(
                        col_ref=self.col_ref, should_render_table_alias=should_render_table_alias
                    )
                return self
        if should_render_table_alias is not None:
            return SqlColumnReferenceExpression(
                col_ref=self.col_ref, should_render_table_alias=should_render_table_alias
            )
        return SqlColumnReferenceExpression(
            col_ref=self.col_ref, should_render_table_alias=self.should_render_table_alias
        )

    def lineage(self) -> SqlExpressionTreeLineage:  # noqa: D
        return SqlExpressionTreeLineage(column_reference_exprs=(self,))

    def should_render_table_alias(self) -> bool:  # noqa: D
        return self._should_render_table_alias

    def matches(self, other: SqlExpressionNode) -> bool:  # noqa: D
        # Equivalence is based solely on the referenced column, not on alias rendering.
        if not isinstance(other, SqlColumnReferenceExpression):
            return False
        return self.col_ref == other.col_ref
class SqlFunction(Enum):
    """Names of known SQL functions like SUM() in SELECT SUM(...).

    Values are the SQL string to be used in rendering.
    """

    # Aggregation functions
    AVERAGE = "AVG"
    # Most engines implement count_distinct as a leading DISTINCT keyword like `COUNT(DISTINCT col1, col2...)`
    COUNT_DISTINCT = "COUNT"
    MAX = "MAX"
    MIN = "MIN"
    SUM = "SUM"

    # Field management functions
    COALESCE = "COALESCE"
    CONCAT = "CONCAT"

    # The helpers below take no instance/member argument, so they are @staticmethod
    # (the decorators were lost in this extract; without them, member-level access
    # like `SqlFunction.SUM.is_aggregation(x)` would mis-bind the member as the arg).
    @staticmethod
    def distinct_aggregation_functions() -> Sequence[SqlFunction]:
        """Returns a tuple containing all currently-supported DISTINCT type aggregation functions.

        This is not a property because properties don't play nicely with static/class methods.
        """
        return (SqlFunction.COUNT_DISTINCT,)

    @staticmethod
    def is_distinct_aggregation(function_type: SqlFunction) -> bool:
        """Convenience method to check if the input function is a distinct aggregation type.

        This is useful in SQL expression rendering, as most engines implement distinct as a keyword modifier on
        an argument (e.g., `COUNT(DISTINCT expr)`) while our model handling and rendering supports distinct functions
        (e.g., `count_distinct(expr)`) and otherwise does not guarantee correct results when the DISTINCT keyword
        is used.
        """
        return function_type in SqlFunction.distinct_aggregation_functions()

    @staticmethod
    def is_aggregation(function_type: SqlFunction) -> bool:
        """Returns true if the given function is an aggregation function."""
        return function_type in (
            SqlFunction.AVERAGE,
            SqlFunction.COUNT_DISTINCT,
            SqlFunction.MAX,
            SqlFunction.MIN,
            SqlFunction.SUM,
        )

    @staticmethod
    def from_aggregation_type(aggregation_type: AggregationType) -> SqlFunction:
        """Converter method to get the SqlFunction value corresponding to the given AggregationType.

        Make sure to leave the else: block in place, as this enforces an exhaustive switch through the
        AggregationType enumeration values.
        """
        if aggregation_type is AggregationType.AVERAGE:
            return SqlFunction.AVERAGE
        elif aggregation_type is AggregationType.COUNT_DISTINCT:
            return SqlFunction.COUNT_DISTINCT
        elif aggregation_type is AggregationType.MAX:
            return SqlFunction.MAX
        elif aggregation_type is AggregationType.MIN:
            return SqlFunction.MIN
        elif aggregation_type is AggregationType.SUM:
            return SqlFunction.SUM
        elif aggregation_type is AggregationType.PERCENTILE:
            # Fixed message: adjacent literals previously rendered as
            # "...percentileaggregation node." (missing space).
            raise RuntimeError(
                f"Unhandled aggregation type {aggregation_type} - this should have been handled in percentile "
                "aggregation node."
            )
        elif aggregation_type is AggregationType.MEDIAN:
            raise RuntimeError(
                f"Unhandled aggregation type {aggregation_type} - this should have been transformed to PERCENTILE "
                "during model parsing."
            )
        elif aggregation_type is AggregationType.SUM_BOOLEAN or aggregation_type is AggregationType.COUNT:
            raise RuntimeError(
                f"Unhandled aggregation type {aggregation_type} - this should have been transformed to SUM "
                "during model parsing."
            )
        else:
            assert_values_exhausted(aggregation_type)
class SqlAggregateFunctionExpression(SqlFunctionExpression):
    """An aggregate function expression like SUM(1).

    NOTE(review): the @staticmethod/@classmethod/@property decorators were lost in this
    extract; they are restored here. Attribute-style uses elsewhere in this file
    (`self.sql_function`, `self.sql_function_args`, `x.lineage`) prove the accessors
    must be properties.
    """

    @staticmethod
    def from_aggregation_type(
        aggregation_type: AggregationType, sql_column_expression: SqlColumnReferenceExpression
    ) -> SqlAggregateFunctionExpression:
        """Given the aggregation type, return an SQL function expression that does that aggregation on the given col."""
        return SqlAggregateFunctionExpression(
            sql_function=SqlFunction.from_aggregation_type(aggregation_type=aggregation_type),
            sql_function_args=[sql_column_expression],
        )

    def __init__(self, sql_function: SqlFunction, sql_function_args: List[SqlExpressionNode]) -> None:
        """Constructor.

        Args:
            sql_function: The function that this represents.
            sql_function_args: The arguments that should go into the function. e.g. for "CONCAT(a, b)", the arg
            expressions should be "a" and "b".
        """
        self._sql_function = sql_function
        self._sql_function_args = sql_function_args
        # The function's arguments are this node's parents in the expression DAG.
        super().__init__(node_id=self.create_unique_id(), parent_nodes=sql_function_args)

    @classmethod
    def id_prefix(cls) -> IdPrefix:  # noqa: D
        return StaticIdPrefix.SQL_EXPR_FUNCTION_ID_PREFIX

    @property
    def requires_parenthesis(self) -> bool:  # noqa: D
        # A function call is already self-delimiting, so no extra parenthesis is needed.
        return False

    def accept(self, visitor: SqlExpressionNodeVisitor[VisitorOutputT]) -> VisitorOutputT:  # noqa: D
        return visitor.visit_function_expr(self)

    @property
    def description(self) -> str:  # noqa: D
        return f"{self._sql_function.value} Expression"

    @property
    def displayed_properties(self) -> Sequence[DisplayedProperty]:  # noqa: D
        return (
            tuple(super().displayed_properties)
            + (DisplayedProperty("function", self.sql_function),)
            + tuple(DisplayedProperty("argument", x) for x in self.sql_function_args)
        )

    @property
    def sql_function(self) -> SqlFunction:  # noqa: D
        return self._sql_function

    @property
    def sql_function_args(self) -> List[SqlExpressionNode]:  # noqa: D
        return self._sql_function_args

    def __repr__(self) -> str:  # noqa: D
        return f"{self.__class__.__name__}(node_id={self.node_id}, sql_function={self.sql_function.name})"

    def rewrite(  # noqa: D
        self,
        column_replacements: Optional[SqlColumnReplacements] = None,
        should_render_table_alias: Optional[bool] = None,
    ) -> SqlExpressionNode:
        # Rewriting recurses into the arguments; the function itself is unchanged.
        return SqlAggregateFunctionExpression(
            sql_function=self.sql_function,
            sql_function_args=[
                x.rewrite(column_replacements, should_render_table_alias) for x in self.sql_function_args
            ],
        )

    @property
    def is_aggregate_function(self) -> bool:  # noqa: D
        return True

    @property
    def lineage(self) -> SqlExpressionTreeLineage:  # noqa: D
        # Combine lineages of all arguments, then append this function expression.
        return SqlExpressionTreeLineage.combine(
            tuple(x.lineage for x in self.parent_nodes) + (SqlExpressionTreeLineage(function_exprs=(self,)),)
        )

    def matches(self, other: SqlExpressionNode) -> bool:  # noqa: D
        if not isinstance(other, SqlAggregateFunctionExpression):
            return False
        return self.sql_function == other.sql_function and self._parents_match(other)
The provided code snippet includes necessary dependencies for implementing the `make_coalesced_expr` function. Write a Python function `def make_coalesced_expr(table_aliases: Sequence[str], column_alias: str) -> SqlExpressionNode` to solve the following problem:
Makes a coalesced expression of the given column from the given table aliases. e.g. table_aliases = ["a", "b"] column_alias = "is_instant" -> COALESCE(a.is_instant, b.is_instant)
Here is the function:
def make_coalesced_expr(table_aliases: Sequence[str], column_alias: str) -> SqlExpressionNode:
    """Build an expression for `column_alias` coalesced across the given table aliases.

    A single alias yields a plain column reference (no COALESCE wrapper), e.g.
    table_aliases=["a"], column_alias="is_instant" -> a.is_instant. Multiple aliases
    wrap the per-table references, e.g. ["a", "b"] -> COALESCE(a.is_instant, b.is_instant).
    """
    column_ref_exprs: List[SqlExpressionNode] = [
        SqlColumnReferenceExpression(
            col_ref=SqlColumnReference(
                table_alias=table_alias,
                column_name=column_alias,
            )
        )
        for table_alias in table_aliases
    ]
    if len(column_ref_exprs) == 1:
        return column_ref_exprs[0]
    return SqlAggregateFunctionExpression(
        sql_function=SqlFunction.COALESCE,
        sql_function_args=column_ref_exprs,
    )
179,405 | from __future__ import annotations
import logging
from collections import OrderedDict
from dataclasses import dataclass, field
from itertools import chain
from typing import Dict, List, Optional, Sequence, Tuple
from dbt_semantic_interfaces.references import MetricReference, SemanticModelReference
from dbt_semantic_interfaces.type_enums.aggregation_type import AggregationType
from dbt_semantic_interfaces.type_enums.date_part import DatePart
from dbt_semantic_interfaces.type_enums.time_granularity import TimeGranularity
from more_itertools import bucket
from metricflow.aggregation_properties import AggregationState
from metricflow.assert_one_arg import assert_exactly_one_arg_set
from metricflow.dataflow.nodes.join_to_base import ValidityWindowJoinDescription
from metricflow.instances import (
DimensionInstance,
EntityInstance,
InstanceSet,
InstanceSetTransform,
MdoInstance,
MeasureInstance,
MetadataInstance,
MetricInstance,
TimeDimensionInstance,
)
from metricflow.plan_conversion.select_column_gen import SelectColumnSet
from metricflow.protocols.semantics import MetricAccessor, SemanticModelAccessor
from metricflow.specs.column_assoc import ColumnAssociationResolver
from metricflow.specs.specs import (
DimensionSpec,
EntityReference,
EntitySpec,
InstanceSpec,
InstanceSpecSet,
LinkableInstanceSpec,
LinklessEntitySpec,
MeasureSpec,
MetricInputMeasureSpec,
TimeDimensionSpec,
)
from metricflow.sql.sql_exprs import (
SqlAggregateFunctionExpression,
SqlColumnReference,
SqlColumnReferenceExpression,
SqlExpressionNode,
SqlFunction,
SqlFunctionExpression,
SqlStringExpression,
)
from metricflow.sql.sql_plan import (
SqlSelectColumn,
)
class CreateSelectColumnsForInstances(InstanceSetTransform[SelectColumnSet]):
    """Create select column expressions that will express all instances in the set.

    It assumes that the column names of the instances are represented by the supplied column association resolver and
    come from the given table alias.
    """

    def __init__(
        self,
        table_alias: str,
        column_resolver: ColumnAssociationResolver,
        output_to_input_column_mapping: Optional[OrderedDict[str, str]] = None,
    ) -> None:
        """Initializer.

        Args:
            table_alias: the table alias to select columns from
            column_resolver: resolver to name columns.
            output_to_input_column_mapping: if specified, use these columns in the input for the given output columns.
        """
        self._table_alias = table_alias
        self._column_resolver = column_resolver
        # Fall back to an empty mapping so lookups below never need a None check.
        self._output_to_input_column_mapping = output_to_input_column_mapping or OrderedDict()

    def transform(self, instance_set: InstanceSet) -> SelectColumnSet:  # noqa: D
        # One instance may expand to multiple select columns, hence the
        # chain.from_iterable flattening per instance category.
        metric_cols = list(
            chain.from_iterable([self._make_sql_column_expression(x) for x in instance_set.metric_instances])
        )
        measure_cols = list(
            chain.from_iterable([self._make_sql_column_expression(x) for x in instance_set.measure_instances])
        )
        dimension_cols = list(
            chain.from_iterable([self._make_sql_column_expression(x) for x in instance_set.dimension_instances])
        )
        time_dimension_cols = list(
            chain.from_iterable([self._make_sql_column_expression(x) for x in instance_set.time_dimension_instances])
        )
        entity_cols = list(
            chain.from_iterable([self._make_sql_column_expression(x) for x in instance_set.entity_instances])
        )
        metadata_cols = list(
            chain.from_iterable([self._make_sql_column_expression(x) for x in instance_set.metadata_instances])
        )
        return SelectColumnSet(
            metric_columns=metric_cols,
            measure_columns=measure_cols,
            dimension_columns=dimension_cols,
            time_dimension_columns=time_dimension_cols,
            entity_columns=entity_cols,
            metadata_columns=metadata_cols,
        )

    def _make_sql_column_expression(
        self,
        element_instance: MdoInstance,
    ) -> List[SqlSelectColumn]:
        """Convert one element instance into a SQL column."""
        # Do a sanity check to make sure that there's a 1:1 mapping between the columns associations generated by the
        # column resolver based on the spec, and the columns that are already associated with the instance.
        expected_column_associations = (self._column_resolver.resolve_spec(element_instance.spec),)
        existing_column_associations = element_instance.associated_columns

        # Dict between the expected column name and the corresponding column in the existing columns,
        # matched on the column correlation key.
        column_matches: Dict[str, List[str]] = {
            expected_column.column_name: [
                col.column_name
                for col in existing_column_associations
                if col.column_correlation_key == expected_column.column_correlation_key
            ]
            for expected_column in expected_column_associations
        }
        # Assert a 1:1 mapping between expected and existing
        assert all([len(x) == 1 for x in column_matches.values()]), (
            f"Did not find exactly one match for each expected column associations. "
            f"Expected -> existing mappings: {column_matches}"
        )
        existing_names = set([col.column_name for col in existing_column_associations])
        mapped_names = set()
        mapped_cols: List[str] = []
        # NOTE(review): the loop variable below shadows the `mapped_cols` initialized above;
        # after the loop it holds the *last* match list, which is what the assert message prints.
        for mapped_cols in column_matches.values():
            mapped_names.update([col_name for col_name in mapped_cols])
        assert existing_names == mapped_names, (
            f"Not all existing columns were mapped. Existing: {existing_names}. Mapped: {mapped_cols}, "
            f"{expected_column_associations} -- {existing_column_associations}"
        )

        select_columns = []
        for expected_name, mapped_cols in column_matches.items():
            # Default to the (single) matched input column; optionally remap via the
            # output->input mapping supplied at construction time.
            input_column_name = mapped_cols[0]
            output_column_name = expected_name
            if output_column_name in self._output_to_input_column_mapping:
                input_column_name = self._output_to_input_column_mapping[output_column_name]
            select_columns.append(
                SqlSelectColumn(
                    expr=SqlColumnReferenceExpression(SqlColumnReference(self._table_alias, input_column_name)),
                    column_alias=output_column_name,
                )
            )
        return select_columns
@dataclass
class InstanceSet(SerializableDataclass):
    """A set that includes all instance types.

    Generally used to help represent that data that is flowing between nodes in the metric dataflow plan.

    NOTE(review): this class declares dataclass-style fields and is constructed with keyword
    args in merge(); the @dataclass decorator appears to have been lost in this extract and is
    restored here (upstream it may additionally be frozen — confirm).
    """

    measure_instances: Tuple[MeasureInstance, ...] = ()
    dimension_instances: Tuple[DimensionInstance, ...] = ()
    time_dimension_instances: Tuple[TimeDimensionInstance, ...] = ()
    entity_instances: Tuple[EntityInstance, ...] = ()
    metric_instances: Tuple[MetricInstance, ...] = ()
    metadata_instances: Tuple[MetadataInstance, ...] = ()

    def transform(self, transform_function: InstanceSetTransform[TransformOutputT]) -> TransformOutputT:  # noqa: D
        return transform_function.transform(self)

    @staticmethod
    def merge(instance_sets: List[InstanceSet]) -> InstanceSet:
        """Combine all instances from all instances into a single instance set.

        Instances will be de-duped based on their spec, keeping the first occurrence in input order.
        """

        def _dedupe_by_spec(instance_groups):  # noqa: ANN001, ANN202
            # Track seen specs in a set for O(1) membership tests — the previous version
            # rebuilt the spec set on every comparison, which was accidentally O(n^2).
            seen_specs = set()
            deduped = []
            for group in instance_groups:
                for instance in group:
                    if instance.spec not in seen_specs:
                        seen_specs.add(instance.spec)
                        deduped.append(instance)
            return tuple(deduped)

        return InstanceSet(
            measure_instances=_dedupe_by_spec(x.measure_instances for x in instance_sets),
            dimension_instances=_dedupe_by_spec(x.dimension_instances for x in instance_sets),
            time_dimension_instances=_dedupe_by_spec(x.time_dimension_instances for x in instance_sets),
            entity_instances=_dedupe_by_spec(x.entity_instances for x in instance_sets),
            metric_instances=_dedupe_by_spec(x.metric_instances for x in instance_sets),
            metadata_instances=_dedupe_by_spec(x.metadata_instances for x in instance_sets),
        )

    def spec_set(self) -> InstanceSpecSet:  # noqa: D
        # Project each instance category down to its spec.
        # NOTE(review): possibly a @property upstream; left as a plain method to match this extract.
        return InstanceSpecSet(
            measure_specs=tuple(x.spec for x in self.measure_instances),
            dimension_specs=tuple(x.spec for x in self.dimension_instances),
            time_dimension_specs=tuple(x.spec for x in self.time_dimension_instances),
            entity_specs=tuple(x.spec for x in self.entity_instances),
            metric_specs=tuple(x.spec for x in self.metric_instances),
            metadata_specs=tuple(x.spec for x in self.metadata_instances),
        )
@dataclass
class SelectColumnSet:
    """A set of SQL select columns that represent the different instance types in a data set.

    NOTE(review): the @dataclass decorator appears to have been lost in this extract and is
    restored here — without it the `field(...)` defaults are bare Field sentinels and the
    keyword constructor used by merge()/without_measure_columns() does not exist.
    """

    # default_factory gives each instance its own empty list (avoids the shared
    # mutable-default pitfall).
    metric_columns: List[SqlSelectColumn] = field(default_factory=list)
    measure_columns: List[SqlSelectColumn] = field(default_factory=list)
    dimension_columns: List[SqlSelectColumn] = field(default_factory=list)
    time_dimension_columns: List[SqlSelectColumn] = field(default_factory=list)
    entity_columns: List[SqlSelectColumn] = field(default_factory=list)
    metadata_columns: List[SqlSelectColumn] = field(default_factory=list)

    def merge(self, other_set: SelectColumnSet) -> SelectColumnSet:
        """Combine the select columns by type."""
        return SelectColumnSet(
            metric_columns=self.metric_columns + other_set.metric_columns,
            measure_columns=self.measure_columns + other_set.measure_columns,
            dimension_columns=self.dimension_columns + other_set.dimension_columns,
            time_dimension_columns=self.time_dimension_columns + other_set.time_dimension_columns,
            entity_columns=self.entity_columns + other_set.entity_columns,
            metadata_columns=self.metadata_columns + other_set.metadata_columns,
        )

    def as_tuple(self) -> Tuple[SqlSelectColumn, ...]:
        """Return all select columns as a tuple."""
        return tuple(
            # This order was chosen to match the column sequence data consumers typically prefer.
            self.time_dimension_columns
            + self.entity_columns
            + self.dimension_columns
            + self.metric_columns
            + self.measure_columns
            + self.metadata_columns
        )

    def without_measure_columns(self) -> SelectColumnSet:
        """Returns this but with the measure columns removed."""
        return SelectColumnSet(
            metric_columns=self.metric_columns,
            dimension_columns=self.dimension_columns,
            time_dimension_columns=self.time_dimension_columns,
            entity_columns=self.entity_columns,
            metadata_columns=self.metadata_columns,
        )
class ColumnAssociationResolver(ABC):
    """Get the default column associations for an element instance.

    This is used for naming columns in an SQL query consistently. For example, dimensions with links are
    named like <entity link>__<dimension name> e.g. user_id__country, and time dimensions at a different time
    granularity are named <time dimension>__<time granularity> e.g. ds__month. Having a central place to name them will
    make it easier to change this later on. Names generated need to be unique within a query.

    It's also important to maintain this format because customers write constraints in SQL assuming this. This
    allows us to stick the constraint in as WHERE clauses without having to parse the constraint SQL.

    TODO: Updates are needed for time granularity in time dimensions, ToT for metrics.

    The resolve* methods should return the column associations / column names that it should use in queries for the given
    spec.
    """

    def resolve_spec(self, spec: InstanceSpec) -> ColumnAssociation:  # noqa: D
        # Subclasses must supply the actual naming policy.
        # NOTE(review): presumably decorated with @abstractmethod upstream; the decorator
        # is not visible in this extract.
        raise NotImplementedError
@dataclass
class SqlSelectColumn:
    """Represents a column in the select clause of an SQL query.

    NOTE(review): instantiated with keyword args elsewhere in this file
    (SqlSelectColumn(expr=..., column_alias=...)), which requires the generated
    __init__ — the @dataclass decorator appears to have been lost in this extract
    and is restored here (upstream it may additionally be frozen — confirm).
    """

    # The expression that produces the column's value.
    expr: SqlExpressionNode
    # Always require a column alias for simplicity.
    column_alias: str
The provided code snippet includes necessary dependencies for implementing the `create_select_columns_for_instance_sets` function. Write a Python function `def create_select_columns_for_instance_sets( column_resolver: ColumnAssociationResolver, table_alias_to_instance_set: OrderedDict[str, InstanceSet], ) -> Tuple[SqlSelectColumn, ...]` to solve the following problem:
Creates select columns for instance sets coming from multiple tables, as defined in table_alias_to_instance_set. Used in cases where you join multiple tables and need to render select columns to access all of those.
Here is the function:
def create_select_columns_for_instance_sets(
    column_resolver: ColumnAssociationResolver,
    table_alias_to_instance_set: OrderedDict[str, InstanceSet],
) -> Tuple[SqlSelectColumn, ...]:
    """Creates select columns for instance sets coming from multiple tables, keyed by table alias.

    Used in cases where you join multiple tables and need to render select columns to access all of those.
    """
    combined_columns = SelectColumnSet()
    for table_alias, instance_set in table_alias_to_instance_set.items():
        # Build the per-table select columns and fold them into the running set.
        column_builder = CreateSelectColumnsForInstances(
            table_alias=table_alias,
            column_resolver=column_resolver,
        )
        combined_columns = combined_columns.merge(instance_set.transform(column_builder))
    return combined_columns.as_tuple()
179,406 | from __future__ import annotations
import logging
from collections import OrderedDict
from typing import List, Optional, Sequence, Tuple, Union
from dbt_semantic_interfaces.enum_extension import assert_values_exhausted
from dbt_semantic_interfaces.naming.keywords import METRIC_TIME_ELEMENT_NAME
from dbt_semantic_interfaces.protocols.metric import MetricInputMeasure, MetricType
from dbt_semantic_interfaces.references import EntityReference, MetricModelReference
from dbt_semantic_interfaces.type_enums.aggregation_type import AggregationType
from dbt_semantic_interfaces.type_enums.conversion_calculation_type import ConversionCalculationType
from dbt_semantic_interfaces.validations.unique_valid_name import MetricFlowReservedKeywords
from metricflow.aggregation_properties import AggregationState
from metricflow.dag.id_prefix import StaticIdPrefix
from metricflow.dag.mf_dag import DagId
from metricflow.dag.sequential_id import SequentialIdGenerator
from metricflow.dataflow.dataflow_plan import (
BaseOutput,
ComputedMetricsOutput,
DataflowPlanNodeVisitor,
)
from metricflow.dataflow.nodes.add_generated_uuid import AddGeneratedUuidColumnNode
from metricflow.dataflow.nodes.aggregate_measures import AggregateMeasuresNode
from metricflow.dataflow.nodes.combine_aggregated_outputs import CombineAggregatedOutputsNode
from metricflow.dataflow.nodes.compute_metrics import ComputeMetricsNode
from metricflow.dataflow.nodes.constrain_time import ConstrainTimeRangeNode
from metricflow.dataflow.nodes.filter_elements import FilterElementsNode
from metricflow.dataflow.nodes.join_conversion_events import JoinConversionEventsNode
from metricflow.dataflow.nodes.join_over_time import JoinOverTimeRangeNode
from metricflow.dataflow.nodes.join_to_base import JoinToBaseOutputNode
from metricflow.dataflow.nodes.join_to_time_spine import JoinToTimeSpineNode
from metricflow.dataflow.nodes.metric_time_transform import MetricTimeDimensionTransformNode
from metricflow.dataflow.nodes.min_max import MinMaxNode
from metricflow.dataflow.nodes.order_by_limit import OrderByLimitNode
from metricflow.dataflow.nodes.read_sql_source import ReadSqlSourceNode
from metricflow.dataflow.nodes.semi_additive_join import SemiAdditiveJoinNode
from metricflow.dataflow.nodes.where_filter import WhereConstraintNode
from metricflow.dataflow.nodes.write_to_dataframe import WriteToResultDataframeNode
from metricflow.dataflow.nodes.write_to_table import WriteToResultTableNode
from metricflow.dataset.dataset import DataSet
from metricflow.dataset.sql_dataset import SqlDataSet
from metricflow.filters.time_constraint import TimeRangeConstraint
from metricflow.instances import InstanceSet, MetadataInstance, MetricInstance, TimeDimensionInstance
from metricflow.mf_logging.formatting import indent
from metricflow.model.semantic_manifest_lookup import SemanticManifestLookup
from metricflow.plan_conversion.instance_converters import (
AddLinkToLinkableElements,
AddMetadata,
AddMetrics,
AliasAggregatedMeasures,
ChangeAssociatedColumns,
ChangeMeasureAggregationState,
ConvertToMetadata,
CreateSelectColumnForCombineOutputNode,
CreateSelectColumnsForInstances,
CreateSelectColumnsWithMeasuresAggregated,
CreateSqlColumnReferencesForInstances,
FilterElements,
FilterLinkableInstancesWithLeadingLink,
RemoveMeasures,
RemoveMetrics,
UpdateMeasureFillNullsWith,
create_select_columns_for_instance_sets,
)
from metricflow.plan_conversion.select_column_gen import (
SelectColumnSet,
)
from metricflow.plan_conversion.spec_transforms import (
CreateColumnAssociations,
CreateSelectCoalescedColumnsForLinkableSpecs,
SelectOnlyLinkableSpecs,
)
from metricflow.plan_conversion.sql_join_builder import (
AnnotatedSqlDataSet,
ColumnEqualityDescription,
SqlQueryPlanJoinBuilder,
)
from metricflow.plan_conversion.time_spine import TIME_SPINE_DATA_SET_DESCRIPTION, TimeSpineSource
from metricflow.protocols.sql_client import SqlEngine
from metricflow.specs.column_assoc import ColumnAssociation, ColumnAssociationResolver, SingleColumnCorrelationKey
from metricflow.specs.specs import (
InstanceSpecSet,
MeasureSpec,
MetadataSpec,
MetricSpec,
TimeDimensionSpec,
)
from metricflow.sql.optimizer.optimization_levels import (
SqlQueryOptimizationLevel,
SqlQueryOptimizerConfiguration,
)
from metricflow.sql.sql_exprs import (
SqlAggregateFunctionExpression,
SqlBetweenExpression,
SqlColumnReference,
SqlColumnReferenceExpression,
SqlComparison,
SqlComparisonExpression,
SqlDateTruncExpression,
SqlExpressionNode,
SqlExtractExpression,
SqlFunction,
SqlFunctionExpression,
SqlGenerateUuidExpression,
SqlLogicalExpression,
SqlLogicalOperator,
SqlRatioComputationExpression,
SqlStringExpression,
SqlStringLiteralExpression,
SqlWindowFunction,
SqlWindowFunctionExpression,
SqlWindowOrderByArgument,
)
from metricflow.sql.sql_plan import (
SqlJoinDescription,
SqlJoinType,
SqlOrderByDescription,
SqlQueryPlan,
SqlQueryPlanNode,
SqlSelectColumn,
SqlSelectStatementNode,
SqlTableFromClauseNode,
)
from metricflow.time.time_constants import ISO8601_PYTHON_FORMAT
class TimeRangeConstraint(SerializableDataclass):
    """Describes how the time dimension for metrics should be constrained."""

    # NOTE(review): fields are constructed via keyword args below — presumably a @dataclass
    # upstream; the decorator is not visible in this extract. Likewise, the no-`self` factory
    # methods below are presumably @staticmethod upstream.
    start_time: datetime.datetime
    end_time: datetime.datetime

    def __post_init__(self) -> None:  # noqa: D
        # Soft-validate (warn only) ordering and the lower bound, but hard-fail when the
        # end exceeds the supported upper bound.
        if self.start_time > self.end_time:
            logger.warning(f"start_time must not be > end_time. start_time={self.start_time} end_time={self.end_time}")

        if self.start_time < TimeRangeConstraint.ALL_TIME_BEGIN():
            logger.warning(f"start_time={self.start_time} exceeds the limits of {TimeRangeConstraint.ALL_TIME_BEGIN()}")

        if self.end_time > TimeRangeConstraint.ALL_TIME_END():
            raise RuntimeError(f"end_time={self.end_time} exceeds the limits of {TimeRangeConstraint.ALL_TIME_END()}")

    def ALL_TIME_BEGIN() -> datetime.datetime:  # noqa: D
        # Earliest supported timestamp — the "all time" lower bound.
        return datetime.datetime(2000, 1, 1)

    def ALL_TIME_END() -> datetime.datetime:  # noqa: D
        # Latest supported timestamp — the "all time" upper bound.
        return datetime.datetime(2040, 12, 31)

    def all_time() -> TimeRangeConstraint:
        """Return the range representing all time.

        This could also be represented with None as the ends, but doing this makes the logic simpler in many cases.
        """
        return TimeRangeConstraint(
            start_time=TimeRangeConstraint.ALL_TIME_BEGIN(),
            end_time=TimeRangeConstraint.ALL_TIME_END(),
        )

    def empty_time() -> TimeRangeConstraint:
        """Return the range representing no time."""
        # Represented as a zero-length range at the lower bound.
        return TimeRangeConstraint(
            start_time=TimeRangeConstraint.ALL_TIME_BEGIN(),
            end_time=TimeRangeConstraint.ALL_TIME_BEGIN(),
        )

    def _adjust_time_constraint_start_by_window(
        self,
        time_granularity: TimeGranularity,
        time_unit_count: int,
    ) -> TimeRangeConstraint:
        """Moves the start of the time constraint back by <time_unit_count> windows.

        if the metric is weekly-active-users (ie window = 1 week) it moves time_constraint.start one week earlier
        """
        # Uses pandas offset arithmetic; `offset_period` maps a granularity to a pandas offset.
        start_ts = pd.Timestamp(self.start_time)
        offset = offset_period(time_granularity) * time_unit_count
        adjusted_start = (start_ts - offset).to_pydatetime()
        return TimeRangeConstraint(
            start_time=adjusted_start,
            end_time=self.end_time,
        )

    def adjust_time_constraint_for_cumulative_metric(
        self, granularity: Optional[TimeGranularity], count: int
    ) -> TimeRangeConstraint:
        """Given a time constraint for the overall query, adjust it to cover the time range for this metric."""
        if granularity is not None:
            return self._adjust_time_constraint_start_by_window(granularity, count)

        # if no window is specified we want to accumulate from the beginning of time
        return TimeRangeConstraint(
            start_time=TimeRangeConstraint.ALL_TIME_BEGIN(),
            end_time=self.end_time,
        )

    def is_subset_of(self, other: TimeRangeConstraint) -> bool:  # noqa: D
        return self.start_time >= other.start_time and self.end_time <= other.end_time

    def __str__(self) -> str:  # noqa: D
        return f"[{self.start_time.isoformat()}, {self.end_time.isoformat()}]"

    def __repr__(self) -> str:  # noqa: D
        return (
            f"{self.__class__.__name__}(start_time='{self.start_time.isoformat()}', "
            f"end_time='{self.end_time.isoformat()}')"
        )

    def intersection(self, other: TimeRangeConstraint) -> TimeRangeConstraint:  # noqa: D
        """Return the overlap of this range with `other` (empty_time() when disjoint)."""
        # The branches below enumerate every possible relative ordering of the two
        # closed intervals; the final else should be unreachable for well-formed inputs.
        # self is completely before the other
        if self.end_time < other.start_time:
            return TimeRangeConstraint.empty_time()
        # self starts before the start of other, and self ends within other
        elif self.start_time <= other.start_time <= self.end_time <= other.end_time:
            return TimeRangeConstraint(
                start_time=other.start_time,
                end_time=self.end_time,
            )
        # self starts before the start of other, and self ends after other
        elif self.start_time <= other.start_time <= other.end_time <= self.end_time:
            return other
        # self starts after the start of other, and self ends within other:
        elif other.start_time <= self.start_time <= self.end_time <= other.end_time:
            return self
        # self starts after the start of other, and self ends after other:
        elif other.start_time <= self.start_time <= other.end_time <= self.end_time:
            return TimeRangeConstraint(
                start_time=self.start_time,
                end_time=other.end_time,
            )
        # self is completely after other
        elif self.start_time > other.end_time:
            return TimeRangeConstraint.empty_time()
        else:
            raise RuntimeError(f"Unhandled case - self: {self} other: {other}")
class SqlExpressionNode(DagNode, Visitable, ABC):
    """An SQL expression like my_table.my_column, CONCAT(a, b) or 1 + 1 that evaluates to a value."""

    # NOTE(review): several accessors below are referenced attribute-style elsewhere in this
    # file (e.g. `self.parent_nodes`, `x.lineage`), so they are presumably decorated with
    # @property and/or @abstractmethod upstream; decorators are not visible in this extract.

    def __init__(self, node_id: NodeId, parent_nodes: List[SqlExpressionNode]) -> None:  # noqa: D
        self._parent_nodes = parent_nodes
        super().__init__(node_id=node_id)

    def requires_parenthesis(self) -> bool:
        """Should expression needs be rendered with parenthesis when rendering inside other expressions.

        Useful for string expressions where we can't infer the structure. For example, in rendering

        SqlMathExpression(operator="*", left_expr=SqlStringExpression("a"), right_expr=SqlStringExpression("b + c")

        this can be used to differentiate between

        a * b + c vs. a * (b + c)
        """
        pass

    def accept(self, visitor: SqlExpressionNodeVisitor[VisitorOutputT]) -> VisitorOutputT:
        """Called when a visitor needs to visit this node."""
        pass

    def bind_parameters(self) -> SqlBindParameters:
        """Execution parameters when running a query containing this expression.

        * See: https://docs.sqlalchemy.org/en/14/core/tutorial.html#using-textual-sql
        * Generally only defined for string expressions.
        """
        # Default: no bind parameters.
        return SqlBindParameters()

    def parent_nodes(self) -> Sequence[SqlExpressionNode]:  # noqa: D
        # Child expressions feeding into this node (e.g. function arguments).
        return self._parent_nodes

    def as_column_reference_expression(self) -> Optional[SqlColumnReferenceExpression]:
        """If this is a column reference expression, return self."""
        return None

    def as_string_expression(self) -> Optional[SqlStringExpression]:
        """If this is a string expression, return self."""
        return None

    def rewrite(
        self,
        column_replacements: Optional[SqlColumnReplacements] = None,
        should_render_table_alias: Optional[bool] = None,
    ) -> SqlExpressionNode:
        """Return the same semantic expression but with re-written according to the input.

        Args:
            column_replacements: Replaces column references according to this map.
            should_render_table_alias: Change if table aliases should be rendered for column reference expressions.
        """
        pass

    def lineage(self) -> SqlExpressionTreeLineage:
        """Returns all nodes in the paths from this node to the root nodes."""
        pass

    def _parents_match(self, other: SqlExpressionNode) -> bool:  # noqa: D
        # Pairwise-compare parents; zip_longest pads with None so differing lengths compare unequal.
        return all(x == y for x, y in itertools.zip_longest(self.parent_nodes, other.parent_nodes))

    def matches(self, other: SqlExpressionNode) -> bool:
        """Similar to equals - returns true if these expressions are equivalent."""
        pass
class SqlStringLiteralExpression(SqlExpressionNode):
    """A string literal like 'foo'. It shouldn't include delimiters as it should be added during rendering."""

    def __init__(self, literal_value: str) -> None:  # noqa: D
        # A literal is a leaf node: it has no parent expressions.
        self._literal_value = literal_value
        super().__init__(node_id=self.create_unique_id(), parent_nodes=[])

    def id_prefix(cls) -> IdPrefix:  # noqa: D
        return StaticIdPrefix.SQL_EXPR_STRING_LITERAL_PREFIX

    def literal_value(self) -> str:  # noqa: D
        return self._literal_value

    def accept(self, visitor: SqlExpressionNodeVisitor[VisitorOutputT]) -> VisitorOutputT:  # noqa: D
        # Visitor-pattern dispatch to the string-literal handler.
        return visitor.visit_string_literal_expr(self)

    def description(self) -> str:  # noqa: D
        return f"String Literal: {self._literal_value}"

    def displayed_properties(self) -> Sequence[DisplayedProperty]:  # noqa: D
        # Extend the base node's displayed properties with the literal value.
        return (*tuple(super().displayed_properties), DisplayedProperty("value", self._literal_value))

    def requires_parenthesis(self) -> bool:  # noqa: D
        # A literal is atomic and never needs wrapping parenthesis.
        return False

    def bind_parameters(self) -> SqlBindParameters:  # noqa: D
        # Literals carry no execution-time bind parameters.
        return SqlBindParameters()

    def __repr__(self) -> str:  # noqa: D
        return f"{self.__class__.__name__}(node_id={self.node_id}, literal_value={self.literal_value})"

    def rewrite(  # noqa: D
        self,
        column_replacements: Optional[SqlColumnReplacements] = None,
        should_render_table_alias: Optional[bool] = None,
    ) -> SqlExpressionNode:
        # Rewriting never affects a literal — return it unchanged.
        return self

    def lineage(self) -> SqlExpressionTreeLineage:  # noqa: D
        return SqlExpressionTreeLineage(other_exprs=(self,))

    def matches(self, other: SqlExpressionNode) -> bool:  # noqa: D
        # Equivalent iff the other node is also a string literal with the same value.
        return isinstance(other, SqlStringLiteralExpression) and self.literal_value == other.literal_value
class SqlColumnReference:
"""Used with string expressions to specify what columns are referred to in the string expression."""
table_alias: str
column_name: str
class SqlColumnReferenceExpression(SqlExpressionNode):
"""An expression that evaluates to the value of a column in one of the sources in the select query.
e.g. my_table.my_column
"""
def __init__(self, col_ref: SqlColumnReference, should_render_table_alias: bool = True) -> None:
"""Constructor.
Args:
col_ref: the associated column reference.
should_render_table_alias: When converting this to SQL text, whether the table alias needed to be included.
e.g. "foo.bar" vs "bar".
"""
self._col_ref = col_ref
self._should_render_table_alias = should_render_table_alias
super().__init__(node_id=self.create_unique_id(), parent_nodes=[])
def id_prefix(cls) -> IdPrefix: # noqa: D
return StaticIdPrefix.SQL_EXPR_COLUMN_REFERENCE_ID_PREFIX
def accept(self, visitor: SqlExpressionNodeVisitor[VisitorOutputT]) -> VisitorOutputT: # noqa: D
return visitor.visit_column_reference_expr(self)
def col_ref(self) -> SqlColumnReference: # noqa: D
return self._col_ref
def description(self) -> str: # noqa: D
return f"Column: {self.col_ref}"
def displayed_properties(self) -> Sequence[DisplayedProperty]: # noqa: D
return tuple(super().displayed_properties) + (DisplayedProperty("col_ref", self.col_ref),)
def requires_parenthesis(self) -> bool: # noqa: D
return False
def as_column_reference_expression(self) -> Optional[SqlColumnReferenceExpression]: # noqa:
return self
def rewrite( # noqa: D
self,
column_replacements: Optional[SqlColumnReplacements] = None,
should_render_table_alias: Optional[bool] = None,
) -> SqlExpressionNode:
# TODO: Hack to work around the fact our test data set contains "user", which is a reserved keyword.
# We should migrate "user" -> "user_id" in the test set.
# This will force "user" to be rendered as "table_alias.user"
if self.col_ref.column_name == "user":
should_render_table_alias = True
if column_replacements:
replacement = column_replacements.get_replacement(self.col_ref)
if replacement:
if should_render_table_alias is not None:
return replacement.rewrite(should_render_table_alias=should_render_table_alias)
else:
return replacement
else:
if should_render_table_alias is not None:
return SqlColumnReferenceExpression(
col_ref=self.col_ref, should_render_table_alias=should_render_table_alias
)
return self
if should_render_table_alias is not None:
return SqlColumnReferenceExpression(
col_ref=self.col_ref, should_render_table_alias=should_render_table_alias
)
return SqlColumnReferenceExpression(
col_ref=self.col_ref, should_render_table_alias=self.should_render_table_alias
)
def lineage(self) -> SqlExpressionTreeLineage: # noqa: D
return SqlExpressionTreeLineage(column_reference_exprs=(self,))
def should_render_table_alias(self) -> bool: # noqa: D
return self._should_render_table_alias
def matches(self, other: SqlExpressionNode) -> bool: # noqa: D
if not isinstance(other, SqlColumnReferenceExpression):
return False
return self.col_ref == other.col_ref
class SqlBetweenExpression(SqlExpressionNode):
"""A BETWEEN clause like `column BETWEEN val1 AND val2`."""
def __init__( # noqa: D
self, column_arg: SqlExpressionNode, start_expr: SqlExpressionNode, end_expr: SqlExpressionNode
) -> None:
self._column_arg = column_arg
self._start_expr = start_expr
self._end_expr = end_expr
super().__init__(node_id=self.create_unique_id(), parent_nodes=[column_arg, start_expr, end_expr])
def id_prefix(cls) -> IdPrefix: # noqa: D
return StaticIdPrefix.SQL_EXPR_BETWEEN_PREFIX
def requires_parenthesis(self) -> bool: # noqa: D
return False
def accept(self, visitor: SqlExpressionNodeVisitor[VisitorOutputT]) -> VisitorOutputT: # noqa: D
return visitor.visit_between_expr(self)
def description(self) -> str: # noqa: D
return "BETWEEN operator"
def column_arg(self) -> SqlExpressionNode: # noqa: D
return self._column_arg
def start_expr(self) -> SqlExpressionNode: # noqa: D
return self._start_expr
def end_expr(self) -> SqlExpressionNode: # noqa: D
return self._end_expr
def rewrite( # noqa: D
self,
column_replacements: Optional[SqlColumnReplacements] = None,
should_render_table_alias: Optional[bool] = None,
) -> SqlExpressionNode:
return SqlBetweenExpression(
column_arg=self.column_arg.rewrite(column_replacements, should_render_table_alias),
start_expr=self.start_expr.rewrite(column_replacements, should_render_table_alias),
end_expr=self.end_expr.rewrite(column_replacements, should_render_table_alias),
)
def lineage(self) -> SqlExpressionTreeLineage: # noqa: D
return SqlExpressionTreeLineage.combine(
tuple(x.lineage for x in self.parent_nodes) + (SqlExpressionTreeLineage(other_exprs=(self,)),)
)
def matches(self, other: SqlExpressionNode) -> bool: # noqa: D
if not isinstance(other, SqlBetweenExpression):
return False
return self._parents_match(other)
ISO8601_PYTHON_FORMAT = "%Y-%m-%d"
The provided code snippet includes necessary dependencies for implementing the `_make_time_range_comparison_expr` function. Write a Python function `def _make_time_range_comparison_expr( table_alias: str, column_alias: str, time_range_constraint: TimeRangeConstraint ) -> SqlExpressionNode` to solve the following problem:
Build an expression like "ds BETWEEN CAST('2020-01-01' AS TIMESTAMP) AND CAST('2020-01-02' AS TIMESTAMP).
Here is the function:
def _make_time_range_comparison_expr(
table_alias: str, column_alias: str, time_range_constraint: TimeRangeConstraint
) -> SqlExpressionNode:
"""Build an expression like "ds BETWEEN CAST('2020-01-01' AS TIMESTAMP) AND CAST('2020-01-02' AS TIMESTAMP)."""
# TODO: Update when adding < day granularity support.
return SqlBetweenExpression(
column_arg=SqlColumnReferenceExpression(
SqlColumnReference(
table_alias=table_alias,
column_name=column_alias,
)
),
start_expr=SqlStringLiteralExpression(
literal_value=time_range_constraint.start_time.strftime(ISO8601_PYTHON_FORMAT),
),
end_expr=SqlStringLiteralExpression(
literal_value=time_range_constraint.end_time.strftime(ISO8601_PYTHON_FORMAT),
),
) | Build an expression like "ds BETWEEN CAST('2020-01-01' AS TIMESTAMP) AND CAST('2020-01-02' AS TIMESTAMP). |
179,407 | from __future__ import annotations
import collections
from dataclasses import dataclass
from typing import Dict, Optional, Sequence, Tuple
from metricflow.specs.specs import MeasureSpec, NonAdditiveDimensionSpec
class GroupedMeasureSpecsByAdditiveness:
"""Results after grouping measures by their additive properties."""
grouped_semi_additive_measures: Sequence[Tuple[MeasureSpec, ...]]
additive_measures: Tuple[MeasureSpec, ...]
def measures_by_additiveness(self) -> Dict[Optional[NonAdditiveDimensionSpec], Tuple[MeasureSpec, ...]]:
"""Returns a mapping from additiveness spec to a tuple of measure specs.
This is useful if you wish to consume the tuples of MeasureSpecs in a single pass without having to
divide calls up by the existence of an additiveness specification
"""
additiveness_to_measures: Dict[Optional[NonAdditiveDimensionSpec], Tuple[MeasureSpec, ...]] = {}
if self.additive_measures:
additiveness_to_measures[None] = self.additive_measures
for grouped_specs in self.grouped_semi_additive_measures:
assert len(grouped_specs) > 0, "received empty set of measure specs, this should not happen!"
# These all have the same additiveness spec value
non_additive_spec = grouped_specs[0].non_additive_dimension_spec
additiveness_to_measures[non_additive_spec] = grouped_specs
return additiveness_to_measures
class MeasureSpec(InstanceSpec): # noqa: D
element_name: str
non_additive_dimension_spec: Optional[NonAdditiveDimensionSpec] = None
fill_nulls_with: Optional[int] = None
def from_name(name: str) -> MeasureSpec:
"""Construct from a name e.g. listing__ds__month."""
return MeasureSpec(element_name=name)
def from_reference(reference: MeasureReference) -> MeasureSpec:
"""Initialize from a measure reference instance."""
return MeasureSpec(element_name=reference.element_name)
def qualified_name(self) -> str: # noqa: D
return self.element_name
def reference(self) -> MeasureReference: # noqa: D
return MeasureReference(element_name=self.element_name)
def accept(self, visitor: InstanceSpecVisitor[VisitorOutputT]) -> VisitorOutputT: # noqa: D
return visitor.visit_measure_spec(self)
def as_spec_set(self) -> InstanceSpecSet:
return InstanceSpecSet(measure_specs=(self,))
The provided code snippet includes necessary dependencies for implementing the `group_measure_specs_by_additiveness` function. Write a Python function `def group_measure_specs_by_additiveness(measure_specs: Sequence[MeasureSpec]) -> GroupedMeasureSpecsByAdditiveness` to solve the following problem:
Bucket the provided measure specs by. - Additive Measures - Semi-additive measures containing the same non-additive dimension attributes
Here is the function:
def group_measure_specs_by_additiveness(measure_specs: Sequence[MeasureSpec]) -> GroupedMeasureSpecsByAdditiveness:
"""Bucket the provided measure specs by.
- Additive Measures
- Semi-additive measures containing the same non-additive dimension attributes
"""
bucket = collections.defaultdict(list)
additive_bucket = []
for spec in measure_specs:
non_additive_dimension_spec = spec.non_additive_dimension_spec
if non_additive_dimension_spec:
bucket[non_additive_dimension_spec.bucket_hash].append(spec)
else:
additive_bucket.append(spec)
return GroupedMeasureSpecsByAdditiveness(
grouped_semi_additive_measures=tuple(tuple(measures) for measures in bucket.values()),
additive_measures=tuple(additive_bucket),
) | Bucket the provided measure specs by. - Additive Measures - Semi-additive measures containing the same non-additive dimension attributes |
179,408 | from __future__ import annotations
import datetime as dt
import logging
import pathlib
import traceback
from functools import update_wrapper, wraps
from typing import Any, Callable, List, Optional
import click
from dateutil.parser import parse
import metricflow.cli.custom_click_types as click_custom
from metricflow.cli.cli_context import CLIContext
def start_end_time_options(function: Callable) -> Callable:
"""Options for start_time and end_time."""
function = click.option(
"--start-time",
type=str,
default=None,
help="Optional iso8601 timestamp to constraint the start time of the data (inclusive)",
callback=lambda ctx, param, value: convert_to_datetime(value),
)(function)
function = click.option(
"--end-time",
type=str,
default=None,
help="Optional iso8601 timestamp to constraint the end time of the data (inclusive)",
callback=lambda ctx, param, value: convert_to_datetime(value),
)(function)
return function
def validate_limit(limit: Optional[str]) -> Optional[int]:
"""Validates and transform limit input."""
if limit and not limit.isnumeric():
raise click.BadParameter("limit must be an int. For no limit, do not pass this argument")
return int(limit) if limit else None
The provided code snippet includes necessary dependencies for implementing the `query_options` function. Write a Python function `def query_options(function: Callable) -> Callable` to solve the following problem:
Common options for a query.
Here is the function:
def query_options(function: Callable) -> Callable:
"""Common options for a query."""
function = click.option(
"--order",
type=click_custom.SequenceParamType(),
help='Metrics or group bys to order by ("-" prefix for DESC). For example: --order -ds or --order ds,-revenue',
required=False,
)(function)
function = click.option(
"--limit",
type=str,
help="Limit the number of rows out using an int or leave blank for no limit. For example: --limit 100",
callback=lambda ctx, param, value: validate_limit(value),
)(function)
function = click.option(
"--where",
type=str,
default=None,
help='SQL-like where statement provided as a string. For example: --where "revenue > 100"',
)(function)
function = start_end_time_options(function)
function = click.option(
"--group-by",
type=click_custom.SequenceParamType(),
default="",
help="Dimensions and/or entities to group by: syntax is --group-by ds or for multiple group bys --group-by ds,org",
)(function)
function = click.option(
"--metrics",
type=click_custom.SequenceParamType(min_length=0),
default="",
help="Metrics to query for: syntax is --metrics bookings or for multiple metrics --metrics bookings,messages",
)(function)
return function | Common options for a query. |
179,409 | from __future__ import annotations
import datetime as dt
import logging
import pathlib
import traceback
from functools import update_wrapper, wraps
from typing import Any, Callable, List, Optional
import click
from dateutil.parser import parse
import metricflow.cli.custom_click_types as click_custom
from metricflow.cli.cli_context import CLIContext
def parse_comma_separated_inputs(value: Optional[str]) -> Optional[List[str]]: # noqa: D
# If comma exist, explode this into a list and return
if value is None:
return None
if "," in value:
return [i.strip() for i in value.split(",")]
# Return a list of the single value
return [value] | null |
179,410 | from __future__ import annotations
import datetime as dt
import logging
import pathlib
import traceback
from functools import update_wrapper, wraps
from typing import Any, Callable, List, Optional
import click
from dateutil.parser import parse
import metricflow.cli.custom_click_types as click_custom
from metricflow.cli.cli_context import CLIContext
logger = logging.getLogger(__name__)
class CLIContext:
"""Context for MetricFlow CLI."""
def __init__(self) -> None:
"""Initialize the CLI context for executing commands.
The dbt_artifacts construct must be loaded in order for logging configuration to work correctly.
"""
self.verbose = False
self._dbt_project_metadata: dbtProjectMetadata = dbtProjectMetadata.load_from_project_path(pathlib.Path.cwd())
self._dbt_artifacts: Optional[dbtArtifacts] = None
self._mf: Optional[MetricFlowEngine] = None
self._sql_client: Optional[SqlClient] = None
self._semantic_manifest: Optional[SemanticManifest] = None
self._semantic_manifest_lookup: Optional[SemanticManifestLookup] = None
# self.log_file_path invokes the dbtRunner. If this is done after the configure_logging call all of the
# dbt CLI logging configuration could be overridden, resulting in lots of things printing to console
self._configure_logging(log_file_path=self.log_file_path)
def _configure_logging(self, log_file_path: pathlib.Path) -> None:
"""Initialize the logging spec for the CLI.
This requires a fully loaded dbt project, including what amounts to a call to dbt debug.
As a practical matter, this should not have much end user impact except in cases where they are
using commands that do not require a working adapter AND the call to dbt debug runs slowly.
In future we may have better API access to the log file location for the project, at which time
we can swap this out and return to full lazy loading for any context attributes that are slow
to initialize.
"""
log_format = "%(asctime)s %(levelname)s %(filename)s:%(lineno)d [%(threadName)s] - %(message)s"
logging.basicConfig(level=logging.INFO, format=log_format)
log_file_handler = TimedRotatingFileHandler(
filename=log_file_path,
# Rotate every day to a new file, keep 7 days worth.
when="D",
interval=1,
backupCount=7,
)
formatter = logging.Formatter(fmt=log_format)
log_file_handler.setFormatter(formatter)
log_file_handler.setLevel(logging.INFO)
root_logger = logging.getLogger()
# StreamHandler to the console would have been setup by logging.basicConfig
for handler in root_logger.handlers:
handler.setLevel(logging.CRITICAL)
root_logger.addHandler(log_file_handler)
def dbt_project_metadata(self) -> dbtProjectMetadata:
"""Property accessor for dbt project metadata, useful in cases where the full manifest load is not needed."""
return self._dbt_project_metadata
def dbt_artifacts(self) -> dbtArtifacts:
"""Property accessor for all dbt artifacts, used for powering the sql client (among other things)."""
if self._dbt_artifacts is None:
self._dbt_artifacts = dbtArtifacts.load_from_project_metadata(self._dbt_project_metadata)
return self._dbt_artifacts
def log_file_path(self) -> pathlib.Path:
"""Returns the location of the log file path for this CLI invocation."""
# The dbt Project.log_path attribute is currently sourced from the final runtime config value accessible
# through the CLI state flags. As such, it will deviate from the default based on the DBT_LOG_PATH environment
# variable. Should this behavior change, we will need to update this call.
return pathlib.Path(self._dbt_project_metadata.project.log_path, "metricflow.log")
def sql_client(self) -> SqlClient:
"""Property accessor for the sql_client class used in the CLI."""
if self._sql_client is None:
self._sql_client = AdapterBackedSqlClient(self.dbt_artifacts.adapter)
return self._sql_client
def run_health_checks(self) -> Dict[str, Dict[str, str]]:
"""Execute the DB health checks."""
checks_to_run = [
("SELECT 1", lambda: self.sql_client.execute("SELECT 1")),
]
results: Dict[str, Dict[str, str]] = {}
for step, check in checks_to_run:
status = "SUCCESS"
err_string = ""
try:
resp = check()
logger.info(f"Health Check Item {step}: succeeded" + f" with response {str(resp)}" if resp else None)
except Exception as e:
status = "FAIL"
err_string = str(e)
logger.error(f"Health Check Item {step}: failed with error {err_string}")
results[f"{self.sql_client.sql_engine_type} - {step}"] = {
"status": status,
"error_message": err_string,
}
return results
def mf(self) -> MetricFlowEngine: # noqa: D
if self._mf is None:
self._mf = MetricFlowEngine(
semantic_manifest_lookup=self.semantic_manifest_lookup,
sql_client=self.sql_client,
)
assert self._mf is not None
return self._mf
def _build_semantic_manifest_lookup(self) -> None:
"""Get the path to the models and create a corresponding SemanticManifestLookup."""
self._semantic_manifest_lookup = SemanticManifestLookup(self.semantic_manifest)
def semantic_manifest_lookup(self) -> SemanticManifestLookup: # noqa: D
if self._semantic_manifest_lookup is None:
self._build_semantic_manifest_lookup()
assert self._semantic_manifest_lookup is not None
return self._semantic_manifest_lookup
def semantic_manifest(self) -> SemanticManifest:
"""Retrieve the semantic manifest from the dbt project root."""
return self.dbt_artifacts.semantic_manifest
The provided code snippet includes necessary dependencies for implementing the `exception_handler` function. Write a Python function `def exception_handler(func: Callable[..., Any]) -> Callable[..., Any]` to solve the following problem:
Decorator to handle exceptions.
Here is the function:
def exception_handler(func: Callable[..., Any]) -> Callable[..., Any]: # type: ignore[misc]
"""Decorator to handle exceptions."""
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any: # type: ignore[misc]
try:
func(*args, **kwargs)
except Exception as e:
# This will log to the file handlers registered in the root.
logging.exception("Got an exception in the exception handler.")
# Checks if CLIContext has verbose flag set
if isinstance(args[0], CLIContext):
cli_context: CLIContext = args[0]
click.echo(f"\nERROR: {str(e)}\nLog file: {cli_context.log_file_path}")
else:
if not isinstance(args[0], CLIContext):
logger.error(
f"Missing {CLIContext.__name__} as the first argument to the function "
f"{getattr(func, '__name__', repr(func))}"
)
click.echo(f"\nERROR: {str(e)}")
if args and hasattr(args[0], "verbose") and args[0].verbose is True:
click.echo(traceback.format_exc())
exit(1)
return wrapper | Decorator to handle exceptions. |
179,411 | from __future__ import annotations
import datetime as dt
import logging
import pathlib
import traceback
from functools import update_wrapper, wraps
from typing import Any, Callable, List, Optional
import click
from dateutil.parser import parse
import metricflow.cli.custom_click_types as click_custom
from metricflow.cli.cli_context import CLIContext
def dbt_project_file_exists() -> bool:
"""Check that the cwd is a dbt project root. Currently done by checking for existence of dbt_project.yml."""
return pathlib.Path("dbt_project.yml").exists()
The provided code snippet includes necessary dependencies for implementing the `error_if_not_in_dbt_project` function. Write a Python function `def error_if_not_in_dbt_project(func: Callable) -> Callable` to solve the following problem:
Decorator to output an error message and exit if caller is not in a root directory of a dbt project.
Here is the function:
def error_if_not_in_dbt_project(func: Callable) -> Callable:
"""Decorator to output an error message and exit if caller is not in a root directory of a dbt project."""
@click.pass_context
def new_func(ctx: click.core.Context, *args: Any, **kwargs: Any) -> Any: # type: ignore[misc]
if not dbt_project_file_exists():
click.echo(
"❌ Unable to locate 'dbt_project.yml' in the current directory\n"
"In order to run the MetricFlow CLI, you must be running in the root directory of a working dbt project.\n"
"Please check out `https://docs.getdbt.com/reference/commands/init` if you want to get started on building a dbt project."
)
exit(1)
return ctx.invoke(func, *args, **kwargs)
return update_wrapper(new_func, func) | Decorator to output an error message and exit if caller is not in a root directory of a dbt project. |
179,412 | from __future__ import annotations
import datetime as dt
import logging
import pathlib
import signal
import sys
import tempfile
import textwrap
import time
import warnings
from importlib.metadata import version as pkg_version
from typing import Callable, List, Optional, Sequence
import click
import jinja2
import pandas as pd
from dbt_semantic_interfaces.protocols.semantic_manifest import SemanticManifest
from dbt_semantic_interfaces.validations.semantic_manifest_validator import SemanticManifestValidator
from dbt_semantic_interfaces.validations.validator_helpers import SemanticManifestValidationResults
from halo import Halo
from packaging.version import parse
from update_checker import UpdateChecker
import metricflow.cli.custom_click_types as click_custom
from metricflow.cli import PACKAGE_NAME
from metricflow.cli.cli_context import CLIContext
from metricflow.cli.constants import DEFAULT_RESULT_DECIMAL_PLACES, MAX_LIST_OBJECT_ELEMENTS
from metricflow.cli.dbt_connectors.dbt_config_accessor import dbtArtifacts
from metricflow.cli.tutorial import (
dbtMetricFlowTutorialHelper,
)
from metricflow.cli.utils import (
dbt_project_file_exists,
error_if_not_in_dbt_project,
exception_handler,
query_options,
start_end_time_options,
)
from metricflow.dag.dag_visualization import display_dag_as_svg
from metricflow.engine.metricflow_engine import MetricFlowExplainResult, MetricFlowQueryRequest, MetricFlowQueryResult
from metricflow.model.data_warehouse_model_validator import DataWarehouseModelValidator
from metricflow.telemetry.models import TelemetryLevel
from metricflow.telemetry.reporter import TelemetryReporter, log_call
logger = logging.getLogger(__name__)
PACKAGE_NAME = "metricflow"
class CLIContext:
"""Context for MetricFlow CLI."""
def __init__(self) -> None:
"""Initialize the CLI context for executing commands.
The dbt_artifacts construct must be loaded in order for logging configuration to work correctly.
"""
self.verbose = False
self._dbt_project_metadata: dbtProjectMetadata = dbtProjectMetadata.load_from_project_path(pathlib.Path.cwd())
self._dbt_artifacts: Optional[dbtArtifacts] = None
self._mf: Optional[MetricFlowEngine] = None
self._sql_client: Optional[SqlClient] = None
self._semantic_manifest: Optional[SemanticManifest] = None
self._semantic_manifest_lookup: Optional[SemanticManifestLookup] = None
# self.log_file_path invokes the dbtRunner. If this is done after the configure_logging call all of the
# dbt CLI logging configuration could be overridden, resulting in lots of things printing to console
self._configure_logging(log_file_path=self.log_file_path)
def _configure_logging(self, log_file_path: pathlib.Path) -> None:
"""Initialize the logging spec for the CLI.
This requires a fully loaded dbt project, including what amounts to a call to dbt debug.
As a practical matter, this should not have much end user impact except in cases where they are
using commands that do not require a working adapter AND the call to dbt debug runs slowly.
In future we may have better API access to the log file location for the project, at which time
we can swap this out and return to full lazy loading for any context attributes that are slow
to initialize.
"""
log_format = "%(asctime)s %(levelname)s %(filename)s:%(lineno)d [%(threadName)s] - %(message)s"
logging.basicConfig(level=logging.INFO, format=log_format)
log_file_handler = TimedRotatingFileHandler(
filename=log_file_path,
# Rotate every day to a new file, keep 7 days worth.
when="D",
interval=1,
backupCount=7,
)
formatter = logging.Formatter(fmt=log_format)
log_file_handler.setFormatter(formatter)
log_file_handler.setLevel(logging.INFO)
root_logger = logging.getLogger()
# StreamHandler to the console would have been setup by logging.basicConfig
for handler in root_logger.handlers:
handler.setLevel(logging.CRITICAL)
root_logger.addHandler(log_file_handler)
def dbt_project_metadata(self) -> dbtProjectMetadata:
"""Property accessor for dbt project metadata, useful in cases where the full manifest load is not needed."""
return self._dbt_project_metadata
def dbt_artifacts(self) -> dbtArtifacts:
"""Property accessor for all dbt artifacts, used for powering the sql client (among other things)."""
if self._dbt_artifacts is None:
self._dbt_artifacts = dbtArtifacts.load_from_project_metadata(self._dbt_project_metadata)
return self._dbt_artifacts
def log_file_path(self) -> pathlib.Path:
"""Returns the location of the log file path for this CLI invocation."""
# The dbt Project.log_path attribute is currently sourced from the final runtime config value accessible
# through the CLI state flags. As such, it will deviate from the default based on the DBT_LOG_PATH environment
# variable. Should this behavior change, we will need to update this call.
return pathlib.Path(self._dbt_project_metadata.project.log_path, "metricflow.log")
def sql_client(self) -> SqlClient:
"""Property accessor for the sql_client class used in the CLI."""
if self._sql_client is None:
self._sql_client = AdapterBackedSqlClient(self.dbt_artifacts.adapter)
return self._sql_client
def run_health_checks(self) -> Dict[str, Dict[str, str]]:
"""Execute the DB health checks."""
checks_to_run = [
("SELECT 1", lambda: self.sql_client.execute("SELECT 1")),
]
results: Dict[str, Dict[str, str]] = {}
for step, check in checks_to_run:
status = "SUCCESS"
err_string = ""
try:
resp = check()
logger.info(f"Health Check Item {step}: succeeded" + f" with response {str(resp)}" if resp else None)
except Exception as e:
status = "FAIL"
err_string = str(e)
logger.error(f"Health Check Item {step}: failed with error {err_string}")
results[f"{self.sql_client.sql_engine_type} - {step}"] = {
"status": status,
"error_message": err_string,
}
return results
def mf(self) -> MetricFlowEngine: # noqa: D
if self._mf is None:
self._mf = MetricFlowEngine(
semantic_manifest_lookup=self.semantic_manifest_lookup,
sql_client=self.sql_client,
)
assert self._mf is not None
return self._mf
def _build_semantic_manifest_lookup(self) -> None:
"""Get the path to the models and create a corresponding SemanticManifestLookup."""
self._semantic_manifest_lookup = SemanticManifestLookup(self.semantic_manifest)
def semantic_manifest_lookup(self) -> SemanticManifestLookup: # noqa: D
if self._semantic_manifest_lookup is None:
self._build_semantic_manifest_lookup()
assert self._semantic_manifest_lookup is not None
return self._semantic_manifest_lookup
def semantic_manifest(self) -> SemanticManifest:
"""Retrieve the semantic manifest from the dbt project root."""
return self.dbt_artifacts.semantic_manifest
def cli(cfg: CLIContext, verbose: bool) -> None: # noqa: D
# Some HTTP logging callback somewhere is failing to close its SSL connections correctly.
# For now, filter those warnings so they don't pop up in CLI stderr
# note - this should be addressed as adapter connection issues might produce these as well
warnings.filterwarnings("ignore", category=ResourceWarning, message="unclosed.*<ssl.SSLSocket.*>")
cfg.verbose = verbose
checker = UpdateChecker()
result = checker.check(PACKAGE_NAME, pkg_version(PACKAGE_NAME))
# result is None when an update was not found or a failure occurred
if result:
click.secho(
"‼️ Warning: A new version of the MetricFlow CLI is available.",
bold=True,
fg="red",
)
click.echo(
f"💡 Please update to version {result.available_version}, released {result.release_date} by running:\n"
f"\t$ pip install --upgrade {PACKAGE_NAME}\n",
)
# Cancel queries submitted to the DW if the user precess CTRL + c / process is terminated.
# Note: docs unclear on the type for the 'frame' argument.
def exit_signal_handler(signal_type: int, frame) -> None: # type: ignore
if signal_type == signal.SIGINT:
click.echo("Got SIGINT")
elif signal_type == signal.SIGTERM:
click.echo("Got SIGTERM")
else:
# Shouldn't happen since this should ony be registered for SIGINT / SIGTERM.
click.echo(f"Got unhandled signal {signal_type}")
return
try:
# Note: we may wish to add support for canceling all queries if zombie queries are a problem
logger.info("Closing client connections")
cfg.sql_client.close()
finally:
sys.exit(-1)
signal.signal(signal.SIGINT, exit_signal_handler)
signal.signal(signal.SIGTERM, exit_signal_handler) | null |
179,413 | from __future__ import annotations
import datetime as dt
import logging
import pathlib
import signal
import sys
import tempfile
import textwrap
import time
import warnings
from importlib.metadata import version as pkg_version
from typing import Callable, List, Optional, Sequence
import click
import jinja2
import pandas as pd
from dbt_semantic_interfaces.protocols.semantic_manifest import SemanticManifest
from dbt_semantic_interfaces.validations.semantic_manifest_validator import SemanticManifestValidator
from dbt_semantic_interfaces.validations.validator_helpers import SemanticManifestValidationResults
from halo import Halo
from packaging.version import parse
from update_checker import UpdateChecker
import metricflow.cli.custom_click_types as click_custom
from metricflow.cli import PACKAGE_NAME
from metricflow.cli.cli_context import CLIContext
from metricflow.cli.constants import DEFAULT_RESULT_DECIMAL_PLACES, MAX_LIST_OBJECT_ELEMENTS
from metricflow.cli.dbt_connectors.dbt_config_accessor import dbtArtifacts
from metricflow.cli.tutorial import (
dbtMetricFlowTutorialHelper,
)
from metricflow.cli.utils import (
dbt_project_file_exists,
error_if_not_in_dbt_project,
exception_handler,
query_options,
start_end_time_options,
)
from metricflow.dag.dag_visualization import display_dag_as_svg
from metricflow.engine.metricflow_engine import MetricFlowExplainResult, MetricFlowQueryRequest, MetricFlowQueryResult
from metricflow.model.data_warehouse_model_validator import DataWarehouseModelValidator
from metricflow.telemetry.models import TelemetryLevel
from metricflow.telemetry.reporter import TelemetryReporter, log_call
class CLIContext:
    """Context for MetricFlow CLI.

    Eagerly loads dbt project metadata (needed to configure logging), while the
    heavier collaborators (dbt artifacts, SQL client, engine, manifest lookup)
    are created lazily on first property access.

    NOTE: the accessors below are properties — `__init__` and in-file callers
    (e.g. `cfg.mf`, `self.log_file_path`) use attribute access, not calls.
    """

    def __init__(self) -> None:
        """Initialize the CLI context for executing commands.

        The dbt_artifacts construct must be loaded in order for logging configuration to work correctly.
        """
        self.verbose = False
        self._dbt_project_metadata: dbtProjectMetadata = dbtProjectMetadata.load_from_project_path(pathlib.Path.cwd())
        # Lazily-initialized collaborators; populated by the properties below.
        self._dbt_artifacts: Optional[dbtArtifacts] = None
        self._mf: Optional[MetricFlowEngine] = None
        self._sql_client: Optional[SqlClient] = None
        self._semantic_manifest: Optional[SemanticManifest] = None
        self._semantic_manifest_lookup: Optional[SemanticManifestLookup] = None
        # self.log_file_path invokes the dbtRunner. If this is done after the configure_logging call all of the
        # dbt CLI logging configuration could be overridden, resulting in lots of things printing to console
        self._configure_logging(log_file_path=self.log_file_path)

    def _configure_logging(self, log_file_path: pathlib.Path) -> None:
        """Initialize the logging spec for the CLI.

        This requires a fully loaded dbt project, including what amounts to a call to dbt debug.
        As a practical matter, this should not have much end user impact except in cases where they are
        using commands that do not require a working adapter AND the call to dbt debug runs slowly.
        In future we may have better API access to the log file location for the project, at which time
        we can swap this out and return to full lazy loading for any context attributes that are slow
        to initialize.
        """
        log_format = "%(asctime)s %(levelname)s %(filename)s:%(lineno)d [%(threadName)s] - %(message)s"
        logging.basicConfig(level=logging.INFO, format=log_format)
        log_file_handler = TimedRotatingFileHandler(
            filename=log_file_path,
            # Rotate every day to a new file, keep 7 days worth.
            when="D",
            interval=1,
            backupCount=7,
        )
        formatter = logging.Formatter(fmt=log_format)
        log_file_handler.setFormatter(formatter)
        log_file_handler.setLevel(logging.INFO)
        root_logger = logging.getLogger()
        # StreamHandler to the console would have been setup by logging.basicConfig;
        # effectively silence it so INFO+ records only go to the rotating log file.
        for handler in root_logger.handlers:
            handler.setLevel(logging.CRITICAL)
        root_logger.addHandler(log_file_handler)

    @property
    def dbt_project_metadata(self) -> dbtProjectMetadata:
        """Property accessor for dbt project metadata, useful in cases where the full manifest load is not needed."""
        return self._dbt_project_metadata

    @property
    def dbt_artifacts(self) -> dbtArtifacts:
        """Property accessor for all dbt artifacts, used for powering the sql client (among other things)."""
        if self._dbt_artifacts is None:
            self._dbt_artifacts = dbtArtifacts.load_from_project_metadata(self._dbt_project_metadata)
        return self._dbt_artifacts

    @property
    def log_file_path(self) -> pathlib.Path:
        """Returns the location of the log file path for this CLI invocation."""
        # The dbt Project.log_path attribute is currently sourced from the final runtime config value accessible
        # through the CLI state flags. As such, it will deviate from the default based on the DBT_LOG_PATH environment
        # variable. Should this behavior change, we will need to update this call.
        return pathlib.Path(self._dbt_project_metadata.project.log_path, "metricflow.log")

    @property
    def sql_client(self) -> SqlClient:
        """Property accessor for the sql_client class used in the CLI."""
        if self._sql_client is None:
            self._sql_client = AdapterBackedSqlClient(self.dbt_artifacts.adapter)
        return self._sql_client

    def run_health_checks(self) -> Dict[str, Dict[str, str]]:
        """Execute the DB health checks.

        Returns:
            Mapping of "<engine> - <check name>" to {"status", "error_message"}.
        """
        checks_to_run = [
            ("SELECT 1", lambda: self.sql_client.execute("SELECT 1")),
        ]
        results: Dict[str, Dict[str, str]] = {}
        for step, check in checks_to_run:
            status = "SUCCESS"
            err_string = ""
            try:
                resp = check()
                # BUG FIX: the original `logger.info(f"..." + f"..." if resp else None)`
                # parsed as `(a + b) if resp else None`, so a falsy response logged `None`.
                message = f"Health Check Item {step}: succeeded"
                if resp:
                    message += f" with response {str(resp)}"
                logger.info(message)
            except Exception as e:
                status = "FAIL"
                err_string = str(e)
                logger.error(f"Health Check Item {step}: failed with error {err_string}")
            results[f"{self.sql_client.sql_engine_type} - {step}"] = {
                "status": status,
                "error_message": err_string,
            }
        return results

    @property
    def mf(self) -> MetricFlowEngine:  # noqa: D
        if self._mf is None:
            self._mf = MetricFlowEngine(
                semantic_manifest_lookup=self.semantic_manifest_lookup,
                sql_client=self.sql_client,
            )
        assert self._mf is not None
        return self._mf

    def _build_semantic_manifest_lookup(self) -> None:
        """Get the path to the models and create a corresponding SemanticManifestLookup."""
        self._semantic_manifest_lookup = SemanticManifestLookup(self.semantic_manifest)

    @property
    def semantic_manifest_lookup(self) -> SemanticManifestLookup:  # noqa: D
        if self._semantic_manifest_lookup is None:
            self._build_semantic_manifest_lookup()
        assert self._semantic_manifest_lookup is not None
        return self._semantic_manifest_lookup

    @property
    def semantic_manifest(self) -> SemanticManifest:
        """Retrieve the semantic manifest from the dbt project root."""
        return self.dbt_artifacts.semantic_manifest
class dbtMetricFlowTutorialHelper:
"""Helper class for managing tutorial related actions (ie., generating sample files)."""
SAMPLE_DBT_MODEL_DIRECTORY = "sample_dbt_models"
SAMPLE_MODELS_DIRECTORY = SAMPLE_DBT_MODEL_DIRECTORY + "/sample_models"
SAMPLE_SEED_DIRECTORY = SAMPLE_DBT_MODEL_DIRECTORY + "/seeds"
SAMPLE_SEMANTIC_MANIFEST = SAMPLE_DBT_MODEL_DIRECTORY + "/semantic_manifest.json"
SAMPLE_SOURCES_FILE = "sources.yml"
def generate_model_files(model_path: pathlib.Path, profile_schema: str) -> None:
"""Generates the sample model files to the given dbt model path."""
sample_model_path = pathlib.Path(__file__).parent / dbtMetricFlowTutorialHelper.SAMPLE_MODELS_DIRECTORY
shutil.copytree(src=sample_model_path, dst=model_path)
# Generate the sources.yml file with the schema given in profiles.yml
sample_sources_path = (
pathlib.Path(__file__).parent
/ dbtMetricFlowTutorialHelper.SAMPLE_DBT_MODEL_DIRECTORY
/ dbtMetricFlowTutorialHelper.SAMPLE_SOURCES_FILE
)
with open(sample_sources_path) as file:
contents = Template(file.read()).substitute({"system_schema": profile_schema})
dest_sources_path = pathlib.Path(model_path) / dbtMetricFlowTutorialHelper.SAMPLE_SOURCES_FILE
with open(dest_sources_path, "w") as file:
file.write(contents)
def generate_seed_files(seed_path: pathlib.Path) -> None:
"""Generates the sample seed files to the given dbt seed path."""
sample_seed_path = pathlib.Path(__file__).parent / dbtMetricFlowTutorialHelper.SAMPLE_SEED_DIRECTORY
shutil.copytree(src=sample_seed_path, dst=seed_path)
def generate_semantic_manifest_file(manifest_path: pathlib.Path) -> None:
"""Generates the sample semantic manifest to the given dbt semantic manifest path."""
target_path = manifest_path.parent
if not target_path.exists():
target_path.mkdir()
sample_manifest_path = pathlib.Path(__file__).parent / dbtMetricFlowTutorialHelper.SAMPLE_SEMANTIC_MANIFEST
shutil.copy(src=sample_manifest_path, dst=manifest_path)
def remove_sample_files(model_path: pathlib.Path, seed_path: pathlib.Path) -> None:
"""Remove the sample files generated."""
if model_path.exists():
shutil.rmtree(model_path)
if seed_path.exists():
shutil.rmtree(seed_path)
def check_if_path_exists(paths: Sequence[pathlib.Path]) -> bool:
"""Check if the given set of paths already exists, return True if any of the paths exists."""
return any(p.exists() for p in paths)
def dbt_project_file_exists() -> bool:
"""Check that the cwd is a dbt project root. Currently done by checking for existence of dbt_project.yml."""
return pathlib.Path("dbt_project.yml").exists()
The provided code snippet includes necessary dependencies for implementing the `tutorial` function. Write a Python function `def tutorial(ctx: click.core.Context, cfg: CLIContext, msg: bool, clean: bool) -> None` to solve the following problem:
Run user through a tutorial.
Here is the function:
def tutorial(ctx: click.core.Context, cfg: CLIContext, msg: bool, clean: bool) -> None:
    """Run user through a tutorial.

    Three modes, chosen by flags:
      * msg:   print the tutorial step list and exit without touching the project.
      * clean: delete previously generated sample files and exit.
      * default: generate sample models/seeds/semantic manifest into the dbt project.

    Args:
        ctx: click invocation context.  # NOTE(review): unused in this body — confirm whether needed.
        cfg: CLI context carrying the dbt project metadata.
        msg: When True, only print the tutorial help message.
        clean: When True, remove the generated sample files.
    """
    help_msg = textwrap.dedent(
        """\
        🤓 Please run the following steps,
        1. Verify that your adapter credentials are correct in `profiles.yml`
        2. Add time spine model to the models directory (https://docs.getdbt.com/docs/build/metricflow-time-spine)
        3. Run `dbt seed`, check to see that the steps related to countries, transactions, customers are passing.
        4. Try validating your data model: `mf validate-configs`
        5. Check out your metrics: `mf list metrics`
        6. Check out dimensions for your metric `mf list dimensions --metrics transactions`
        7. Query your first metric: `mf query --metrics transactions --group-by metric_time --order metric_time`
        8. Show the SQL MetricFlow generates:
            `mf query --metrics transactions --group-by metric_time --order metric_time --explain`
        9. Visualize the plan:
            `mf query --metrics transactions --group-by metric_time --order metric_time --explain --display-plans`
            * This only works if you have graphviz installed - see README.
        10. Add another dimension:
            `mf query --metrics transactions --group-by metric_time,customer__customer_country --order metric_time`
        11. Add a coarser time granularity:
            `mf query --metrics transactions --group-by metric_time__week --order metric_time__week`
        12. Try a more complicated query: mf query --metrics transactions,transaction_usd_na --group-by metric_time,is_large --order metric_time --start-time 2022-03-20 --end-time 2022-04-01.
        13. When you're done with the tutorial, run mf tutorial --clean to delete sample models and seeds.
        """
    )
    # --msg: only reprint the step list; nothing is generated or removed.
    if msg:
        click.echo(help_msg)
        exit()
    # The tutorial can only run from a dbt project root.
    if not dbt_project_file_exists():
        click.echo(
            "Unable to detect dbt project. Please ensure that your current working directory is at the root of the dbt project."
        )
        exit()
    # TODO: Health checks
    # Load the metadata from dbt project
    try:
        dbt_project_metadata = cfg.dbt_project_metadata
        dbt_paths = dbt_project_metadata.dbt_paths
        # Sample artifacts are generated under the project's first model/seed path.
        model_path = pathlib.Path(dbt_paths.model_paths[0]) / "sample_model"
        seed_path = pathlib.Path(dbt_paths.seed_paths[0]) / "sample_seed"
        manifest_path = pathlib.Path(dbt_paths.target_path) / "semantic_manifest.json"
    except Exception as e:
        click.echo(f"Unable to parse path metadata from dbt project.\nERROR: {str(e)}")
        exit(1)
    # Remove sample files from dbt project
    if clean:
        click.confirm("Would you like to remove all the sample files?", abort=True)
        spinner = Halo(text="Removing sample files...", spinner="dots")
        spinner.start()
        try:
            dbtMetricFlowTutorialHelper.remove_sample_files(model_path=model_path, seed_path=seed_path)
            spinner.succeed("🗑️ Sample files has been removed.")
            exit()
        except Exception as e:
            spinner.fail(f"❌ Unable to remove sample files.\nERROR: {str(e)}")
            exit(1)
    # Default mode: explain what will be generated, then confirm before writing.
    click.echo(
        textwrap.dedent(
            f"""\
            To begin building and querying metrics, you must define semantic models and
            metric configuration files in your dbt project. dbt will use these files to generate a
            semantic manifest artifact, which MetricFlow will use to create a semantic graph for querying.
            As part of this tutorial, we will generate the following files to help you get started:
            📜 model files -> {model_path.absolute().as_posix()}
            🌱 seed files -> {seed_path.absolute().as_posix()}
            ✅ semantic manifest json file -> {manifest_path.absolute().as_posix()}
            """
        )
    )
    click.confirm("Continue and generate the files?", abort=True)
    # Generate sample files into dbt project
    if dbtMetricFlowTutorialHelper.check_if_path_exists([model_path, seed_path]):
        # Re-running the tutorial: ask before clobbering previous sample output.
        click.confirm("There are existing files in the paths above, would you like to overwrite them?", abort=True)
        dbtMetricFlowTutorialHelper.remove_sample_files(model_path=model_path, seed_path=seed_path)
    spinner = Halo(text="Generating sample files...", spinner="dots")
    spinner.start()
    dbtMetricFlowTutorialHelper.generate_model_files(model_path=model_path, profile_schema=dbt_project_metadata.schema)
    dbtMetricFlowTutorialHelper.generate_seed_files(seed_path=seed_path)
    dbtMetricFlowTutorialHelper.generate_semantic_manifest_file(manifest_path=manifest_path)
    spinner.succeed("📜 Sample files has been generated.")
    click.echo(help_msg)
    click.echo("💡 Run `mf tutorial --msg` to see this message again without executing everything else")
    exit()
179,414 | from __future__ import annotations
import datetime as dt
import logging
import pathlib
import signal
import sys
import tempfile
import textwrap
import time
import warnings
from importlib.metadata import version as pkg_version
from typing import Callable, List, Optional, Sequence
import click
import jinja2
import pandas as pd
from dbt_semantic_interfaces.protocols.semantic_manifest import SemanticManifest
from dbt_semantic_interfaces.validations.semantic_manifest_validator import SemanticManifestValidator
from dbt_semantic_interfaces.validations.validator_helpers import SemanticManifestValidationResults
from halo import Halo
from packaging.version import parse
from update_checker import UpdateChecker
import metricflow.cli.custom_click_types as click_custom
from metricflow.cli import PACKAGE_NAME
from metricflow.cli.cli_context import CLIContext
from metricflow.cli.constants import DEFAULT_RESULT_DECIMAL_PLACES, MAX_LIST_OBJECT_ELEMENTS
from metricflow.cli.dbt_connectors.dbt_config_accessor import dbtArtifacts
from metricflow.cli.tutorial import (
dbtMetricFlowTutorialHelper,
)
from metricflow.cli.utils import (
dbt_project_file_exists,
error_if_not_in_dbt_project,
exception_handler,
query_options,
start_end_time_options,
)
from metricflow.dag.dag_visualization import display_dag_as_svg
from metricflow.engine.metricflow_engine import MetricFlowExplainResult, MetricFlowQueryRequest, MetricFlowQueryResult
from metricflow.model.data_warehouse_model_validator import DataWarehouseModelValidator
from metricflow.telemetry.models import TelemetryLevel
from metricflow.telemetry.reporter import TelemetryReporter, log_call
class CLIContext:
    """Context for MetricFlow CLI.

    Eagerly loads dbt project metadata (needed to configure logging), while the
    heavier collaborators (dbt artifacts, SQL client, engine, manifest lookup)
    are created lazily on first property access.

    NOTE: the accessors below are properties — `__init__` and in-file callers
    (e.g. `cfg.mf`, `self.log_file_path`) use attribute access, not calls.
    """

    def __init__(self) -> None:
        """Initialize the CLI context for executing commands.

        The dbt_artifacts construct must be loaded in order for logging configuration to work correctly.
        """
        self.verbose = False
        self._dbt_project_metadata: dbtProjectMetadata = dbtProjectMetadata.load_from_project_path(pathlib.Path.cwd())
        # Lazily-initialized collaborators; populated by the properties below.
        self._dbt_artifacts: Optional[dbtArtifacts] = None
        self._mf: Optional[MetricFlowEngine] = None
        self._sql_client: Optional[SqlClient] = None
        self._semantic_manifest: Optional[SemanticManifest] = None
        self._semantic_manifest_lookup: Optional[SemanticManifestLookup] = None
        # self.log_file_path invokes the dbtRunner. If this is done after the configure_logging call all of the
        # dbt CLI logging configuration could be overridden, resulting in lots of things printing to console
        self._configure_logging(log_file_path=self.log_file_path)

    def _configure_logging(self, log_file_path: pathlib.Path) -> None:
        """Initialize the logging spec for the CLI.

        This requires a fully loaded dbt project, including what amounts to a call to dbt debug.
        As a practical matter, this should not have much end user impact except in cases where they are
        using commands that do not require a working adapter AND the call to dbt debug runs slowly.
        In future we may have better API access to the log file location for the project, at which time
        we can swap this out and return to full lazy loading for any context attributes that are slow
        to initialize.
        """
        log_format = "%(asctime)s %(levelname)s %(filename)s:%(lineno)d [%(threadName)s] - %(message)s"
        logging.basicConfig(level=logging.INFO, format=log_format)
        log_file_handler = TimedRotatingFileHandler(
            filename=log_file_path,
            # Rotate every day to a new file, keep 7 days worth.
            when="D",
            interval=1,
            backupCount=7,
        )
        formatter = logging.Formatter(fmt=log_format)
        log_file_handler.setFormatter(formatter)
        log_file_handler.setLevel(logging.INFO)
        root_logger = logging.getLogger()
        # StreamHandler to the console would have been setup by logging.basicConfig;
        # effectively silence it so INFO+ records only go to the rotating log file.
        for handler in root_logger.handlers:
            handler.setLevel(logging.CRITICAL)
        root_logger.addHandler(log_file_handler)

    @property
    def dbt_project_metadata(self) -> dbtProjectMetadata:
        """Property accessor for dbt project metadata, useful in cases where the full manifest load is not needed."""
        return self._dbt_project_metadata

    @property
    def dbt_artifacts(self) -> dbtArtifacts:
        """Property accessor for all dbt artifacts, used for powering the sql client (among other things)."""
        if self._dbt_artifacts is None:
            self._dbt_artifacts = dbtArtifacts.load_from_project_metadata(self._dbt_project_metadata)
        return self._dbt_artifacts

    @property
    def log_file_path(self) -> pathlib.Path:
        """Returns the location of the log file path for this CLI invocation."""
        # The dbt Project.log_path attribute is currently sourced from the final runtime config value accessible
        # through the CLI state flags. As such, it will deviate from the default based on the DBT_LOG_PATH environment
        # variable. Should this behavior change, we will need to update this call.
        return pathlib.Path(self._dbt_project_metadata.project.log_path, "metricflow.log")

    @property
    def sql_client(self) -> SqlClient:
        """Property accessor for the sql_client class used in the CLI."""
        if self._sql_client is None:
            self._sql_client = AdapterBackedSqlClient(self.dbt_artifacts.adapter)
        return self._sql_client

    def run_health_checks(self) -> Dict[str, Dict[str, str]]:
        """Execute the DB health checks.

        Returns:
            Mapping of "<engine> - <check name>" to {"status", "error_message"}.
        """
        checks_to_run = [
            ("SELECT 1", lambda: self.sql_client.execute("SELECT 1")),
        ]
        results: Dict[str, Dict[str, str]] = {}
        for step, check in checks_to_run:
            status = "SUCCESS"
            err_string = ""
            try:
                resp = check()
                # BUG FIX: the original `logger.info(f"..." + f"..." if resp else None)`
                # parsed as `(a + b) if resp else None`, so a falsy response logged `None`.
                message = f"Health Check Item {step}: succeeded"
                if resp:
                    message += f" with response {str(resp)}"
                logger.info(message)
            except Exception as e:
                status = "FAIL"
                err_string = str(e)
                logger.error(f"Health Check Item {step}: failed with error {err_string}")
            results[f"{self.sql_client.sql_engine_type} - {step}"] = {
                "status": status,
                "error_message": err_string,
            }
        return results

    @property
    def mf(self) -> MetricFlowEngine:  # noqa: D
        if self._mf is None:
            self._mf = MetricFlowEngine(
                semantic_manifest_lookup=self.semantic_manifest_lookup,
                sql_client=self.sql_client,
            )
        assert self._mf is not None
        return self._mf

    def _build_semantic_manifest_lookup(self) -> None:
        """Get the path to the models and create a corresponding SemanticManifestLookup."""
        self._semantic_manifest_lookup = SemanticManifestLookup(self.semantic_manifest)

    @property
    def semantic_manifest_lookup(self) -> SemanticManifestLookup:  # noqa: D
        if self._semantic_manifest_lookup is None:
            self._build_semantic_manifest_lookup()
        assert self._semantic_manifest_lookup is not None
        return self._semantic_manifest_lookup

    @property
    def semantic_manifest(self) -> SemanticManifest:
        """Retrieve the semantic manifest from the dbt project root."""
        return self.dbt_artifacts.semantic_manifest
# Default number of decimal places used when formatting float values in query
# CLI output (see the `decimals` option of `query`).
DEFAULT_RESULT_DECIMAL_PLACES = 2
def display_dag_as_svg(dag_graph: DagGraphT, directory_path: str) -> str:
    """Render the given DAG to an SVG file and return the path to that file.

    The SVG is written under a "generated_svg" subdirectory of *directory_path*
    using a randomized file name to avoid collisions between renders.
    """
    output_stem = os.path.join(directory_path, "generated_svg", f"dag_{random_id()}")
    render_via_graphviz(dag_graph=dag_graph, file_path_without_svg_suffix=output_stem)
    return f"{output_stem}.svg"
# NOTE(review): the @dataclass decorator appears to have been stripped during
# extraction — without it, the keyword construction in
# `create_with_random_request_id` below cannot work. Upstream may additionally
# declare it frozen; confirm before relying on mutability.
@dataclass
class MetricFlowQueryRequest:
    """Encapsulates the parameters for a metric query.

    TODO: This has turned into a bag of parameters that make it difficult to use without a bunch of conditionals.

    metric_names: Names of the metrics to query.
    metrics: Metric objects to query.
    group_by_names: Names of the dimensions and entities to query.
    group_by: Dimension or entity objects to query.
    limit: Limit the result to this many rows.
    time_constraint_start: Get data for the start of this time range.
    time_constraint_end: Get data for the end of this time range.
    where_constraint: A SQL string using group by names that can be used like a where clause on the output data.
    order_by_names: metric and group by names to order by. A "-" can be used to specify reverse order e.g. "-ds".
    order_by: metric, dimension, or entity objects to order by.
    output_table: If specified, output the result data to this table instead of a result dataframe.
    sql_optimization_level: The level of optimization for the generated SQL.
    query_type: Type of MetricFlow query.
    """

    request_id: MetricFlowRequestId
    saved_query_name: Optional[str] = None
    metric_names: Optional[Sequence[str]] = None
    metrics: Optional[Sequence[MetricQueryParameter]] = None
    group_by_names: Optional[Sequence[str]] = None
    group_by: Optional[Tuple[GroupByParameter, ...]] = None
    limit: Optional[int] = None
    time_constraint_start: Optional[datetime.datetime] = None
    time_constraint_end: Optional[datetime.datetime] = None
    where_constraint: Optional[str] = None
    order_by_names: Optional[Sequence[str]] = None
    order_by: Optional[Sequence[OrderByQueryParameter]] = None
    min_max_only: bool = False
    output_table: Optional[str] = None
    sql_optimization_level: SqlQueryOptimizationLevel = SqlQueryOptimizationLevel.O4
    query_type: MetricFlowQueryType = MetricFlowQueryType.METRIC

    @staticmethod
    def create_with_random_request_id(  # noqa: D
        saved_query_name: Optional[str] = None,
        metric_names: Optional[Sequence[str]] = None,
        metrics: Optional[Sequence[MetricQueryParameter]] = None,
        group_by_names: Optional[Sequence[str]] = None,
        group_by: Optional[Tuple[GroupByParameter, ...]] = None,
        limit: Optional[int] = None,
        time_constraint_start: Optional[datetime.datetime] = None,
        time_constraint_end: Optional[datetime.datetime] = None,
        where_constraint: Optional[str] = None,
        order_by_names: Optional[Sequence[str]] = None,
        order_by: Optional[Sequence[OrderByQueryParameter]] = None,
        output_table: Optional[str] = None,
        sql_optimization_level: SqlQueryOptimizationLevel = SqlQueryOptimizationLevel.O4,
        query_type: MetricFlowQueryType = MetricFlowQueryType.METRIC,
        min_max_only: bool = False,
    ) -> MetricFlowQueryRequest:
        """Factory that builds a request with a freshly generated random request ID."""
        return MetricFlowQueryRequest(
            request_id=MetricFlowRequestId(mf_rid=f"{random_id()}"),
            saved_query_name=saved_query_name,
            metric_names=metric_names,
            metrics=metrics,
            group_by_names=group_by_names,
            group_by=group_by,
            limit=limit,
            time_constraint_start=time_constraint_start,
            time_constraint_end=time_constraint_end,
            where_constraint=where_constraint,
            order_by_names=order_by_names,
            order_by=order_by,
            output_table=output_table,
            sql_optimization_level=sql_optimization_level,
            query_type=query_type,
            min_max_only=min_max_only,
        )
# NOTE(review): restored the @dataclass decorator (stripped during extraction)
# so the annotated fields below actually produce a usable constructor.
@dataclass
class MetricFlowQueryResult:  # noqa: D
    """The result of a query and context on how it was generated."""

    query_spec: MetricFlowQuerySpec
    dataflow_plan: DataflowPlan
    sql: str
    # Exactly one of result_df / result_table is expected to be populated,
    # depending on whether the query wrote to an output table — TODO confirm.
    result_df: Optional[pd.DataFrame] = None
    result_table: Optional[SqlTable] = None
# NOTE(review): restored @dataclass and the @property decorators stripped during
# extraction — `rendered_sql` is read via attribute access both internally and
# by the `query` CLI command, so it must be a property.
@dataclass
class MetricFlowExplainResult:
    """Returns plans for resolving a query."""

    query_spec: MetricFlowQuerySpec
    dataflow_plan: DataflowPlan
    execution_plan: ExecutionPlan
    output_table: Optional[SqlTable] = None

    @property
    def rendered_sql(self) -> SqlQuery:
        """Return the SQL query that would be run for the given query.

        Raises:
            NotImplementedError: If the execution plan has more than one task,
                or the single task carries no SQL query.
        """
        if len(self.execution_plan.tasks) != 1:
            raise NotImplementedError(
                f"Multiple tasks in the execution plan not yet supported. Got tasks: {self.execution_plan.tasks}"
            )
        sql_query = self.execution_plan.tasks[0].sql_query
        if not sql_query:
            raise NotImplementedError(
                f"Execution plan tasks without a SQL query not yet supported. Got tasks: {self.execution_plan.tasks}"
            )
        return sql_query

    @property
    def rendered_sql_without_descriptions(self) -> SqlQuery:
        """Return the SQL query without the inline descriptions."""
        sql_query = self.rendered_sql
        # Drop comment-only lines ("-- ...") while keeping everything else verbatim.
        return SqlQuery(
            sql_query="\n".join(
                filter(
                    lambda line: not line.strip().startswith("--"),
                    sql_query.sql_query.split("\n"),
                )
            ),
            bind_parameters=sql_query.bind_parameters,
        )
The provided code snippet includes necessary dependencies for implementing the `query` function. Write a Python function `def query( cfg: CLIContext, metrics: Optional[Sequence[str]] = None, group_by: Optional[Sequence[str]] = None, where: Optional[str] = None, start_time: Optional[dt.datetime] = None, end_time: Optional[dt.datetime] = None, order: Optional[List[str]] = None, limit: Optional[int] = None, csv: Optional[click.utils.LazyFile] = None, explain: bool = False, show_dataflow_plan: bool = False, display_plans: bool = False, decimals: int = DEFAULT_RESULT_DECIMAL_PLACES, show_sql_descriptions: bool = False, saved_query: Optional[str] = None, ) -> None` to solve the following problem:
Creates a new query with MetricFlow and assembles a MetricFlowQueryResult.
Here is the function:
def query(
    cfg: CLIContext,
    metrics: Optional[Sequence[str]] = None,
    group_by: Optional[Sequence[str]] = None,
    where: Optional[str] = None,
    start_time: Optional[dt.datetime] = None,
    end_time: Optional[dt.datetime] = None,
    order: Optional[List[str]] = None,
    limit: Optional[int] = None,
    csv: Optional[click.utils.LazyFile] = None,
    explain: bool = False,
    show_dataflow_plan: bool = False,
    display_plans: bool = False,
    decimals: int = DEFAULT_RESULT_DECIMAL_PLACES,
    show_sql_descriptions: bool = False,
    saved_query: Optional[str] = None,
) -> None:
    """Create a new query with MetricFlow and assemble a MetricFlowQueryResult.

    Args:
        cfg: CLI context providing the MetricFlow engine (`cfg.mf`).
        metrics: Names of the metrics to query.
        group_by: Names of dimensions/entities to group by.
        where: SQL-like filter over the group-by names.
        start_time: Start of an optional time-range constraint.
        end_time: End of an optional time-range constraint.
        order: Metric/group-by names to order by ("-name" reverses order).
        limit: Maximum number of result rows.
        csv: When given, write results to this file instead of printing.
        explain: Only plan the query and print the generated SQL.
        show_dataflow_plan: With explain, also print the dataflow plan text.
        display_plans: Render the dataflow plan to an SVG file.
        decimals: Decimal places used when printing float results.
        show_sql_descriptions: Keep inline "--" descriptions in explained SQL.
        saved_query: Name of a saved query to run instead of ad-hoc metrics.
    """
    start = time.time()
    # Spinner gives interactive feedback while the engine plans/executes.
    spinner = Halo(text="Initiating query…", spinner="dots")
    spinner.start()
    mf_request = MetricFlowQueryRequest.create_with_random_request_id(
        saved_query_name=saved_query,
        metric_names=metrics,
        group_by_names=group_by,
        limit=limit,
        time_constraint_start=start_time,
        time_constraint_end=end_time,
        where_constraint=where,
        order_by_names=order,
    )
    explain_result: Optional[MetricFlowExplainResult] = None
    query_result: Optional[MetricFlowQueryResult] = None
    # --explain only plans the query; otherwise the query is actually executed.
    if explain:
        explain_result = cfg.mf.explain(mf_request=mf_request)
    else:
        query_result = cfg.mf.query(mf_request=mf_request)
    spinner.succeed(f"Success 🦄 - query completed after {time.time() - start:.2f} seconds")
    if explain:
        assert explain_result
        sql = (
            explain_result.rendered_sql_without_descriptions.sql_query
            if not show_sql_descriptions
            else explain_result.rendered_sql.sql_query
        )
        if show_dataflow_plan:
            click.echo("🔎 Generated Dataflow Plan + SQL (remove --explain to see data):")
            # Render the dataflow plan text as a "--"-prefixed comment block.
            click.echo(
                textwrap.indent(
                    jinja2.Template(
                        textwrap.dedent(
                            """\
                            Metric Dataflow Plan:
                                {{ plan_text | indent(4) }}
                            """
                        ),
                        undefined=jinja2.StrictUndefined,
                    ).render(plan_text=explain_result.dataflow_plan.text_structure()),
                    prefix="-- ",
                )
            )
            click.echo("")
        else:
            click.echo(
                "🔎 SQL (remove --explain to see data or add --show-dataflow-plan to see the generated dataflow plan):"
            )
        click.echo(sql)
        if display_plans:
            click.echo("Creating temporary directory for storing visualization output.")
            temp_path = tempfile.mkdtemp()
            svg_path = display_dag_as_svg(explain_result.dataflow_plan, temp_path)
            click.echo("")
            click.echo(f"Plan SVG saved to: {svg_path}")
        # Explain mode never falls through to the data-printing path below.
        exit()
    assert query_result
    df = query_result.result_df
    # Show the data if returned successfully
    if df is not None:
        if df.empty:
            click.echo("🕳 Successful MQL query returned an empty result set.")
        elif csv is not None:
            # csv is a LazyFile that is file-like that works in this case.
            df.to_csv(csv, index=False)  # type: ignore
            click.echo(f"🖨 Successfully written query output to {csv.name}")
        else:
            # NOTE: remove `to_string` if no pandas dependency is < 1.1.0
            if parse(pd.__version__) >= parse("1.1.0"):
                click.echo(df.to_markdown(index=False, floatfmt=f".{decimals}f"))
            else:
                click.echo(df.to_string(index=False, float_format=lambda x: format(x, f".{decimals}f")))
    if display_plans:
        temp_path = tempfile.mkdtemp()
        svg_path = display_dag_as_svg(query_result.dataflow_plan, temp_path)
        click.echo(f"Plan SVG saved to: {svg_path}")
179,415 | from __future__ import annotations
import datetime as dt
import logging
import pathlib
import signal
import sys
import tempfile
import textwrap
import time
import warnings
from importlib.metadata import version as pkg_version
from typing import Callable, List, Optional, Sequence
import click
import jinja2
import pandas as pd
from dbt_semantic_interfaces.protocols.semantic_manifest import SemanticManifest
from dbt_semantic_interfaces.validations.semantic_manifest_validator import SemanticManifestValidator
from dbt_semantic_interfaces.validations.validator_helpers import SemanticManifestValidationResults
from halo import Halo
from packaging.version import parse
from update_checker import UpdateChecker
import metricflow.cli.custom_click_types as click_custom
from metricflow.cli import PACKAGE_NAME
from metricflow.cli.cli_context import CLIContext
from metricflow.cli.constants import DEFAULT_RESULT_DECIMAL_PLACES, MAX_LIST_OBJECT_ELEMENTS
from metricflow.cli.dbt_connectors.dbt_config_accessor import dbtArtifacts
from metricflow.cli.tutorial import (
dbtMetricFlowTutorialHelper,
)
from metricflow.cli.utils import (
dbt_project_file_exists,
error_if_not_in_dbt_project,
exception_handler,
query_options,
start_end_time_options,
)
from metricflow.dag.dag_visualization import display_dag_as_svg
from metricflow.engine.metricflow_engine import MetricFlowExplainResult, MetricFlowQueryRequest, MetricFlowQueryResult
from metricflow.model.data_warehouse_model_validator import DataWarehouseModelValidator
from metricflow.telemetry.models import TelemetryLevel
from metricflow.telemetry.reporter import TelemetryReporter, log_call
class CLIContext:
    """Context for MetricFlow CLI.

    Eagerly loads dbt project metadata (needed to configure logging), while the
    heavier collaborators (dbt artifacts, SQL client, engine, manifest lookup)
    are created lazily on first property access.

    NOTE: the accessors below are properties — `__init__` and in-file callers
    (e.g. `cfg.mf`, `self.log_file_path`) use attribute access, not calls.
    """

    def __init__(self) -> None:
        """Initialize the CLI context for executing commands.

        The dbt_artifacts construct must be loaded in order for logging configuration to work correctly.
        """
        self.verbose = False
        self._dbt_project_metadata: dbtProjectMetadata = dbtProjectMetadata.load_from_project_path(pathlib.Path.cwd())
        # Lazily-initialized collaborators; populated by the properties below.
        self._dbt_artifacts: Optional[dbtArtifacts] = None
        self._mf: Optional[MetricFlowEngine] = None
        self._sql_client: Optional[SqlClient] = None
        self._semantic_manifest: Optional[SemanticManifest] = None
        self._semantic_manifest_lookup: Optional[SemanticManifestLookup] = None
        # self.log_file_path invokes the dbtRunner. If this is done after the configure_logging call all of the
        # dbt CLI logging configuration could be overridden, resulting in lots of things printing to console
        self._configure_logging(log_file_path=self.log_file_path)

    def _configure_logging(self, log_file_path: pathlib.Path) -> None:
        """Initialize the logging spec for the CLI.

        This requires a fully loaded dbt project, including what amounts to a call to dbt debug.
        As a practical matter, this should not have much end user impact except in cases where they are
        using commands that do not require a working adapter AND the call to dbt debug runs slowly.
        In future we may have better API access to the log file location for the project, at which time
        we can swap this out and return to full lazy loading for any context attributes that are slow
        to initialize.
        """
        log_format = "%(asctime)s %(levelname)s %(filename)s:%(lineno)d [%(threadName)s] - %(message)s"
        logging.basicConfig(level=logging.INFO, format=log_format)
        log_file_handler = TimedRotatingFileHandler(
            filename=log_file_path,
            # Rotate every day to a new file, keep 7 days worth.
            when="D",
            interval=1,
            backupCount=7,
        )
        formatter = logging.Formatter(fmt=log_format)
        log_file_handler.setFormatter(formatter)
        log_file_handler.setLevel(logging.INFO)
        root_logger = logging.getLogger()
        # StreamHandler to the console would have been setup by logging.basicConfig;
        # effectively silence it so INFO+ records only go to the rotating log file.
        for handler in root_logger.handlers:
            handler.setLevel(logging.CRITICAL)
        root_logger.addHandler(log_file_handler)

    @property
    def dbt_project_metadata(self) -> dbtProjectMetadata:
        """Property accessor for dbt project metadata, useful in cases where the full manifest load is not needed."""
        return self._dbt_project_metadata

    @property
    def dbt_artifacts(self) -> dbtArtifacts:
        """Property accessor for all dbt artifacts, used for powering the sql client (among other things)."""
        if self._dbt_artifacts is None:
            self._dbt_artifacts = dbtArtifacts.load_from_project_metadata(self._dbt_project_metadata)
        return self._dbt_artifacts

    @property
    def log_file_path(self) -> pathlib.Path:
        """Returns the location of the log file path for this CLI invocation."""
        # The dbt Project.log_path attribute is currently sourced from the final runtime config value accessible
        # through the CLI state flags. As such, it will deviate from the default based on the DBT_LOG_PATH environment
        # variable. Should this behavior change, we will need to update this call.
        return pathlib.Path(self._dbt_project_metadata.project.log_path, "metricflow.log")

    @property
    def sql_client(self) -> SqlClient:
        """Property accessor for the sql_client class used in the CLI."""
        if self._sql_client is None:
            self._sql_client = AdapterBackedSqlClient(self.dbt_artifacts.adapter)
        return self._sql_client

    def run_health_checks(self) -> Dict[str, Dict[str, str]]:
        """Execute the DB health checks.

        Returns:
            Mapping of "<engine> - <check name>" to {"status", "error_message"}.
        """
        checks_to_run = [
            ("SELECT 1", lambda: self.sql_client.execute("SELECT 1")),
        ]
        results: Dict[str, Dict[str, str]] = {}
        for step, check in checks_to_run:
            status = "SUCCESS"
            err_string = ""
            try:
                resp = check()
                # BUG FIX: the original `logger.info(f"..." + f"..." if resp else None)`
                # parsed as `(a + b) if resp else None`, so a falsy response logged `None`.
                message = f"Health Check Item {step}: succeeded"
                if resp:
                    message += f" with response {str(resp)}"
                logger.info(message)
            except Exception as e:
                status = "FAIL"
                err_string = str(e)
                logger.error(f"Health Check Item {step}: failed with error {err_string}")
            results[f"{self.sql_client.sql_engine_type} - {step}"] = {
                "status": status,
                "error_message": err_string,
            }
        return results

    @property
    def mf(self) -> MetricFlowEngine:  # noqa: D
        if self._mf is None:
            self._mf = MetricFlowEngine(
                semantic_manifest_lookup=self.semantic_manifest_lookup,
                sql_client=self.sql_client,
            )
        assert self._mf is not None
        return self._mf

    def _build_semantic_manifest_lookup(self) -> None:
        """Get the path to the models and create a corresponding SemanticManifestLookup."""
        self._semantic_manifest_lookup = SemanticManifestLookup(self.semantic_manifest)

    @property
    def semantic_manifest_lookup(self) -> SemanticManifestLookup:  # noqa: D
        if self._semantic_manifest_lookup is None:
            self._build_semantic_manifest_lookup()
        assert self._semantic_manifest_lookup is not None
        return self._semantic_manifest_lookup

    @property
    def semantic_manifest(self) -> SemanticManifest:
        """Retrieve the semantic manifest from the dbt project root."""
        return self.dbt_artifacts.semantic_manifest
The provided code snippet includes necessary dependencies for implementing the `list` function. Write a Python function `def list(cfg: CLIContext) -> None` to solve the following problem:
Retrieve metadata values about metrics/dimensions/entities/dimension values.
Here is the function:
def list(cfg: CLIContext) -> None: # noqa: D
"""Retrieve metadata values about metrics/dimensions/entities/dimension values.""" | Retrieve metadata values about metrics/dimensions/entities/dimension values. |
179,416 | from __future__ import annotations
import datetime as dt
import logging
import pathlib
import signal
import sys
import tempfile
import textwrap
import time
import warnings
from importlib.metadata import version as pkg_version
from typing import Callable, List, Optional, Sequence
import click
import jinja2
import pandas as pd
from dbt_semantic_interfaces.protocols.semantic_manifest import SemanticManifest
from dbt_semantic_interfaces.validations.semantic_manifest_validator import SemanticManifestValidator
from dbt_semantic_interfaces.validations.validator_helpers import SemanticManifestValidationResults
from halo import Halo
from packaging.version import parse
from update_checker import UpdateChecker
import metricflow.cli.custom_click_types as click_custom
from metricflow.cli import PACKAGE_NAME
from metricflow.cli.cli_context import CLIContext
from metricflow.cli.constants import DEFAULT_RESULT_DECIMAL_PLACES, MAX_LIST_OBJECT_ELEMENTS
from metricflow.cli.dbt_connectors.dbt_config_accessor import dbtArtifacts
from metricflow.cli.tutorial import (
dbtMetricFlowTutorialHelper,
)
from metricflow.cli.utils import (
dbt_project_file_exists,
error_if_not_in_dbt_project,
exception_handler,
query_options,
start_end_time_options,
)
from metricflow.dag.dag_visualization import display_dag_as_svg
from metricflow.engine.metricflow_engine import MetricFlowExplainResult, MetricFlowQueryRequest, MetricFlowQueryResult
from metricflow.model.data_warehouse_model_validator import DataWarehouseModelValidator
from metricflow.telemetry.models import TelemetryLevel
from metricflow.telemetry.reporter import TelemetryReporter, log_call
def dimensions(cfg: CLIContext, metrics: List[str]) -> None:
"""List all unique dimensions."""
spinner = Halo(
text="🔍 Looking for all available dimensions...",
spinner="dots",
)
spinner.start()
dimensions = cfg.mf.simple_dimensions_for_metrics(metrics)
if not dimensions:
spinner.fail("List of dimensions unavailable.")
spinner.succeed(f"🌱 We've found {len(dimensions)} common dimensions for metrics {metrics}.")
for dimension in dimensions:
click.echo(f"• {click.style(dimension.granularity_free_qualified_name, bold=True, fg='green')}")
"--metrics",
type=click_custom.SequenceParamType(min_length=1),
default="",
help="List entities by given metrics (intersection). Ex. --metrics bookings,messages",
class CLIContext:
"""Context for MetricFlow CLI."""
def __init__(self) -> None:
"""Initialize the CLI context for executing commands.
The dbt_artifacts construct must be loaded in order for logging configuration to work correctly.
"""
self.verbose = False
self._dbt_project_metadata: dbtProjectMetadata = dbtProjectMetadata.load_from_project_path(pathlib.Path.cwd())
self._dbt_artifacts: Optional[dbtArtifacts] = None
self._mf: Optional[MetricFlowEngine] = None
self._sql_client: Optional[SqlClient] = None
self._semantic_manifest: Optional[SemanticManifest] = None
self._semantic_manifest_lookup: Optional[SemanticManifestLookup] = None
# self.log_file_path invokes the dbtRunner. If this is done after the configure_logging call all of the
# dbt CLI logging configuration could be overridden, resulting in lots of things printing to console
self._configure_logging(log_file_path=self.log_file_path)
def _configure_logging(self, log_file_path: pathlib.Path) -> None:
"""Initialize the logging spec for the CLI.
This requires a fully loaded dbt project, including what amounts to a call to dbt debug.
As a practical matter, this should not have much end user impact except in cases where they are
using commands that do not require a working adapter AND the call to dbt debug runs slowly.
In future we may have better API access to the log file location for the project, at which time
we can swap this out and return to full lazy loading for any context attributes that are slow
to initialize.
"""
log_format = "%(asctime)s %(levelname)s %(filename)s:%(lineno)d [%(threadName)s] - %(message)s"
logging.basicConfig(level=logging.INFO, format=log_format)
log_file_handler = TimedRotatingFileHandler(
filename=log_file_path,
# Rotate every day to a new file, keep 7 days worth.
when="D",
interval=1,
backupCount=7,
)
formatter = logging.Formatter(fmt=log_format)
log_file_handler.setFormatter(formatter)
log_file_handler.setLevel(logging.INFO)
root_logger = logging.getLogger()
# StreamHandler to the console would have been setup by logging.basicConfig
for handler in root_logger.handlers:
handler.setLevel(logging.CRITICAL)
root_logger.addHandler(log_file_handler)
def dbt_project_metadata(self) -> dbtProjectMetadata:
"""Property accessor for dbt project metadata, useful in cases where the full manifest load is not needed."""
return self._dbt_project_metadata
def dbt_artifacts(self) -> dbtArtifacts:
"""Property accessor for all dbt artifacts, used for powering the sql client (among other things)."""
if self._dbt_artifacts is None:
self._dbt_artifacts = dbtArtifacts.load_from_project_metadata(self._dbt_project_metadata)
return self._dbt_artifacts
def log_file_path(self) -> pathlib.Path:
"""Returns the location of the log file path for this CLI invocation."""
# The dbt Project.log_path attribute is currently sourced from the final runtime config value accessible
# through the CLI state flags. As such, it will deviate from the default based on the DBT_LOG_PATH environment
# variable. Should this behavior change, we will need to update this call.
return pathlib.Path(self._dbt_project_metadata.project.log_path, "metricflow.log")
def sql_client(self) -> SqlClient:
"""Property accessor for the sql_client class used in the CLI."""
if self._sql_client is None:
self._sql_client = AdapterBackedSqlClient(self.dbt_artifacts.adapter)
return self._sql_client
def run_health_checks(self) -> Dict[str, Dict[str, str]]:
"""Execute the DB health checks."""
checks_to_run = [
("SELECT 1", lambda: self.sql_client.execute("SELECT 1")),
]
results: Dict[str, Dict[str, str]] = {}
for step, check in checks_to_run:
status = "SUCCESS"
err_string = ""
try:
resp = check()
logger.info(f"Health Check Item {step}: succeeded" + f" with response {str(resp)}" if resp else None)
except Exception as e:
status = "FAIL"
err_string = str(e)
logger.error(f"Health Check Item {step}: failed with error {err_string}")
results[f"{self.sql_client.sql_engine_type} - {step}"] = {
"status": status,
"error_message": err_string,
}
return results
def mf(self) -> MetricFlowEngine: # noqa: D
if self._mf is None:
self._mf = MetricFlowEngine(
semantic_manifest_lookup=self.semantic_manifest_lookup,
sql_client=self.sql_client,
)
assert self._mf is not None
return self._mf
def _build_semantic_manifest_lookup(self) -> None:
"""Get the path to the models and create a corresponding SemanticManifestLookup."""
self._semantic_manifest_lookup = SemanticManifestLookup(self.semantic_manifest)
def semantic_manifest_lookup(self) -> SemanticManifestLookup: # noqa: D
if self._semantic_manifest_lookup is None:
self._build_semantic_manifest_lookup()
assert self._semantic_manifest_lookup is not None
return self._semantic_manifest_lookup
def semantic_manifest(self) -> SemanticManifest:
"""Retrieve the semantic manifest from the dbt project root."""
return self.dbt_artifacts.semantic_manifest
MAX_LIST_OBJECT_ELEMENTS = 5
The provided code snippet includes necessary dependencies for implementing the `metrics` function. Write a Python function `def metrics(cfg: CLIContext, show_all_dimensions: bool = False, search: Optional[str] = None) -> None` to solve the following problem:
List the metrics with their available dimensions. Automatically truncates long lists of dimensions, pass --show-all-dims to see all.
Here is the function:
def metrics(cfg: CLIContext, show_all_dimensions: bool = False, search: Optional[str] = None) -> None:
"""List the metrics with their available dimensions.
Automatically truncates long lists of dimensions, pass --show-all-dims to see all.
"""
spinner = Halo(text="🔍 Looking for all available metrics...", spinner="dots")
spinner.start()
metrics = cfg.mf.list_metrics()
if not metrics:
spinner.fail("List of metrics unavailable.")
filter_msg = ""
if search is not None:
num_metrics = len(metrics)
metrics = [m for m in metrics if search.lower() in m.name.lower()]
filter_msg = f" matching `{search}`, of a total of {num_metrics} available"
spinner.succeed(f"🌱 We've found {len(metrics)} metrics{filter_msg}.")
click.echo('The list below shows metrics in the format of "metric_name: list of available dimensions"')
num_dims_to_show = MAX_LIST_OBJECT_ELEMENTS
for m in metrics:
# sort dimensions by whether they're local first(if / then global else local) then the dim name
dimensions = sorted([dimension.granularity_free_qualified_name for dimension in m.dimensions])
if show_all_dimensions:
num_dims_to_show = len(dimensions)
click.echo(
f"• {click.style(m.name, bold=True, fg='green')}: {', '.join(dimensions[:num_dims_to_show])}"
+ (f" and {len(dimensions) - num_dims_to_show} more" if len(dimensions) > num_dims_to_show else "")
) | List the metrics with their available dimensions. Automatically truncates long lists of dimensions, pass --show-all-dims to see all. |
179,417 | from __future__ import annotations
import datetime as dt
import logging
import pathlib
import signal
import sys
import tempfile
import textwrap
import time
import warnings
from importlib.metadata import version as pkg_version
from typing import Callable, List, Optional, Sequence
import click
import jinja2
import pandas as pd
from dbt_semantic_interfaces.protocols.semantic_manifest import SemanticManifest
from dbt_semantic_interfaces.validations.semantic_manifest_validator import SemanticManifestValidator
from dbt_semantic_interfaces.validations.validator_helpers import SemanticManifestValidationResults
from halo import Halo
from packaging.version import parse
from update_checker import UpdateChecker
import metricflow.cli.custom_click_types as click_custom
from metricflow.cli import PACKAGE_NAME
from metricflow.cli.cli_context import CLIContext
from metricflow.cli.constants import DEFAULT_RESULT_DECIMAL_PLACES, MAX_LIST_OBJECT_ELEMENTS
from metricflow.cli.dbt_connectors.dbt_config_accessor import dbtArtifacts
from metricflow.cli.tutorial import (
dbtMetricFlowTutorialHelper,
)
from metricflow.cli.utils import (
dbt_project_file_exists,
error_if_not_in_dbt_project,
exception_handler,
query_options,
start_end_time_options,
)
from metricflow.dag.dag_visualization import display_dag_as_svg
from metricflow.engine.metricflow_engine import MetricFlowExplainResult, MetricFlowQueryRequest, MetricFlowQueryResult
from metricflow.model.data_warehouse_model_validator import DataWarehouseModelValidator
from metricflow.telemetry.models import TelemetryLevel
from metricflow.telemetry.reporter import TelemetryReporter, log_call
class CLIContext:
"""Context for MetricFlow CLI."""
def __init__(self) -> None:
"""Initialize the CLI context for executing commands.
The dbt_artifacts construct must be loaded in order for logging configuration to work correctly.
"""
self.verbose = False
self._dbt_project_metadata: dbtProjectMetadata = dbtProjectMetadata.load_from_project_path(pathlib.Path.cwd())
self._dbt_artifacts: Optional[dbtArtifacts] = None
self._mf: Optional[MetricFlowEngine] = None
self._sql_client: Optional[SqlClient] = None
self._semantic_manifest: Optional[SemanticManifest] = None
self._semantic_manifest_lookup: Optional[SemanticManifestLookup] = None
# self.log_file_path invokes the dbtRunner. If this is done after the configure_logging call all of the
# dbt CLI logging configuration could be overridden, resulting in lots of things printing to console
self._configure_logging(log_file_path=self.log_file_path)
def _configure_logging(self, log_file_path: pathlib.Path) -> None:
"""Initialize the logging spec for the CLI.
This requires a fully loaded dbt project, including what amounts to a call to dbt debug.
As a practical matter, this should not have much end user impact except in cases where they are
using commands that do not require a working adapter AND the call to dbt debug runs slowly.
In future we may have better API access to the log file location for the project, at which time
we can swap this out and return to full lazy loading for any context attributes that are slow
to initialize.
"""
log_format = "%(asctime)s %(levelname)s %(filename)s:%(lineno)d [%(threadName)s] - %(message)s"
logging.basicConfig(level=logging.INFO, format=log_format)
log_file_handler = TimedRotatingFileHandler(
filename=log_file_path,
# Rotate every day to a new file, keep 7 days worth.
when="D",
interval=1,
backupCount=7,
)
formatter = logging.Formatter(fmt=log_format)
log_file_handler.setFormatter(formatter)
log_file_handler.setLevel(logging.INFO)
root_logger = logging.getLogger()
# StreamHandler to the console would have been setup by logging.basicConfig
for handler in root_logger.handlers:
handler.setLevel(logging.CRITICAL)
root_logger.addHandler(log_file_handler)
def dbt_project_metadata(self) -> dbtProjectMetadata:
"""Property accessor for dbt project metadata, useful in cases where the full manifest load is not needed."""
return self._dbt_project_metadata
def dbt_artifacts(self) -> dbtArtifacts:
"""Property accessor for all dbt artifacts, used for powering the sql client (among other things)."""
if self._dbt_artifacts is None:
self._dbt_artifacts = dbtArtifacts.load_from_project_metadata(self._dbt_project_metadata)
return self._dbt_artifacts
def log_file_path(self) -> pathlib.Path:
"""Returns the location of the log file path for this CLI invocation."""
# The dbt Project.log_path attribute is currently sourced from the final runtime config value accessible
# through the CLI state flags. As such, it will deviate from the default based on the DBT_LOG_PATH environment
# variable. Should this behavior change, we will need to update this call.
return pathlib.Path(self._dbt_project_metadata.project.log_path, "metricflow.log")
def sql_client(self) -> SqlClient:
"""Property accessor for the sql_client class used in the CLI."""
if self._sql_client is None:
self._sql_client = AdapterBackedSqlClient(self.dbt_artifacts.adapter)
return self._sql_client
def run_health_checks(self) -> Dict[str, Dict[str, str]]:
"""Execute the DB health checks."""
checks_to_run = [
("SELECT 1", lambda: self.sql_client.execute("SELECT 1")),
]
results: Dict[str, Dict[str, str]] = {}
for step, check in checks_to_run:
status = "SUCCESS"
err_string = ""
try:
resp = check()
logger.info(f"Health Check Item {step}: succeeded" + f" with response {str(resp)}" if resp else None)
except Exception as e:
status = "FAIL"
err_string = str(e)
logger.error(f"Health Check Item {step}: failed with error {err_string}")
results[f"{self.sql_client.sql_engine_type} - {step}"] = {
"status": status,
"error_message": err_string,
}
return results
def mf(self) -> MetricFlowEngine: # noqa: D
if self._mf is None:
self._mf = MetricFlowEngine(
semantic_manifest_lookup=self.semantic_manifest_lookup,
sql_client=self.sql_client,
)
assert self._mf is not None
return self._mf
def _build_semantic_manifest_lookup(self) -> None:
"""Get the path to the models and create a corresponding SemanticManifestLookup."""
self._semantic_manifest_lookup = SemanticManifestLookup(self.semantic_manifest)
def semantic_manifest_lookup(self) -> SemanticManifestLookup: # noqa: D
if self._semantic_manifest_lookup is None:
self._build_semantic_manifest_lookup()
assert self._semantic_manifest_lookup is not None
return self._semantic_manifest_lookup
def semantic_manifest(self) -> SemanticManifest:
"""Retrieve the semantic manifest from the dbt project root."""
return self.dbt_artifacts.semantic_manifest
The provided code snippet includes necessary dependencies for implementing the `entities` function. Write a Python function `def entities(cfg: CLIContext, metrics: List[str]) -> None` to solve the following problem:
List all unique entities.
Here is the function:
def entities(cfg: CLIContext, metrics: List[str]) -> None:
"""List all unique entities."""
spinner = Halo(
text="🔍 Looking for all available entities...",
spinner="dots",
)
spinner.start()
entities = cfg.mf.entities_for_metrics(metrics)
if not entities:
spinner.fail("List of entities unavailable.")
spinner.succeed(f"🌱 We've found {len(entities)} common entities for metrics {metrics}.")
for entity in entities:
click.echo(f"• {click.style(entity.name, bold=True, fg='green')}") | List all unique entities. |
179,418 | from __future__ import annotations
import datetime as dt
import logging
import pathlib
import signal
import sys
import tempfile
import textwrap
import time
import warnings
from importlib.metadata import version as pkg_version
from typing import Callable, List, Optional, Sequence
import click
import jinja2
import pandas as pd
from dbt_semantic_interfaces.protocols.semantic_manifest import SemanticManifest
from dbt_semantic_interfaces.validations.semantic_manifest_validator import SemanticManifestValidator
from dbt_semantic_interfaces.validations.validator_helpers import SemanticManifestValidationResults
from halo import Halo
from packaging.version import parse
from update_checker import UpdateChecker
import metricflow.cli.custom_click_types as click_custom
from metricflow.cli import PACKAGE_NAME
from metricflow.cli.cli_context import CLIContext
from metricflow.cli.constants import DEFAULT_RESULT_DECIMAL_PLACES, MAX_LIST_OBJECT_ELEMENTS
from metricflow.cli.dbt_connectors.dbt_config_accessor import dbtArtifacts
from metricflow.cli.tutorial import (
dbtMetricFlowTutorialHelper,
)
from metricflow.cli.utils import (
dbt_project_file_exists,
error_if_not_in_dbt_project,
exception_handler,
query_options,
start_end_time_options,
)
from metricflow.dag.dag_visualization import display_dag_as_svg
from metricflow.engine.metricflow_engine import MetricFlowExplainResult, MetricFlowQueryRequest, MetricFlowQueryResult
from metricflow.model.data_warehouse_model_validator import DataWarehouseModelValidator
from metricflow.telemetry.models import TelemetryLevel
from metricflow.telemetry.reporter import TelemetryReporter, log_call
class CLIContext:
"""Context for MetricFlow CLI."""
def __init__(self) -> None:
"""Initialize the CLI context for executing commands.
The dbt_artifacts construct must be loaded in order for logging configuration to work correctly.
"""
self.verbose = False
self._dbt_project_metadata: dbtProjectMetadata = dbtProjectMetadata.load_from_project_path(pathlib.Path.cwd())
self._dbt_artifacts: Optional[dbtArtifacts] = None
self._mf: Optional[MetricFlowEngine] = None
self._sql_client: Optional[SqlClient] = None
self._semantic_manifest: Optional[SemanticManifest] = None
self._semantic_manifest_lookup: Optional[SemanticManifestLookup] = None
# self.log_file_path invokes the dbtRunner. If this is done after the configure_logging call all of the
# dbt CLI logging configuration could be overridden, resulting in lots of things printing to console
self._configure_logging(log_file_path=self.log_file_path)
def _configure_logging(self, log_file_path: pathlib.Path) -> None:
"""Initialize the logging spec for the CLI.
This requires a fully loaded dbt project, including what amounts to a call to dbt debug.
As a practical matter, this should not have much end user impact except in cases where they are
using commands that do not require a working adapter AND the call to dbt debug runs slowly.
In future we may have better API access to the log file location for the project, at which time
we can swap this out and return to full lazy loading for any context attributes that are slow
to initialize.
"""
log_format = "%(asctime)s %(levelname)s %(filename)s:%(lineno)d [%(threadName)s] - %(message)s"
logging.basicConfig(level=logging.INFO, format=log_format)
log_file_handler = TimedRotatingFileHandler(
filename=log_file_path,
# Rotate every day to a new file, keep 7 days worth.
when="D",
interval=1,
backupCount=7,
)
formatter = logging.Formatter(fmt=log_format)
log_file_handler.setFormatter(formatter)
log_file_handler.setLevel(logging.INFO)
root_logger = logging.getLogger()
# StreamHandler to the console would have been setup by logging.basicConfig
for handler in root_logger.handlers:
handler.setLevel(logging.CRITICAL)
root_logger.addHandler(log_file_handler)
def dbt_project_metadata(self) -> dbtProjectMetadata:
"""Property accessor for dbt project metadata, useful in cases where the full manifest load is not needed."""
return self._dbt_project_metadata
def dbt_artifacts(self) -> dbtArtifacts:
"""Property accessor for all dbt artifacts, used for powering the sql client (among other things)."""
if self._dbt_artifacts is None:
self._dbt_artifacts = dbtArtifacts.load_from_project_metadata(self._dbt_project_metadata)
return self._dbt_artifacts
def log_file_path(self) -> pathlib.Path:
"""Returns the location of the log file path for this CLI invocation."""
# The dbt Project.log_path attribute is currently sourced from the final runtime config value accessible
# through the CLI state flags. As such, it will deviate from the default based on the DBT_LOG_PATH environment
# variable. Should this behavior change, we will need to update this call.
return pathlib.Path(self._dbt_project_metadata.project.log_path, "metricflow.log")
def sql_client(self) -> SqlClient:
"""Property accessor for the sql_client class used in the CLI."""
if self._sql_client is None:
self._sql_client = AdapterBackedSqlClient(self.dbt_artifacts.adapter)
return self._sql_client
def run_health_checks(self) -> Dict[str, Dict[str, str]]:
"""Execute the DB health checks."""
checks_to_run = [
("SELECT 1", lambda: self.sql_client.execute("SELECT 1")),
]
results: Dict[str, Dict[str, str]] = {}
for step, check in checks_to_run:
status = "SUCCESS"
err_string = ""
try:
resp = check()
logger.info(f"Health Check Item {step}: succeeded" + f" with response {str(resp)}" if resp else None)
except Exception as e:
status = "FAIL"
err_string = str(e)
logger.error(f"Health Check Item {step}: failed with error {err_string}")
results[f"{self.sql_client.sql_engine_type} - {step}"] = {
"status": status,
"error_message": err_string,
}
return results
def mf(self) -> MetricFlowEngine: # noqa: D
if self._mf is None:
self._mf = MetricFlowEngine(
semantic_manifest_lookup=self.semantic_manifest_lookup,
sql_client=self.sql_client,
)
assert self._mf is not None
return self._mf
def _build_semantic_manifest_lookup(self) -> None:
"""Get the path to the models and create a corresponding SemanticManifestLookup."""
self._semantic_manifest_lookup = SemanticManifestLookup(self.semantic_manifest)
def semantic_manifest_lookup(self) -> SemanticManifestLookup: # noqa: D
if self._semantic_manifest_lookup is None:
self._build_semantic_manifest_lookup()
assert self._semantic_manifest_lookup is not None
return self._semantic_manifest_lookup
def semantic_manifest(self) -> SemanticManifest:
"""Retrieve the semantic manifest from the dbt project root."""
return self.dbt_artifacts.semantic_manifest
The provided code snippet includes necessary dependencies for implementing the `health_checks` function. Write a Python function `def health_checks(cfg: CLIContext) -> None` to solve the following problem:
Performs a health check against the DW provided in the configs.
Here is the function:
def health_checks(cfg: CLIContext) -> None:
"""Performs a health check against the DW provided in the configs."""
spinner = Halo(
text="🏥 Running health checks against your data warehouse... (This should not take longer than 30s for a successful connection)",
spinner="dots",
)
spinner.start()
res = cfg.run_health_checks()
spinner.succeed("Health checks completed.")
for test in res:
test_res = res[test]
if test_res["status"] != "SUCCESS":
click.echo(f"• ❌ {click.style(test, bold=True, fg=('red'))}: Failed with - {test_res['error_message']}.")
else:
click.echo(f"• ✅ {click.style(test, bold=True, fg=('green'))}: Success!") | Performs a health check against the DW provided in the configs. |
179,419 | from __future__ import annotations
import datetime as dt
import logging
import pathlib
import signal
import sys
import tempfile
import textwrap
import time
import warnings
from importlib.metadata import version as pkg_version
from typing import Callable, List, Optional, Sequence
import click
import jinja2
import pandas as pd
from dbt_semantic_interfaces.protocols.semantic_manifest import SemanticManifest
from dbt_semantic_interfaces.validations.semantic_manifest_validator import SemanticManifestValidator
from dbt_semantic_interfaces.validations.validator_helpers import SemanticManifestValidationResults
from halo import Halo
from packaging.version import parse
from update_checker import UpdateChecker
import metricflow.cli.custom_click_types as click_custom
from metricflow.cli import PACKAGE_NAME
from metricflow.cli.cli_context import CLIContext
from metricflow.cli.constants import DEFAULT_RESULT_DECIMAL_PLACES, MAX_LIST_OBJECT_ELEMENTS
from metricflow.cli.dbt_connectors.dbt_config_accessor import dbtArtifacts
from metricflow.cli.tutorial import (
dbtMetricFlowTutorialHelper,
)
from metricflow.cli.utils import (
dbt_project_file_exists,
error_if_not_in_dbt_project,
exception_handler,
query_options,
start_end_time_options,
)
from metricflow.dag.dag_visualization import display_dag_as_svg
from metricflow.engine.metricflow_engine import MetricFlowExplainResult, MetricFlowQueryRequest, MetricFlowQueryResult
from metricflow.model.data_warehouse_model_validator import DataWarehouseModelValidator
from metricflow.telemetry.models import TelemetryLevel
from metricflow.telemetry.reporter import TelemetryReporter, log_call
class CLIContext:
"""Context for MetricFlow CLI."""
def __init__(self) -> None:
"""Initialize the CLI context for executing commands.
The dbt_artifacts construct must be loaded in order for logging configuration to work correctly.
"""
self.verbose = False
self._dbt_project_metadata: dbtProjectMetadata = dbtProjectMetadata.load_from_project_path(pathlib.Path.cwd())
self._dbt_artifacts: Optional[dbtArtifacts] = None
self._mf: Optional[MetricFlowEngine] = None
self._sql_client: Optional[SqlClient] = None
self._semantic_manifest: Optional[SemanticManifest] = None
self._semantic_manifest_lookup: Optional[SemanticManifestLookup] = None
# self.log_file_path invokes the dbtRunner. If this is done after the configure_logging call all of the
# dbt CLI logging configuration could be overridden, resulting in lots of things printing to console
self._configure_logging(log_file_path=self.log_file_path)
def _configure_logging(self, log_file_path: pathlib.Path) -> None:
"""Initialize the logging spec for the CLI.
This requires a fully loaded dbt project, including what amounts to a call to dbt debug.
As a practical matter, this should not have much end user impact except in cases where they are
using commands that do not require a working adapter AND the call to dbt debug runs slowly.
In future we may have better API access to the log file location for the project, at which time
we can swap this out and return to full lazy loading for any context attributes that are slow
to initialize.
"""
log_format = "%(asctime)s %(levelname)s %(filename)s:%(lineno)d [%(threadName)s] - %(message)s"
logging.basicConfig(level=logging.INFO, format=log_format)
log_file_handler = TimedRotatingFileHandler(
filename=log_file_path,
# Rotate every day to a new file, keep 7 days worth.
when="D",
interval=1,
backupCount=7,
)
formatter = logging.Formatter(fmt=log_format)
log_file_handler.setFormatter(formatter)
log_file_handler.setLevel(logging.INFO)
root_logger = logging.getLogger()
# StreamHandler to the console would have been setup by logging.basicConfig
for handler in root_logger.handlers:
handler.setLevel(logging.CRITICAL)
root_logger.addHandler(log_file_handler)
def dbt_project_metadata(self) -> dbtProjectMetadata:
"""Property accessor for dbt project metadata, useful in cases where the full manifest load is not needed."""
return self._dbt_project_metadata
def dbt_artifacts(self) -> dbtArtifacts:
"""Property accessor for all dbt artifacts, used for powering the sql client (among other things)."""
if self._dbt_artifacts is None:
self._dbt_artifacts = dbtArtifacts.load_from_project_metadata(self._dbt_project_metadata)
return self._dbt_artifacts
def log_file_path(self) -> pathlib.Path:
"""Returns the location of the log file path for this CLI invocation."""
# The dbt Project.log_path attribute is currently sourced from the final runtime config value accessible
# through the CLI state flags. As such, it will deviate from the default based on the DBT_LOG_PATH environment
# variable. Should this behavior change, we will need to update this call.
return pathlib.Path(self._dbt_project_metadata.project.log_path, "metricflow.log")
def sql_client(self) -> SqlClient:
"""Property accessor for the sql_client class used in the CLI."""
if self._sql_client is None:
self._sql_client = AdapterBackedSqlClient(self.dbt_artifacts.adapter)
return self._sql_client
def run_health_checks(self) -> Dict[str, Dict[str, str]]:
"""Execute the DB health checks."""
checks_to_run = [
("SELECT 1", lambda: self.sql_client.execute("SELECT 1")),
]
results: Dict[str, Dict[str, str]] = {}
for step, check in checks_to_run:
status = "SUCCESS"
err_string = ""
try:
resp = check()
logger.info(f"Health Check Item {step}: succeeded" + f" with response {str(resp)}" if resp else None)
except Exception as e:
status = "FAIL"
err_string = str(e)
logger.error(f"Health Check Item {step}: failed with error {err_string}")
results[f"{self.sql_client.sql_engine_type} - {step}"] = {
"status": status,
"error_message": err_string,
}
return results
def mf(self) -> MetricFlowEngine: # noqa: D
if self._mf is None:
self._mf = MetricFlowEngine(
semantic_manifest_lookup=self.semantic_manifest_lookup,
sql_client=self.sql_client,
)
assert self._mf is not None
return self._mf
def _build_semantic_manifest_lookup(self) -> None:
"""Get the path to the models and create a corresponding SemanticManifestLookup."""
self._semantic_manifest_lookup = SemanticManifestLookup(self.semantic_manifest)
def semantic_manifest_lookup(self) -> SemanticManifestLookup: # noqa: D
if self._semantic_manifest_lookup is None:
self._build_semantic_manifest_lookup()
assert self._semantic_manifest_lookup is not None
return self._semantic_manifest_lookup
def semantic_manifest(self) -> SemanticManifest:
"""Retrieve the semantic manifest from the dbt project root."""
return self.dbt_artifacts.semantic_manifest
The provided code snippet includes necessary dependencies for implementing the `dimension_values` function. Write a Python function `def dimension_values( cfg: CLIContext, metrics: List[str], dimension: str, start_time: Optional[dt.datetime] = None, end_time: Optional[dt.datetime] = None, ) -> None` to solve the following problem:
List all dimension values with the corresponding metrics.
Here is the function:
def dimension_values(
cfg: CLIContext,
metrics: List[str],
dimension: str,
start_time: Optional[dt.datetime] = None,
end_time: Optional[dt.datetime] = None,
) -> None:
"""List all dimension values with the corresponding metrics.

Args:
    cfg: CLI context supplying the MetricFlow engine (`cfg.mf`).
    metrics: Metric names whose dimension values should be listed.
    dimension: The dimension (group-by) whose distinct values are fetched.
    start_time: Optional lower bound for the query's time constraint.
    end_time: Optional upper bound for the query's time constraint.
"""
spinner = Halo(
text=f"🔍 Retrieving dimension values for dimension '{dimension}' of metrics '{', '.join(metrics)}'...",
spinner="dots",
)
spinner.start()
dim_vals: Optional[List[str]] = None
try:
dim_vals = cfg.mf.get_dimension_values(
metric_names=metrics,
get_group_by_values=dimension,
time_constraint_start=start_time,
time_constraint_end=end_time,
)
except Exception as e:
# Broad catch is deliberate: any engine failure is shown to the user as a
# formatted CLI error and the process exits non-zero.
spinner.fail()
click.echo(
textwrap.dedent(
f"""\
❌ Failed to query dimension values for dimension {dimension} of metrics {', '.join(metrics)}.
ERROR: {str(e)}
"""
)
)
exit(1)
# NOTE(review): this assert also trips when the query legitimately returns an
# empty list — confirm whether empty results should be reported instead.
assert dim_vals
spinner.succeed(
f"🌱 We've found {len(dim_vals)} dimension values for dimension {dimension} of metrics {', '.join(metrics)}."
)
# One bullet line per distinct dimension value.
for dim_val in dim_vals:
click.echo(f"• {click.style(dim_val, bold=True, fg='green')}")
179,420 | from __future__ import annotations
import datetime as dt
import logging
import pathlib
import signal
import sys
import tempfile
import textwrap
import time
import warnings
from importlib.metadata import version as pkg_version
from typing import Callable, List, Optional, Sequence
import click
import jinja2
import pandas as pd
from dbt_semantic_interfaces.protocols.semantic_manifest import SemanticManifest
from dbt_semantic_interfaces.validations.semantic_manifest_validator import SemanticManifestValidator
from dbt_semantic_interfaces.validations.validator_helpers import SemanticManifestValidationResults
from halo import Halo
from packaging.version import parse
from update_checker import UpdateChecker
import metricflow.cli.custom_click_types as click_custom
from metricflow.cli import PACKAGE_NAME
from metricflow.cli.cli_context import CLIContext
from metricflow.cli.constants import DEFAULT_RESULT_DECIMAL_PLACES, MAX_LIST_OBJECT_ELEMENTS
from metricflow.cli.dbt_connectors.dbt_config_accessor import dbtArtifacts
from metricflow.cli.tutorial import (
dbtMetricFlowTutorialHelper,
)
from metricflow.cli.utils import (
dbt_project_file_exists,
error_if_not_in_dbt_project,
exception_handler,
query_options,
start_end_time_options,
)
from metricflow.dag.dag_visualization import display_dag_as_svg
from metricflow.engine.metricflow_engine import MetricFlowExplainResult, MetricFlowQueryRequest, MetricFlowQueryResult
from metricflow.model.data_warehouse_model_validator import DataWarehouseModelValidator
from metricflow.telemetry.models import TelemetryLevel
from metricflow.telemetry.reporter import TelemetryReporter, log_call
def _print_issues(
    issues: SemanticManifestValidationResults, show_non_blocking: bool = False, verbose: bool = False
) -> None:
    """Print each blocking error — and, optionally, future errors and warnings — as a bullet line."""
    issues_to_show = list(issues.errors)
    if show_non_blocking:
        issues_to_show.extend(issues.future_errors)
        issues_to_show.extend(issues.warnings)
    for issue in issues_to_show:
        print(f"• {issue.as_cli_formatted_str(verbose=verbose)}")
def _data_warehouse_validations_runner(
    dw_validator: DataWarehouseModelValidator, manifest: SemanticManifest, timeout: Optional[int]
) -> SemanticManifestValidationResults:
    """Run each category of data warehouse validation and merge the collected results."""
    validation_plan = (
        (dw_validator.validate_semantic_models, "semantic models"),
        (dw_validator.validate_dimensions, "dimensions"),
        (dw_validator.validate_entities, "entities"),
        (dw_validator.validate_measures, "measures"),
        (dw_validator.validate_metrics, "metrics"),
    )
    # Each category runs (and prints) independently; results are merged at the end.
    category_results = [
        _run_dw_validations(validation, manifest=manifest, validation_type=label, timeout=timeout)
        for validation, label in validation_plan
    ]
    return SemanticManifestValidationResults.merge(category_results)
"--dw-timeout", required=False, type=int, help="Optional timeout for data warehouse validation steps. Default None."
class CLIContext:
    """Context for MetricFlow CLI.

    Holds lazily-constructed, cached collaborators (dbt artifacts, SQL client,
    MetricFlow engine, semantic manifest lookup) so commands only pay for what
    they use. Logging is configured eagerly at construction time.

    Fix notes:
    - The accessors below are declared as properties: call sites use them as
      attributes (`cfg.mf.get_dimension_values(...)`, `cfg.sql_client`, and
      `self.log_file_path` passed as a filename), so the undecorated methods in
      the previous copy would have handed callers bound-method objects.
    - `run_health_checks` had an operator-precedence bug in its success log.
    """

    def __init__(self) -> None:
        """Initialize the CLI context for executing commands.

        The dbt_artifacts construct must be loaded in order for logging configuration to work correctly.
        """
        self.verbose = False
        self._dbt_project_metadata: dbtProjectMetadata = dbtProjectMetadata.load_from_project_path(pathlib.Path.cwd())
        self._dbt_artifacts: Optional[dbtArtifacts] = None
        self._mf: Optional[MetricFlowEngine] = None
        self._sql_client: Optional[SqlClient] = None
        self._semantic_manifest: Optional[SemanticManifest] = None
        self._semantic_manifest_lookup: Optional[SemanticManifestLookup] = None
        # self.log_file_path invokes the dbtRunner. If this is done after the configure_logging call all of the
        # dbt CLI logging configuration could be overridden, resulting in lots of things printing to console
        self._configure_logging(log_file_path=self.log_file_path)

    def _configure_logging(self, log_file_path: pathlib.Path) -> None:
        """Initialize the logging spec for the CLI.

        This requires a fully loaded dbt project, including what amounts to a call to dbt debug.
        As a practical matter, this should not have much end user impact except in cases where they are
        using commands that do not require a working adapter AND the call to dbt debug runs slowly.
        In future we may have better API access to the log file location for the project, at which time
        we can swap this out and return to full lazy loading for any context attributes that are slow
        to initialize.
        """
        log_format = "%(asctime)s %(levelname)s %(filename)s:%(lineno)d [%(threadName)s] - %(message)s"
        logging.basicConfig(level=logging.INFO, format=log_format)
        log_file_handler = TimedRotatingFileHandler(
            filename=log_file_path,
            # Rotate every day to a new file, keep 7 days worth.
            when="D",
            interval=1,
            backupCount=7,
        )
        formatter = logging.Formatter(fmt=log_format)
        log_file_handler.setFormatter(formatter)
        log_file_handler.setLevel(logging.INFO)
        root_logger = logging.getLogger()
        # StreamHandler to the console would have been setup by logging.basicConfig;
        # raise its threshold so routine records go only to the file handler.
        for handler in root_logger.handlers:
            handler.setLevel(logging.CRITICAL)
        root_logger.addHandler(log_file_handler)

    @property
    def dbt_project_metadata(self) -> dbtProjectMetadata:
        """Property accessor for dbt project metadata, useful in cases where the full manifest load is not needed."""
        return self._dbt_project_metadata

    @property
    def dbt_artifacts(self) -> dbtArtifacts:
        """Property accessor for all dbt artifacts, used for powering the sql client (among other things)."""
        if self._dbt_artifacts is None:
            self._dbt_artifacts = dbtArtifacts.load_from_project_metadata(self._dbt_project_metadata)
        return self._dbt_artifacts

    @property
    def log_file_path(self) -> pathlib.Path:
        """Returns the location of the log file path for this CLI invocation."""
        # The dbt Project.log_path attribute is currently sourced from the final runtime config value accessible
        # through the CLI state flags. As such, it will deviate from the default based on the DBT_LOG_PATH environment
        # variable. Should this behavior change, we will need to update this call.
        return pathlib.Path(self._dbt_project_metadata.project.log_path, "metricflow.log")

    @property
    def sql_client(self) -> SqlClient:
        """Property accessor for the sql_client class used in the CLI."""
        if self._sql_client is None:
            self._sql_client = AdapterBackedSqlClient(self.dbt_artifacts.adapter)
        return self._sql_client

    def run_health_checks(self) -> Dict[str, Dict[str, str]]:
        """Execute the DB health checks.

        Returns:
            Mapping of "<engine type> - <step>" to a dict with keys "status"
            ("SUCCESS"/"FAIL") and "error_message" (empty string on success).
        """
        checks_to_run = [
            ("SELECT 1", lambda: self.sql_client.execute("SELECT 1")),
        ]
        results: Dict[str, Dict[str, str]] = {}
        for step, check in checks_to_run:
            status = "SUCCESS"
            err_string = ""
            try:
                resp = check()
                # Fix: the previous `f"a" + f"b" if resp else None` parsed as
                # `(a + b) if resp else None`, logging literal "None" for a falsy
                # response. Only the response suffix should be conditional.
                message = f"Health Check Item {step}: succeeded"
                if resp:
                    message += f" with response {str(resp)}"
                logger.info(message)
            except Exception as e:
                status = "FAIL"
                err_string = str(e)
                logger.error(f"Health Check Item {step}: failed with error {err_string}")
            results[f"{self.sql_client.sql_engine_type} - {step}"] = {
                "status": status,
                "error_message": err_string,
            }
        return results

    @property
    def mf(self) -> MetricFlowEngine:  # noqa: D
        if self._mf is None:
            self._mf = MetricFlowEngine(
                semantic_manifest_lookup=self.semantic_manifest_lookup,
                sql_client=self.sql_client,
            )
        assert self._mf is not None
        return self._mf

    def _build_semantic_manifest_lookup(self) -> None:
        """Get the path to the models and create a corresponding SemanticManifestLookup."""
        self._semantic_manifest_lookup = SemanticManifestLookup(self.semantic_manifest)

    @property
    def semantic_manifest_lookup(self) -> SemanticManifestLookup:  # noqa: D
        if self._semantic_manifest_lookup is None:
            self._build_semantic_manifest_lookup()
        assert self._semantic_manifest_lookup is not None
        return self._semantic_manifest_lookup

    @property
    def semantic_manifest(self) -> SemanticManifest:
        """Retrieve the semantic manifest from the dbt project root."""
        return self.dbt_artifacts.semantic_manifest
class dbtArtifacts:
"""Container with access to the dbt artifacts required to power the MetricFlow CLI.
In order to avoid double-loading this should generally be built from the dbtProjectMetadata struct.
This does not inherit because it is a slightly different struct. In most cases this is the object
we want to reference.
"""
# NOTE(review): annotation-only fields plus the keyword construction below suggest
# this was a @dataclass, and the `cls` parameter suggests load_from_project_metadata
# was a @classmethod — decorators appear stripped in this copy; confirm against the
# original source.
profile: Profile
project: Project
adapter: BaseAdapter
semantic_manifest: SemanticManifest
def load_from_project_metadata(cls: Type[Self], project_metadata: dbtProjectMetadata) -> Self:
"""Loads adapter and semantic manifest associated with the previously-fetched project metadata."""
# dbt's get_adapter helper expects an AdapterRequiredConfig, but `project` is missing cli_vars
# In practice, get_adapter only actually requires HasCredentials, so we replicate the type extraction
# from get_adapter here rather than spinning up a full RuntimeConfig instance
# TODO: Move to a fully supported interface when one becomes available
adapter = get_adapter_by_type(project_metadata.profile.credentials.type)
semantic_manifest = dbtArtifacts.build_semantic_manifest_from_dbt_project_root(
project_root=project_metadata.project_path
)
return cls(
profile=project_metadata.profile,
project=project_metadata.project,
adapter=adapter,
semantic_manifest=semantic_manifest,
)
def build_semantic_manifest_from_dbt_project_root(project_root: Path) -> SemanticManifest:
"""In the dbt project root, retrieve the manifest path and parse the SemanticManifest.

Raises:
    ModelCreationException: if target/semantic_manifest.json is missing or unparseable.
"""
DEFAULT_TARGET_PATH = "target/semantic_manifest.json"
full_path_to_manifest = Path(project_root, DEFAULT_TARGET_PATH).resolve()
if not full_path_to_manifest.exists():
raise ModelCreationException(
f"Unable to find {full_path_to_manifest}\n"
"Please ensure that you are running `mf` in the root directory of a dbt project "
"and that the semantic_manifest JSON exists. If this is your first time running "
"`mf`, run `dbt parse` to generate the semantic_manifest JSON."
)
try:
with open(full_path_to_manifest, "r") as file:
raw_contents = file.read()
return parse_manifest_from_dbt_generated_manifest(manifest_json_string=raw_contents)
except Exception as e:
# Wrap any read/parse failure in the domain exception, chaining the cause.
raise ModelCreationException from e
class DataWarehouseModelValidator:
"""A Validator for checking specific tasks for the manifest against the Data Warehouse.
Data Warehouse Validations are validations that are done against the data
warehouse based on the manifest configured by the user. Their purpose is to
ensure that queries generated by MetricFlow won't fail when you go to use
them (assuming the manifest has passed these validations before use).
"""
def __init__(self, sql_client: SqlClient) -> None: # noqa: D
# Client used to dry-run generated queries against the warehouse.
self._sql_client = sql_client
def run_tasks(
self, tasks: List[DataWarehouseValidationTask], timeout: Optional[int] = None
) -> SemanticManifestValidationResults:
"""Runs the list of tasks as queries against the data warehouse, returning any found issues.
Args:
tasks: A list of tasks to run against the data warehouse
timeout: An optional timeout. Default is None. When the timeout is hit, function will return early.
Returns:
A list of validation issues discovered when running the passed in tasks against the data warehouse
"""
# Used for keeping track if we go past the max time
start_time = perf_counter()
issues: List[ValidationIssue] = []
# TODO: Asyncio implementation
for index, task in enumerate(tasks):
# Stop early (recording a warning) once the optional timeout is exceeded.
if timeout is not None and perf_counter() - start_time > timeout:
issues.append(
ValidationWarning(
context=None,
message=f"Hit timeout before completing all tasks. Completed {index}/{len(tasks)} tasks.",
)
)
break
try:
# Dry-run the generated SQL; any engine error becomes a validation error.
(query_string, query_params) = task.query_and_params_callable()
self._sql_client.dry_run(stmt=query_string, sql_bind_parameters=query_params)
except Exception as e:
issues.append(
ValidationError(
context=task.context,
message=task.error_message + f"\nReceived following error from data warehouse:\n{e}",
extra_detail="".join(traceback.format_tb(e.__traceback__)),
)
)
if task.on_fail_subtasks:
# Recurse into follow-up tasks, shrinking the timeout by the time already spent.
sub_task_timeout = floor(timeout - (perf_counter() - start_time)) if timeout else None
issues += self.run_tasks(tasks=task.on_fail_subtasks, timeout=sub_task_timeout).all_issues
return SemanticManifestValidationResults.from_issues_sequence(issues)
def validate_semantic_models(
self, manifest: SemanticManifest, timeout: Optional[int] = None
) -> SemanticManifestValidationResults:
"""Generates a list of tasks for validating the semantic models of the model and then runs them.
Args:
manifest: SemanticManifest which to run data warehouse validations on
timeout: An optional timeout. Default is None. When the timeout is hit, function will return early.
Returns:
A list of validation issues discovered when running the passed in tasks against the data warehouse
"""
tasks = DataWarehouseTaskBuilder.gen_semantic_model_tasks(manifest=manifest)
return self.run_tasks(tasks=tasks, timeout=timeout)
def validate_dimensions(
self, manifest: SemanticManifest, timeout: Optional[int] = None
) -> SemanticManifestValidationResults:
"""Generates a list of tasks for validating the dimensions of the manifest and then runs them.
Args:
manifest: SemanticManifest which to run data warehouse validations on
timeout: An optional timeout. Default is None. When the timeout is hit, function will return early.
Returns:
A list of validation issues. If there are no validation issues, an empty list is returned.
"""
tasks = DataWarehouseTaskBuilder.gen_dimension_tasks(manifest=manifest, sql_client=self._sql_client)
return self.run_tasks(tasks=tasks, timeout=timeout)
def validate_entities(
self, manifest: SemanticManifest, timeout: Optional[int] = None
) -> SemanticManifestValidationResults:
"""Generates a list of tasks for validating the entities of the manifest and then runs them.
Args:
manifest: SemanticManifest which to run data warehouse validations on
timeout: An optional timeout. Default is None. When the timeout is hit, function will return early.
Returns:
A list of validation issues. If there are no validation issues, an empty list is returned.
"""
tasks = DataWarehouseTaskBuilder.gen_entity_tasks(manifest=manifest, sql_client=self._sql_client)
return self.run_tasks(tasks=tasks, timeout=timeout)
def validate_measures(
self, manifest: SemanticManifest, timeout: Optional[int] = None
) -> SemanticManifestValidationResults:
"""Generates a list of tasks for validating the measures of the manifest and then runs them.
Args:
manifest: SemanticManifest which to run data warehouse validations on
timeout: An optional timeout. Default is None. When the timeout is hit, function will return early.
Returns:
A list of validation issues. If there are no validation issues, an empty list is returned.
"""
tasks = DataWarehouseTaskBuilder.gen_measure_tasks(manifest=manifest, sql_client=self._sql_client)
return self.run_tasks(tasks=tasks, timeout=timeout)
def validate_metrics(
self, manifest: SemanticManifest, timeout: Optional[int] = None
) -> SemanticManifestValidationResults:
"""Generates a list of tasks for validating the metrics of the manifest and then runs them.
Args:
manifest: SemanticManifest which to run data warehouse validations on
timeout: An optional timeout. Default is None. When the timeout is hit, function will return early.
Returns:
A list of validation issues. If there are no validation issues, an empty list is returned.
"""
tasks = DataWarehouseTaskBuilder.gen_metric_tasks(manifest=manifest, sql_client=self._sql_client)
return self.run_tasks(tasks=tasks, timeout=timeout)
The provided code snippet includes necessary dependencies for implementing the `validate_configs` function. Write a Python function `def validate_configs( cfg: CLIContext, dw_timeout: Optional[int] = None, skip_dw: bool = False, show_all: bool = False, verbose_issues: bool = False, semantic_validation_workers: int = 1, ) -> None` to solve the following problem:
Perform validations against the defined model configurations.
Here is the function:
def validate_configs(
cfg: CLIContext,
dw_timeout: Optional[int] = None,
skip_dw: bool = False,
show_all: bool = False,
verbose_issues: bool = False,
semantic_validation_workers: int = 1,
) -> None:
"""Perform validations against the defined model configurations.

Runs three phases — manifest parsing, semantic validation, and (unless
skipped) data warehouse validation — and exits non-zero on blocking issues.

Args:
    cfg: CLI context supplying the SQL client for warehouse validations.
    dw_timeout: Optional timeout for the data warehouse validation steps.
    skip_dw: When True, skip the data warehouse validation phase.
    show_all: Also print warnings and future errors, not just errors.
    verbose_issues: Print extra detail for each reported issue.
    semantic_validation_workers: Worker count for the semantic validator.
"""
cfg.verbose = True
if not show_all:
print("(To see warnings and future-errors, run again with flag `--show-all`)")
# Parsing Validation
parsing_spinner = Halo(text="Building manifest from dbt project root", spinner="dots")
parsing_spinner.start()
project_root = pathlib.Path.cwd()
try:
semantic_manifest = dbtArtifacts.build_semantic_manifest_from_dbt_project_root(project_root=project_root)
parsing_spinner.succeed("🎉 Successfully parsed manifest from dbt project")
except Exception as e:
# A parse failure is fatal — nothing further can be validated.
parsing_spinner.fail(f"Exception found when parsing manifest from dbt project ({str(e)})")
exit(1)
# Semantic validation
semantic_spinner = Halo(text="Validating semantics of built manifest", spinner="dots")
semantic_spinner.start()
model_issues = SemanticManifestValidator[SemanticManifest](
max_workers=semantic_validation_workers
).validate_semantic_manifest(semantic_manifest)
if not model_issues.has_blocking_issues:
semantic_spinner.succeed(f"🎉 Successfully validated the semantics of built manifest ({model_issues.summary()})")
else:
# Blocking semantic issues: report and exit before touching the warehouse.
semantic_spinner.fail(
f"Breaking issues found when checking semantics of built manifest ({model_issues.summary()})"
)
_print_issues(model_issues, show_non_blocking=show_all, verbose=verbose_issues)
exit(1)
dw_results = SemanticManifestValidationResults()
if not skip_dw:
# Fetch dbt adapters; note this rebuilds the manifest a second time.
dw_validator = DataWarehouseModelValidator(sql_client=cfg.sql_client)
dw_results = _data_warehouse_validations_runner(
dw_validator=dw_validator, manifest=semantic_manifest, timeout=dw_timeout
)
# Report the combined semantic + warehouse issues; non-zero exit on blockers.
merged_results = SemanticManifestValidationResults.merge([model_issues, dw_results])
_print_issues(merged_results, show_non_blocking=show_all, verbose=verbose_issues)
if merged_results.has_blocking_issues:
exit(1)
179,421 | from __future__ import annotations
from typing import Optional
from dbt_semantic_interfaces.implementations.filters.where_filter import PydanticWhereFilter
from dbt_semantic_interfaces.protocols import WhereFilter, WhereFilterIntersection
The provided code snippet includes necessary dependencies for implementing the `merge_to_single_where_filter` function. Write a Python function `def merge_to_single_where_filter(where_filter_intersection: WhereFilterIntersection) -> Optional[WhereFilter]` to solve the following problem:
Returns a single where filter that is equivalent to the given intersection.
Here is the function:
def merge_to_single_where_filter(where_filter_intersection: WhereFilterIntersection) -> Optional[WhereFilter]:
    """Collapse a where-filter intersection into one equivalent filter.

    Returns None for an empty intersection, the lone filter for a singleton, and
    otherwise an AND of every member's SQL template (each wrapped in parentheses).
    """
    filters = where_filter_intersection.where_filters
    if not filters:
        return None
    if len(filters) == 1:
        return filters[0]
    combined_sql = " AND ".join("( " + single_filter.where_sql_template + " )" for single_filter in filters)
    return PydanticWhereFilter(where_sql_template=combined_sql)
179,422 | from __future__ import annotations
from datetime import date
from typing import Union
import pandas as pd
from dbt_semantic_interfaces.enum_extension import ExtendedEnum, assert_values_exhausted
from dbt_semantic_interfaces.type_enums.time_granularity import TimeGranularity
The provided code snippet includes necessary dependencies for implementing the `offset_period` function. Write a Python function `def offset_period(time_granularity: TimeGranularity) -> pd.offsets.DateOffset` to solve the following problem:
Offset object to use for adjusting by one granularity period.
Here is the function:
def offset_period(time_granularity: TimeGranularity) -> pd.offsets.DateOffset:
    """Return a pandas DateOffset spanning exactly one period of the given granularity."""
    # The pandas type stubs reject some of these keyword args, hence the ignores.
    period_offsets = {
        TimeGranularity.DAY: pd.offsets.DateOffset(days=1),  # type: ignore
        TimeGranularity.WEEK: pd.offsets.DateOffset(weeks=1),  # type: ignore
        TimeGranularity.MONTH: pd.offsets.DateOffset(months=1),
        TimeGranularity.QUARTER: pd.offsets.DateOffset(months=3),
        TimeGranularity.YEAR: pd.offsets.DateOffset(years=1),  # type: ignore
    }
    if time_granularity in period_offsets:
        return period_offsets[time_granularity]
    assert_values_exhausted(time_granularity)
179,423 | from __future__ import annotations
from datetime import date
from typing import Union
import pandas as pd
from dbt_semantic_interfaces.enum_extension import ExtendedEnum, assert_values_exhausted
from dbt_semantic_interfaces.type_enums.time_granularity import TimeGranularity
The provided code snippet includes necessary dependencies for implementing the `format_with_first_or_last` function. Write a Python function `def format_with_first_or_last(time_granularity: TimeGranularity) -> bool` to solve the following problem:
Indicates that this can only be calculated if query results display the first or last date of the period.
Here is the function:
def format_with_first_or_last(time_granularity: TimeGranularity) -> bool:
    """Indicates that this can only be calculated if query results display the first or last date of the period."""
    coarse_granularities = (TimeGranularity.MONTH, TimeGranularity.QUARTER, TimeGranularity.YEAR)
    return time_granularity in coarse_granularities
179,424 | from __future__ import annotations
from datetime import date
from typing import Union
import pandas as pd
from dbt_semantic_interfaces.enum_extension import ExtendedEnum, assert_values_exhausted
from dbt_semantic_interfaces.type_enums.time_granularity import TimeGranularity
def is_period_start(time_granularity: TimeGranularity, date: Union[pd.Timestamp, date]) -> bool:
    """Return True when the given date falls on the first point of its granularity period."""
    ts = pd.Timestamp(date)
    if time_granularity is TimeGranularity.DAY:
        # Every day trivially starts a daily period.
        return True
    if time_granularity is TimeGranularity.WEEK:
        return ISOWeekDay.from_pandas_timestamp(ts).is_week_start
    if time_granularity is TimeGranularity.MONTH:
        return ts.is_month_start
    if time_granularity is TimeGranularity.QUARTER:
        return ts.is_quarter_start
    if time_granularity is TimeGranularity.YEAR:
        return ts.is_year_start
    assert_values_exhausted(time_granularity)
def is_period_end(time_granularity: TimeGranularity, date: Union[pd.Timestamp, date]) -> bool:
    """Return True when the given date falls on the last point of its granularity period."""
    ts = pd.Timestamp(date)
    if time_granularity is TimeGranularity.DAY:
        # Every day trivially ends a daily period.
        return True
    if time_granularity is TimeGranularity.WEEK:
        return ISOWeekDay.from_pandas_timestamp(ts).is_week_end
    if time_granularity is TimeGranularity.MONTH:
        return ts.is_month_end
    if time_granularity is TimeGranularity.QUARTER:
        return ts.is_quarter_end
    if time_granularity is TimeGranularity.YEAR:
        return ts.is_year_end
    assert_values_exhausted(time_granularity)
def adjust_to_start_of_period(
    time_granularity: TimeGranularity, date_to_adjust: pd.Timestamp, rollback: bool = True
) -> pd.Timestamp:
    """Adjust to start of period if not at start already.

    With rollback=True, move backward to the period start; otherwise move forward.
    """
    begin_offset = period_begin_offset(time_granularity)
    return begin_offset.rollback(date_to_adjust) if rollback else begin_offset.rollforward(date_to_adjust)
def adjust_to_end_of_period(
    time_granularity: TimeGranularity, date_to_adjust: pd.Timestamp, rollforward: bool = True
) -> pd.Timestamp:
    """Adjust to end of period if not at end already.

    With rollforward=True, move forward to the period end; otherwise move backward.
    """
    end_offset = period_end_offset(time_granularity)
    return end_offset.rollforward(date_to_adjust) if rollforward else end_offset.rollback(date_to_adjust)
The provided code snippet includes necessary dependencies for implementing the `match_start_or_end_of_period` function. Write a Python function `def match_start_or_end_of_period( time_granularity: TimeGranularity, date_to_match: pd.Timestamp, date_to_adjust: pd.Timestamp ) -> pd.Timestamp` to solve the following problem:
Adjust date_to_adjust to be start or end of period based on if date_to_match is at start or end of period.
Here is the function:
def match_start_or_end_of_period(
    time_granularity: TimeGranularity, date_to_match: pd.Timestamp, date_to_adjust: pd.Timestamp
) -> pd.Timestamp:
    """Align date_to_adjust with whichever period boundary date_to_match sits on.

    Raises ValueError when date_to_match is neither at the start nor at the end
    of a granularity period.
    """
    if is_period_start(time_granularity, date_to_match):
        return adjust_to_start_of_period(time_granularity, date_to_adjust)
    if is_period_end(time_granularity, date_to_match):
        return adjust_to_end_of_period(time_granularity, date_to_adjust)
    raise ValueError(
        f"Expected `date_to_match` to fall at the start or end of the granularity period. Got '{date_to_match}' for granularity {time_granularity}."
    )
179,425 | from __future__ import annotations
from datetime import date
from typing import Union
import pandas as pd
from dbt_semantic_interfaces.enum_extension import ExtendedEnum, assert_values_exhausted
from dbt_semantic_interfaces.type_enums.time_granularity import TimeGranularity
def string_to_time_granularity(s: str) -> TimeGranularity:
    """Look up the TimeGranularity member whose value equals `s` (KeyError when absent)."""
    return {granularity.value: granularity for granularity in TimeGranularity}[s]
179,426 | from __future__ import annotations
from dataclasses import dataclass
from typing import Sequence
import rapidfuzz.fuzz
import rapidfuzz.process
class ScoredItem: # noqa: D
"""A candidate string paired with its fuzzy-match score."""
# NOTE(review): annotation-only fields plus keyword construction in
# top_fuzzy_matches suggest this was a @dataclass — the decorator appears
# stripped in this copy; confirm against the original source.
# The candidate string that was scored.
item_str: str
# fuzz scores from 0..100, and the higher the score, the better the match.
score: float
The provided code snippet includes necessary dependencies for implementing the `top_fuzzy_matches` function. Write a Python function `def top_fuzzy_matches( item: str, candidate_items: Sequence[str], max_matches: int = 6, ) -> Sequence[ScoredItem]` to solve the following problem:
Return the top items (by edit distance) in candidate_items that fuzzy-match the given item. Scores range from 0 to 100; higher scores indicate better matches.
Here is the function:
def top_fuzzy_matches(
    item: str,
    candidate_items: Sequence[str],
    max_matches: int = 6,
) -> Sequence[ScoredItem]:
    """Return the candidates that best fuzzy-match `item`, best match first.

    Uses rapidfuzz's token_set_ratio scorer; extract() yields (candidate, score, ...)
    tuples, which are re-sorted so the highest-scoring candidate leads the result.
    """
    ranked_matches = sorted(
        rapidfuzz.process.extract(
            item,
            list(candidate_items),
            limit=max_matches,
            # This scorer seems to return the best results.
            scorer=rapidfuzz.fuzz.token_set_ratio,
        ),
        # Highest score first.
        key=lambda match: match[1],
        reverse=True,
    )
    return [ScoredItem(item_str=match[0], score=match[1]) for match in ranked_matches]
179,427 | from __future__ import annotations
from dbt_semantic_interfaces.implementations.semantic_manifest import (
PydanticSemanticManifest,
)
from dbt_semantic_interfaces.transformations.boolean_measure import (
BooleanMeasureAggregationRule,
)
from dbt_semantic_interfaces.transformations.convert_count import ConvertCountToSumRule
from dbt_semantic_interfaces.transformations.convert_median import (
ConvertMedianToPercentileRule,
)
from dbt_semantic_interfaces.transformations.names import LowerCaseNamesRule
from dbt_semantic_interfaces.transformations.proxy_measure import CreateProxyMeasureRule
from dbt_semantic_interfaces.transformations.semantic_manifest_transformer import (
PydanticSemanticManifestTransformer,
)
from metricflow.model.transformations.dedupe_metric_input_measures import DedupeMetricInputMeasuresRule
class DedupeMetricInputMeasuresRule(ProtocolHint[SemanticManifestTransformRule[PydanticSemanticManifest]]):
"""Dedupe the input measures within a metric.
This can be removed once the fix is in the dbt-core transformation.
"""
def _implements_protocol(self) -> SemanticManifestTransformRule[PydanticSemanticManifest]: # noqa: D
return self
def transform_model(semantic_manifest: PydanticSemanticManifest) -> PydanticSemanticManifest: # noqa: D
# NOTE(review): no `self`/`cls` parameter — likely a @staticmethod whose
# decorator was stripped in this copy; confirm against the original source.
# dict.fromkeys preserves first-seen order while dropping duplicate measures.
for metric in semantic_manifest.metrics:
metric.type_params.input_measures = list(dict.fromkeys(metric.input_measures).keys())
return semantic_manifest
The provided code snippet includes necessary dependencies for implementing the `parse_manifest_from_dbt_generated_manifest` function. Write a Python function `def parse_manifest_from_dbt_generated_manifest(manifest_json_string: str) -> PydanticSemanticManifest` to solve the following problem:
Parse a PydanticSemanticManifest given the generated semantic_manifest json from dbt.
Here is the function:
def parse_manifest_from_dbt_generated_manifest(manifest_json_string: str) -> PydanticSemanticManifest:
    """Parse a PydanticSemanticManifest given the generated semantic_manifest json from dbt."""
    raw_manifest = PydanticSemanticManifest.parse_raw(manifest_json_string)
    # The serialized object in the dbt project does not yet have all transformations
    # applied, which breaks input measure resolution — so re-apply the rules here.
    # TODO: remove this transform call once the upstream changes are integrated into our dependency tree
    primary_rules = (LowerCaseNamesRule(),)
    secondary_rules = (
        CreateProxyMeasureRule(),
        BooleanMeasureAggregationRule(),
        ConvertCountToSumRule(),
        ConvertMedianToPercentileRule(),
        DedupeMetricInputMeasuresRule(),  # Remove once fix is in core
    )
    return PydanticSemanticManifestTransformer.transform(raw_manifest, (primary_rules, secondary_rules))
179,428 | from __future__ import annotations
import logging
import time
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, FrozenSet, List, Optional, Sequence, Set, Tuple
from dbt_semantic_interfaces.enum_extension import assert_values_exhausted
from dbt_semantic_interfaces.protocols.dimension import Dimension, DimensionType
from dbt_semantic_interfaces.protocols.semantic_manifest import SemanticManifest
from dbt_semantic_interfaces.protocols.semantic_model import SemanticModel
from dbt_semantic_interfaces.references import (
DimensionReference,
MeasureReference,
MetricReference,
SemanticModelReference,
TimeDimensionReference,
)
from dbt_semantic_interfaces.type_enums import MetricType
from dbt_semantic_interfaces.type_enums.date_part import DatePart
from dbt_semantic_interfaces.type_enums.time_granularity import TimeGranularity
from metricflow.dataset.dataset import DataSet
from metricflow.errors.errors import UnknownMetricLinkingError
from metricflow.mf_logging.pretty_print import mf_pformat
from metricflow.model.semantics.linkable_element_properties import LinkableElementProperties
from metricflow.model.semantics.semantic_model_join_evaluator import SemanticModelJoinEvaluator
from metricflow.protocols.semantics import SemanticModelAccessor
from metricflow.specs.specs import (
DEFAULT_TIME_GRANULARITY,
DimensionSpec,
EntityReference,
EntitySpec,
LinkableSpecSet,
TimeDimensionSpec,
)
class LinkableDimension:
"""Describes how a dimension can be realized by joining based on entity links."""
# The semantic model where this dimension was defined.
semantic_model_origin: Optional[SemanticModelReference]
element_name: str
entity_links: Tuple[EntityReference, ...]
join_path: Tuple[SemanticModelJoinPathElement, ...]
properties: FrozenSet[LinkableElementProperties]
time_granularity: Optional[TimeGranularity]
date_part: Optional[DatePart]
def path_key(self) -> ElementPathKey: # noqa: D
return ElementPathKey(
element_name=self.element_name,
entity_links=self.entity_links,
time_granularity=self.time_granularity,
date_part=self.date_part,
)
def reference(self) -> DimensionReference: # noqa: D
return DimensionReference(element_name=self.element_name)
class SemanticModelJoinPathElement:
"""Describes joining a semantic model by the given entity."""
semantic_model_reference: SemanticModelReference
join_on_entity: EntityReference
class LinkableElementProperties(Enum):
"""The properties associated with a valid linkable element.
Local means an element that is defined within the same semantic model as the measure. This definition is used
throughout the related classes.
"""
# A local element as per above definition.
LOCAL = "local"
# A local dimension that is prefixed with a local primary entity.
LOCAL_LINKED = "local_linked"
# An element that was joined to the measure semantic model by an entity.
JOINED = "joined"
# An element that was joined to the measure semantic model by joining multiple semantic models.
MULTI_HOP = "multi_hop"
# A time dimension that is a version of a time dimension in a semantic model, but at a different granularity.
DERIVED_TIME_GRANULARITY = "derived_time_granularity"
# Refers to an entity, not a dimension.
ENTITY = "entity"
# See metric_time in DataSet
METRIC_TIME = "metric_time"
def all_properties() -> FrozenSet[LinkableElementProperties]: # noqa: D
return frozenset(
{
LinkableElementProperties.LOCAL,
LinkableElementProperties.LOCAL_LINKED,
LinkableElementProperties.JOINED,
LinkableElementProperties.MULTI_HOP,
LinkableElementProperties.DERIVED_TIME_GRANULARITY,
LinkableElementProperties.METRIC_TIME,
}
)
DEFAULT_TIME_GRANULARITY = TimeGranularity.DAY
The provided code snippet includes necessary dependencies for implementing the `_generate_linkable_time_dimensions` function. Write a Python function `def _generate_linkable_time_dimensions( semantic_model_origin: SemanticModelReference, dimension: Dimension, entity_links: Tuple[EntityReference, ...], join_path: Sequence[SemanticModelJoinPathElement], with_properties: FrozenSet[LinkableElementProperties], ) -> Sequence[LinkableDimension]` to solve the following problem:
Generates different versions of the given dimension, but at other valid time granularities.
Here is the function:
def _generate_linkable_time_dimensions(
semantic_model_origin: SemanticModelReference,
dimension: Dimension,
entity_links: Tuple[EntityReference, ...],
join_path: Sequence[SemanticModelJoinPathElement],
with_properties: FrozenSet[LinkableElementProperties],
) -> Sequence[LinkableDimension]:
"""Generates different versions of the given dimension, but at other valid time granularities."""
linkable_dimensions = []
defined_time_granularity = (
dimension.type_params.time_granularity if dimension.type_params else DEFAULT_TIME_GRANULARITY
)
for time_granularity in TimeGranularity:
if time_granularity < defined_time_granularity:
continue
properties = set(with_properties)
if time_granularity != defined_time_granularity:
properties.add(LinkableElementProperties.DERIVED_TIME_GRANULARITY)
linkable_dimensions.append(
LinkableDimension(
semantic_model_origin=semantic_model_origin,
element_name=dimension.reference.element_name,
entity_links=entity_links,
join_path=tuple(join_path),
time_granularity=time_granularity,
date_part=None,
properties=frozenset(properties),
)
)
# Add the time dimension aggregated to a different date part.
for date_part in DatePart:
if time_granularity.to_int() <= date_part.to_int():
linkable_dimensions.append(
LinkableDimension(
semantic_model_origin=semantic_model_origin,
element_name=dimension.reference.element_name,
entity_links=entity_links,
join_path=tuple(join_path),
time_granularity=time_granularity,
date_part=date_part,
properties=frozenset(properties),
)
)
return linkable_dimensions | Generates different versions of the given dimension, but at other valid time granularities. |
179,429 | from __future__ import annotations
import datetime
import functools
import logging
import os
import platform
import sys
import time
import traceback
import uuid
from hashlib import sha256
from typing import Callable, List, Optional, TypeVar
from typing_extensions import ParamSpec
from metricflow.random_id import random_id
from metricflow.telemetry.handlers.handlers import (
TelemetryHandler,
ToMemoryTelemetryHandler,
)
from metricflow.telemetry.handlers.python_log import PythonLoggerTelemetryHandler
from metricflow.telemetry.models import FunctionEndEvent, FunctionStartEvent, TelemetryLevel
class TelemetryReporter:
"""Reports telemetry for improving product experience."""
# Session ID to use when requesting a non-uniquely identifiable ID.
FULLY_ANONYMOUS_CLIENT_ID = "anonymous"
ENV_EMAIL_OVERRIDE = "METRICFLOW_CLIENT_EMAIL"
def __init__(self, report_levels_higher_or_equal_to: TelemetryLevel, fully_anonymous: bool = False) -> None:
"""If fully_anonymous is set, use a client_id that is not unique."""
self._report_levels_higher_or_equal_to = report_levels_higher_or_equal_to
self._fully_anonymous = fully_anonymous
self._email = os.getenv(TelemetryReporter.ENV_EMAIL_OVERRIDE)
if fully_anonymous:
self._client_id = TelemetryReporter.FULLY_ANONYMOUS_CLIENT_ID
elif self._email:
self._client_id = self._email
else:
self._client_id = TelemetryReporter._create_client_id()
# For testing
self._test_handler = ToMemoryTelemetryHandler()
self._handlers: List[TelemetryHandler] = []
def _create_client_id() -> str:
"""Creates an identifier for the current user based on their current environment.
More specifically, this function creates a SHA-256 hash based on the system platform, release, and MAC address.
The created client ID is not guaranteed to be unique by user.
"""
# getnode() returns the MAC.
id_str = "_".join([sys.platform, platform.release(), str(uuid.getnode())])
return sha256(id_str.encode("utf-8")).hexdigest()
def add_python_log_handler(self) -> None: # noqa: D
self._handlers.append(PythonLoggerTelemetryHandler(logger_level=logging.INFO))
def add_test_handler(self) -> None:
"""See test_handler."""
self._handlers.append(self._test_handler)
def test_handler(self) -> ToMemoryTelemetryHandler:
"""Used for testing only to verify that the handlers are getting the right events."""
return self._test_handler
def log_function_start( # noqa: D
self,
invocation_id: str,
module_name: str,
function_name: str,
) -> None:
"""Logs the start of a function call when the logging level >= USAGE.
invocation_id is to uniquely identify different function calls.
"""
if TelemetryLevel.USAGE >= self._report_levels_higher_or_equal_to:
for handler in self._handlers:
handler.log(
client_id=self._client_id,
function_start_event=FunctionStartEvent.create(
event_time=datetime.datetime.now(),
level_name=TelemetryLevel.USAGE.name,
invocation_id=invocation_id,
module_name=module_name,
function_name=function_name,
),
)
def log_function_end( # noqa: D
self, invocation_id: str, module_name: str, function_name: str, runtime: float, exception_trace: Optional[str]
) -> None:
"""Similar to log_function_end, except adding the duration of the call and exception trace on error."""
if TelemetryLevel.USAGE >= self._report_levels_higher_or_equal_to or (
exception_trace and TelemetryLevel.EXCEPTION >= self._report_levels_higher_or_equal_to
):
for handler in self._handlers:
handler.log(
client_id=self._client_id,
function_end_event=FunctionEndEvent.create(
event_time=datetime.datetime.now(),
level_name=TelemetryLevel.USAGE.name if not exception_trace else TelemetryLevel.EXCEPTION.name,
invocation_id=invocation_id,
module_name=module_name,
function_name=function_name,
runtime=runtime,
exception_trace=exception_trace,
),
)
P = ParamSpec("P")
R = TypeVar("R")
def random_id() -> str:
"""Generates an 8-digit random alphanumeric string."""
alphabet = string.ascii_lowercase + string.digits
# Characters that go below the line are visually unappealing, so don't use those.
filtered_alphabet = [x for x in alphabet if x not in "gjpqy"]
return "".join(random.choices(filtered_alphabet, k=8))
The provided code snippet includes necessary dependencies for implementing the `log_call` function. Write a Python function `def log_call(telemetry_reporter: TelemetryReporter, module_name: str) -> Callable[[Callable[P, R]], Callable[P, R]]` to solve the following problem:
Decorator to make it easier to log telemetry for function calls. Using module_name instead of introspection since it seems more robust. Example call: @log_call(telemetry_reporter=telemetry_reporter, module_name=__name__) def test_function() -> str: return "foo"
Here is the function:
def log_call(telemetry_reporter: TelemetryReporter, module_name: str) -> Callable[[Callable[P, R]], Callable[P, R]]:
"""Decorator to make it easier to log telemetry for function calls.
Using module_name instead of introspection since it seems more robust.
Example call:
@log_call(telemetry_reporter=telemetry_reporter, module_name=__name__)
def test_function() -> str:
return "foo"
"""
def decorator(func: Callable[P, R]) -> Callable[P, R]:
@functools.wraps(func)
def wrapped(*args: P.args, **kwargs: P.kwargs) -> R:
# Not every Callable has a __name__
function_name = getattr(func, "__name__", repr(func))
invocation_id = f"call_{random_id()}"
start_time = time.time()
telemetry_reporter.log_function_start(
invocation_id=invocation_id, module_name=module_name, function_name=function_name
)
exception_trace: Optional[str] = None
try:
return func(*args, **kwargs)
except Exception:
exception_trace = traceback.format_exc()
raise
finally:
telemetry_reporter.log_function_end(
invocation_id=invocation_id,
module_name=module_name,
function_name=function_name,
runtime=time.time() - start_time,
exception_trace=exception_trace,
)
return wrapped
return decorator | Decorator to make it easier to log telemetry for function calls. Using module_name instead of introspection since it seems more robust. Example call: @log_call(telemetry_reporter=telemetry_reporter, module_name=__name__) def test_function() -> str: return "foo" |
179,430 | import torch
import dataclasses
from dataclasses import dataclass
import logging
import os
import io
import json
from typing import Sequence, Dict, List, Any
import copy
from EdgeGPT import Chatbot, ConversationStyle
import transformers
from torch.utils.data import Dataset
from enum import auto, Enum
async def edgegpt_complete(prompt):
assert os.path.exists('cookies.json'), """cookies.json is not exist for Bingchat generation,
please see https://github.com/acheong08/EdgeGPT for instruction how to get your cookies.json"""
with open('cookies.json', 'r') as f:
cookies = json.load(f)
bot = Chatbot(cookies=cookies)
response = await bot.ask(prompt=prompt, conversation_style=ConversationStyle.balanced)
await bot.close()
return response | null |
179,431 | import torch
import dataclasses
from dataclasses import dataclass
import logging
import os
import io
import json
from typing import Sequence, Dict, List, Any
import copy
from EdgeGPT import Chatbot, ConversationStyle
import transformers
from torch.utils.data import Dataset
from enum import auto, Enum
def _make_w_io_base(f, mode: str):
if not isinstance(f, io.IOBase):
f_dirname = os.path.dirname(f)
if f_dirname != "":
os.makedirs(f_dirname, exist_ok=True)
f = open(f, mode=mode)
return f
The provided code snippet includes necessary dependencies for implementing the `jdump` function. Write a Python function `def jdump(obj, f, mode="w", indent=2, default=str)` to solve the following problem:
Dump a str or dictionary to a file in json format. Args: obj: An object to be written. f: A string path to the location on disk. mode: Mode for opening the file. indent: Indent for storing json dictionaries. default: A function to handle non-serializable entries; defaults to `str`.
Here is the function:
def jdump(obj, f, mode="w", indent=2, default=str):
"""Dump a str or dictionary to a file in json format.
Args:
obj: An object to be written.
f: A string path to the location on disk.
mode: Mode for opening the file.
indent: Indent for storing json dictionaries.
default: A function to handle non-serializable entries; defaults to `str`.
"""
f = _make_w_io_base(f, mode)
if isinstance(obj, (dict, list)):
json.dump(obj, f, indent=indent, default=default)
elif isinstance(obj, str):
f.write(obj)
else:
raise ValueError(f"Unexpected type: {type(obj)}")
f.close() | Dump a str or dictionary to a file in json format. Args: obj: An object to be written. f: A string path to the location on disk. mode: Mode for opening the file. indent: Indent for storing json dictionaries. default: A function to handle non-serializable entries; defaults to `str`. |
179,432 | import torch
import dataclasses
from dataclasses import dataclass
import logging
import os
import io
import json
from typing import Sequence, Dict, List, Any
import copy
from EdgeGPT import Chatbot, ConversationStyle
import transformers
from torch.utils.data import Dataset
from enum import auto, Enum
def _make_r_io_base(f, mode: str):
if not isinstance(f, io.IOBase):
f = open(f, mode=mode)
return f
The provided code snippet includes necessary dependencies for implementing the `jload` function. Write a Python function `def jload(f, mode="r")` to solve the following problem:
Load a .json file into a dictionary.
Here is the function:
def jload(f, mode="r"):
"""Load a .json file into a dictionary."""
f = _make_r_io_base(f, mode)
jdict = json.load(f)
f.close()
return jdict | Load a .json file into a dictionary. |
179,433 | import torch
import dataclasses
from dataclasses import dataclass
import logging
import os
import io
import json
from typing import Sequence, Dict, List, Any
import copy
from EdgeGPT import Chatbot, ConversationStyle
import transformers
from torch.utils.data import Dataset
from enum import auto, Enum
The provided code snippet includes necessary dependencies for implementing the `safe_save_model_for_hf_trainer` function. Write a Python function `def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str)` to solve the following problem:
Collects the state dict and dump to disk.
Here is the function:
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer,
output_dir: str):
"""Collects the state dict and dump to disk."""
state_dict = trainer.model.state_dict()
if trainer.args.should_save:
cpu_state_dict = {
key: value.cpu()
for key, value in state_dict.items()
}
del state_dict
trainer._save(output_dir, state_dict=cpu_state_dict) # noqa | Collects the state dict and dump to disk. |
179,434 | import torch
import dataclasses
from dataclasses import dataclass
import logging
import os
import io
import json
from typing import Sequence, Dict, List, Any
import copy
from EdgeGPT import Chatbot, ConversationStyle
import transformers
from torch.utils.data import Dataset
from enum import auto, Enum
The provided code snippet includes necessary dependencies for implementing the `smart_tokenizer_and_embedding_resize` function. Write a Python function `def smart_tokenizer_and_embedding_resize( special_tokens_dict: Dict, tokenizer: transformers.PreTrainedTokenizer, model: transformers.PreTrainedModel, )` to solve the following problem:
Resize tokenizer and embedding. Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
Here is the function:
def smart_tokenizer_and_embedding_resize(
special_tokens_dict: Dict,
tokenizer: transformers.PreTrainedTokenizer,
model: transformers.PreTrainedModel,
):
"""Resize tokenizer and embedding.
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
"""
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = model.get_input_embeddings().weight.data
output_embeddings = model.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg | Resize tokenizer and embedding. Note: This is the unoptimized version that may make your embedding size not be divisible by 64. |
179,435 | import torch
import dataclasses
from dataclasses import dataclass
import logging
import os
import io
import json
from typing import Sequence, Dict, List, Any
import copy
from EdgeGPT import Chatbot, ConversationStyle
import transformers
from torch.utils.data import Dataset
from enum import auto, Enum
default_conversation = conv_v1_2
def _tokenize_fn(strings: Sequence[str],
tokenizer: transformers.PreTrainedTokenizer) -> Dict:
"""Tokenize a list of strings."""
tokenized_list = [
tokenizer(
text,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
) for text in strings
]
input_ids = labels = [
tokenized.input_ids[0] for tokenized in tokenized_list
]
input_ids_lens = labels_lens = [
tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item()
for tokenized in tokenized_list
]
return dict(
input_ids=input_ids,
labels=labels,
input_ids_lens=input_ids_lens,
labels_lens=labels_lens,
)
def _mask_targets(target, tokenized_lens, speakers, header_len, s_ids):
cur_idx = header_len
tgt_len = target.shape[0]
for tokenized_len, speaker, s_id in zip(tokenized_lens, speakers, s_ids):
if cur_idx >= tgt_len:
break
elif cur_idx + tokenized_len < tgt_len:
# Check whether the mask is applied to the correct position
if not torch.equal(target[cur_idx + 2:cur_idx + tokenized_len],
s_id[2:]):
logging.warning("a sentence mismatches the corresponding piece "
"in the conversation")
if speaker == "human":
target[cur_idx:cur_idx + tokenized_len] = IGNORE_INDEX
cur_idx += tokenized_len
def _add_speaker_and_signal(header, source, get_conversation=True):
"""Add speaker and start/end signal on each round."""
BEGIN_SIGNAL = "### "
END_SIGNAL = "\n"
conversation = header
unknown_role = "unknown" # use default unknown role
roles = {
"human": default_conversation.roles[0], # human role
"gpt": default_conversation.roles[1], # gpt role
}
for sentence in source:
sentence_from = sentence["from"].lower()
sentence["value"] = (
BEGIN_SIGNAL
+ roles.get(sentence_from, unknown_role)
+ ": "
+ sentence["value"]
+ END_SIGNAL
)
if get_conversation:
conversation += sentence["value"]
return conversation
The provided code snippet includes necessary dependencies for implementing the `preprocess` function. Write a Python function `def preprocess( sources: Sequence[str], tokenizer: transformers.PreTrainedTokenizer, ) -> Dict` to solve the following problem:
Given a list of sources, each is a conversation list. This transform: 1. Add signal '### ' at the beginning each sentence, with end signal '\n'; 2. Concatenate conversations together; 3. Tokenize the concatenated conversation; 4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
Here is the function:
def preprocess(
sources: Sequence[str],
tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
"""
Given a list of sources, each is a conversation list. This transform:
1. Add signal '### ' at the beginning each sentence, with end signal '\n';
2. Concatenate conversations together;
3. Tokenize the concatenated conversation;
4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
"""
# add end signal and concatenate together
conversations = []
header = f"{default_conversation.system}\n\n"
for source in sources:
conversation = _add_speaker_and_signal(header, source)
conversations.append(conversation)
# tokenize conversations
conversations_tokenized = _tokenize_fn(conversations, tokenizer)
input_ids = conversations_tokenized["input_ids"]
targets = copy.deepcopy(input_ids)
header_len = _tokenize_fn([header], tokenizer)["input_ids_lens"][0]
for target, source in zip(targets, sources):
tokenized_sentence = _tokenize_fn([s["value"] for s in source], tokenizer)
tokenized_lens = tokenized_sentence["input_ids_lens"]
# Currently, "###" is tokenized into 2 tokens in the whole conversation,
# and 1 token in a single sentence, so we do not need to use the line below.
# tokenized_lens = [l-1 for l in tokenized_lens]
speakers = [sentence["from"] for sentence in source]
ids = tokenized_sentence["input_ids"]
_mask_targets(target, tokenized_lens, speakers, header_len, ids)
return dict(input_ids=input_ids, labels=targets) | Given a list of sources, each is a conversation list. This transform: 1. Add signal '### ' at the beginning each sentence, with end signal '\n'; 2. Concatenate conversations together; 3. Tokenize the concatenated conversation; 4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX. |
179,436 | import torch
import dataclasses
from dataclasses import dataclass
import logging
import os
import io
import json
from typing import Sequence, Dict, List, Any
import copy
from EdgeGPT import Chatbot, ConversationStyle
import transformers
from torch.utils.data import Dataset
from enum import auto, Enum
class SupervisedDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, data_path: str,
tokenizer: transformers.PreTrainedTokenizer):
super(SupervisedDataset, self).__init__()
logging.warning("Loading data...")
list_data_dict = json.load(open(data_path, "r"))
logging.warning("Formatting inputs...")
sources = [example["conversations"] for example in list_data_dict]
data_dict = preprocess(sources, tokenizer)
self.input_ids = data_dict["input_ids"]
self.labels = data_dict["labels"]
def __len__(self):
return len(self.input_ids)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
return dict(input_ids=self.input_ids[i], labels=self.labels[i])
class LazySupervisedDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, data_path: str,
tokenizer: transformers.PreTrainedTokenizer):
super(LazySupervisedDataset, self).__init__()
logging.warning("Loading data...")
list_data_dict = json.load(open(data_path, "r"))
logging.warning("Formatting inputs...Skip in lazy mode")
self.tokenizer = tokenizer
self.list_data_dict = list_data_dict
def __len__(self):
return len(self.list_data_dict)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
sources = self.list_data_dict[i]
if isinstance(i, int):
sources = [sources]
data_dict = preprocess(
copy.deepcopy([e["conversations"] for e in sources]),
self.tokenizer)
if isinstance(i, int):
data_dict = dict(input_ids=data_dict["input_ids"][0],
labels=data_dict["labels"][0])
return data_dict
class DataCollatorForSupervisedDataset(object):
"""Collate examples for supervised fine-tuning."""
tokenizer: transformers.PreTrainedTokenizer
def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
input_ids, labels = tuple([instance[key] for instance in instances]
for key in ("input_ids", "labels"))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id)
labels = torch.nn.utils.rnn.pad_sequence(labels,
batch_first=True,
padding_value=IGNORE_INDEX)
return dict(
input_ids=input_ids,
labels=labels,
attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
)
The provided code snippet includes necessary dependencies for implementing the `make_supervised_data_module` function. Write a Python function `def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer, data_path, lazy_preprocess=False) -> Dict` to solve the following problem:
Make dataset and collator for supervised fine-tuning.
Here is the function:
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer,
data_path,
lazy_preprocess=False) -> Dict:
"""Make dataset and collator for supervised fine-tuning."""
dataset_cls = (LazySupervisedDataset
if lazy_preprocess else SupervisedDataset)
train_dataset = dataset_cls(tokenizer=tokenizer,
data_path=data_path)
data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
return train_dataset, None, data_collator | Make dataset and collator for supervised fine-tuning. |
179,437 | import torch
import dataclasses
from dataclasses import dataclass
import logging
import os
import io
import json
from typing import Sequence, Dict, List, Any
import copy
from EdgeGPT import Chatbot, ConversationStyle
import transformers
from torch.utils.data import Dataset
from enum import auto, Enum
def convert_vicuna(data):
# Prompt from stanford alpaca's training script
PROMPT_DICT = {
"prompt_input": (
"Below is an instruction that describes a task, paired with an input that provides further context. "
"Write a response that appropriately completes the request.\n\n"
"### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:"
),
"prompt_no_input": (
"Below is an instruction that describes a task. "
"Write a response that appropriately completes the request.\n\n"
"### Instruction:\n{instruction}\n\n### Response:"
),
}
prompt_input, prompt_no_input = PROMPT_DICT["prompt_input"], PROMPT_DICT["prompt_no_input"]
sources = [
prompt_input.format_map(example) if example.get("input", "") != "" else prompt_no_input.format_map(example)
for example in data
]
targets = [example['output'] for example in data]
new_data = []
cnt = 1
for s, t in zip(sources, targets):
new_data.append({
'id': str(cnt),
'conversations': [
{
'from': 'human',
'value': s,
},
{
'from': 'gpt',
'value': t,
}
]
})
cnt += 1
return new_data | null |
179,438 | import torch
import dataclasses
from dataclasses import dataclass
import logging
import os
import io
import json
from typing import Sequence, Dict, List, Any
import copy
from EdgeGPT import Chatbot, ConversationStyle
import transformers
from torch.utils.data import Dataset
from enum import auto, Enum
def generate_stream(model, tokenizer, params, device,
context_len=2048, stream_interval=2):
prompt = params["prompt"]
l_prompt = len(prompt)
temperature = float(params.get("temperature", 1.0))
max_new_tokens = int(params.get("max_new_tokens", 256))
stop_str = params.get("stop", None)
input_ids = tokenizer(prompt).input_ids
output_ids = list(input_ids)
max_src_len = context_len - max_new_tokens - 8
input_ids = input_ids[-max_src_len:]
for i in range(max_new_tokens):
if i == 0:
out = model(
torch.as_tensor([input_ids], device=device), use_cache=True)
logits = out.logits
past_key_values = out.past_key_values
else:
attention_mask = torch.ones(
1, past_key_values[0][0].shape[-2] + 1, device=device)
out = model(input_ids=torch.as_tensor([[token]], device=device),
use_cache=True,
attention_mask=attention_mask,
past_key_values=past_key_values)
logits = out.logits
past_key_values = out.past_key_values
last_token_logits = logits[0][-1]
if device == "mps":
# Switch to CPU by avoiding some bugs in mps backend.
last_token_logits = last_token_logits.float().to("cpu")
if temperature < 1e-4:
token = int(torch.argmax(last_token_logits))
else:
probs = torch.softmax(last_token_logits / temperature, dim=-1)
token = int(torch.multinomial(probs, num_samples=1))
output_ids.append(token)
if token == tokenizer.eos_token_id:
stopped = True
else:
stopped = False
if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped:
output = tokenizer.decode(output_ids, skip_special_tokens=True)
pos = output.rfind(stop_str, l_prompt)
if pos != -1:
output = output[:pos]
stopped = True
yield output
if stopped:
break
del past_key_values | null |
179,439 | import os
import logging
import transformers
from transformers import LlamaForCausalLM, LlamaTokenizer
from peft import (
LoraConfig,
get_peft_model,
get_peft_model_state_dict,
)
from omegaconf import OmegaConf
from ingest_docs import ingest_docs
from data_gen import launch_data_generation
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
import argparse
from peft import prepare_model_for_int8_training
from utils import make_supervised_data_module, smart_tokenizer_and_embedding_resize
def args_parse():
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default="config.yaml")
parser.add_argument("--local_rank", type=int, default=0)
return parser.parse_args() | null |
179,440 | import os
import time
import utils
import json
import random
import string
import regex as re
import pickle
import openai
import tqdm
import asyncio
import tiktoken
from langchain.docstore.document import Document
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
def find_word_in_string(w, s):
return re.compile(r"\b({0})\b".format(w), flags=re.IGNORECASE).search(s) | null |
179,441 | import os
import time
import utils
import json
import random
import string
import regex as re
import pickle
import openai
import tqdm
import asyncio
import tiktoken
from langchain.docstore.document import Document
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
def launch_CoT_generation():
return NotImplementedError("This method is not yet implemented") | null |
179,442 | import os
import time
import utils
import json
import random
import string
import regex as re
import pickle
import openai
import tqdm
import asyncio
import tiktoken
from langchain.docstore.document import Document
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
def launch_data_generation(
url_docs,
documents_embeds,
output_dir="assets/",
num_tasks_to_generate=140,
strategy_instruct="summarizing-gpt-3.5-turbo-generating-gpt-4",
model_name_code="gpt-4",
num_docs_to_output=1,
use_scraped_docs=True,
temperature=0.7,
top_p=1.0,
max_tokens=500,
logger=None,
**kwargs
):
def ingest_docs(url_docs: str, recursive_depth: int = 1, return_summary: bool = True, logger=None) -> Tuple[List, List]:
def unit_test():
import logging
from ingest_docs import ingest_docs
logger = logging.getLogger(__name__)
class Config:
def __init__(self):
self.DATA_PATH = "assets/"
self.NUM_TASKS_TO_GENERATE = 100
docs, docs_for_summary = ingest_docs("https://developers.notion.com/reference", recursive_depth=1, logger=logger)
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_documents(docs, embeddings)
with open("assets/vectorstore.pkl", "wb") as f:
pickle.dump(vectorstore, f)
api_docs = "https://developers.notion.com/reference"
cfg = Config()
launch_data_generation(
url_docs=api_docs,
documents_embeds=vectorstore,
output_dir=cfg.DATA_PATH,
num_tasks_to_generate=cfg.NUM_TASKS_TO_GENERATE,
model_name="gpt-4",
logger=logger,
num_prompt_instructions=3,
documents_for_summary=docs_for_summary
) | null |
179,443 | import pickle as pkl
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
import torch
from utils import conv_v1_2, SeparatorStyle
from utils import generate_stream as generate_stream_func
import argparse
import os.path as osp
def args_parse():
    """Build and parse the command-line arguments for Alpaca inference.

    Returns:
        argparse.Namespace with ``model_folder`` (required) and
        ``device`` (defaults to "cuda").
    """
    cli = argparse.ArgumentParser(description='Inference with AlpacaAPI')
    cli.add_argument('--model_folder', type=str, required=True)
    cli.add_argument('--device', type=str, default="cuda")
    return cli.parse_args()
179,444 | import pickle as pkl
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
import torch
from utils import conv_v1_2, SeparatorStyle
from utils import generate_stream as generate_stream_func
import argparse
import os.path as osp
class SimpleChatIO:
    """Minimal console chat I/O: reads from stdin, streams replies to stdout."""

    def prompt_for_input(self, role) -> str:
        """Read one line of user input, prefixed with the speaker role."""
        return input(f"{role}: ")

    def prompt_for_output(self, role: str):
        """Print the role prefix without a newline so output can stream after it."""
        print(f"{role}: ", end="", flush=True)

    def stream_output(self, output_stream, skip_echo_len: int):
        """Incrementally print whole words as they arrive from a token stream.

        Each element of ``output_stream`` is the full generation so far; the
        first ``skip_echo_len`` characters (the echoed prompt) are dropped.
        Only completed words are printed per step — the trailing word may
        still be growing. Returns the final decoded text.
        """
        printed = 0
        for partial in output_stream:
            words = partial[skip_echo_len:].strip().split(" ")
            complete = len(words) - 1  # last word may be mid-generation
            if complete > printed:
                print(" ".join(words[printed:complete]), end=" ", flush=True)
                printed = complete
        print(" ".join(words[printed:]), flush=True)
        return " ".join(words)
class SeparatorStyle(Enum):
    """Different separator style."""
    SINGLE = auto()  # one separator string (conv.sep) is used as the stop token
    TWO = auto()     # non-SINGLE styles fall back to conv.sep2 (see vicuna_chat)
# Default v1.2 conversation template: a system preamble plus one seeded
# Human/Assistant exchange used as a few-shot prefix. offset=2 presumably
# marks the two seed messages as fixed context — confirm against Conversation.
conv_v1_2 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("Human", "Assistant"),
    messages=(
        ("Human", "What are the key differences between renewable and non-renewable energy sources?"),
        ("Assistant",
            "Renewable energy sources are those that can be replenished naturally in a relatively "
            "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
            "Non-renewable energy sources, on the other hand, are finite and will eventually be "
            "depleted, such as coal, oil, and natural gas. Here are some key differences between "
            "renewable and non-renewable energy sources:\n"
            "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
            "energy sources are finite and will eventually run out.\n"
            "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
            "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
            "and other negative effects.\n"
            "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
            "have lower operational costs than non-renewable sources.\n"
            "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
            "locations than non-renewable sources.\n"
            "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
            "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
            "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
            "non-renewable sources are not, and their depletion can lead to economic and social instability.\n")
    ),
    offset=2,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
)
def vicuna_chat(model_name, device, num_gpus, load_8bit=False, debug=False):
    """Interactive REPL chat against a local Vicuna/LLaMA model.

    Each user turn is augmented with the most relevant API-doc chunk
    retrieved from the pickled FAISS vectorstore, then streamed through
    the model using the conv_v1_2 conversation template.

    Args:
        model_name: HF model path/name loaded via LlamaForCausalLM.
        device: "cpu" or "cuda".
        num_gpus: GPU count (int-like) or "auto" for automatic device_map.
        load_8bit: load the model with 8-bit weights to reduce memory.
        debug: print the model and each prompt/output pair.
    """
    prefix = """Below is an instruction that describes a task, paired with an API references that provides further about the API. Write code that appropriately completes the request.\n\n### Instruction:\n """
    if device == "cpu":
        kwargs = {}
    elif device == "cuda":
        kwargs = {"torch_dtype": torch.float16}
        if num_gpus == "auto":
            kwargs["device_map"] = "auto"
        else:
            num_gpus = int(num_gpus)
            if num_gpus != 1:
                # shard across GPUs, capping per-device memory
                kwargs.update({
                    "device_map": "auto",
                    "max_memory": {i: "13GiB" for i in range(num_gpus)},
                })
    model = LlamaForCausalLM.from_pretrained(model_name,
        load_in_8bit=load_8bit, low_cpu_mem_usage=True, **kwargs)
    tokenizer = LlamaTokenizer.from_pretrained("jeffwan/vicuna-13b", use_fast=False)
    chatio = SimpleChatIO()
    if device == "cuda" and num_gpus == 1:
        # single-GPU path: no device_map was set, so move the model manually
        model.to(device)
    if debug:
        print(model)
    conv = conv_v1_2.copy()
    while True:
        try:
            inp = chatio.prompt_for_input(conv.roles[0])
        except EOFError:
            inp = ""
        if not inp:
            print("exit...")
            break
        # retrieve the single most similar doc chunk as grounding context
        with open("assets/vectorstore.pkl", "rb") as f:
            vectorstore = pkl.load(f)
        docs = vectorstore.similarity_search(inp, k=1)[0].page_content
        inp = prefix + inp + "\n\n### Input:\n" + docs + "\n\n### Code:"
        conv.append_message(conv.roles[0], inp)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()
        # number of echoed-prompt characters to drop from the streamed output
        skip_echo_len = len(prompt.replace("</s>", " ")) + 1
        params = {
            "model": model_name,
            "prompt": prompt,
            "temperature": 0.7,
            "max_new_tokens": 700,
            "stop": conv.sep if conv.sep_style == SeparatorStyle.SINGLE else conv.sep2,
        }
        chatio.prompt_for_output(conv.roles[1])
        output_stream = generate_stream_func(model, tokenizer, params, device)
        outputs = chatio.stream_output(output_stream, skip_echo_len)
        # NOTE: strip is important to align with the training data.
        conv.messages[-1][-1] = outputs.strip()
        if debug:
            print("\n", {"prompt": prompt, "outputs": outputs}, "\n")
179,445 | from typing import List, Optional, Tuple
import torch
import transformers
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
from einops import rearrange
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
from flash_attn.bert_padding import unpad_input, pad_input
def forward(
self,
hidden_states: torch.Tensor,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor],
Optional[Tuple[torch.Tensor]]]:
def _prepare_decoder_attention_mask(self, attention_mask, input_shape,
inputs_embeds, past_key_values_length):
def replace_llama_attn_with_flash_attn():
    """Monkey-patch HF LLaMA to use the flash-attention implementations.

    Replaces LlamaModel._prepare_decoder_attention_mask and
    LlamaAttention.forward with the module-level functions of the same
    names defined above. Call once before instantiating the model.
    """
    transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = _prepare_decoder_attention_mask
    transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
179,446 | import os
from collections import deque
from typing import Dict, List, Optional, Any
from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import BaseLLM
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Field
from langchain.chains.base import Chain
from langchain.vectorstores import FAISS
from langchain.docstore import InMemoryDocstore
import faiss
The provided code snippet includes necessary dependencies for implementing the `get_next_task` function. Write a Python function `def get_next_task(task_creation_chain: LLMChain, result: Dict, task_description: str, task_list: List[str], objective: str) -> List[Dict]` to solve the following problem:
Get the next task.
Here is the function:
def get_next_task(task_creation_chain: LLMChain, result: Dict, task_description: str, task_list: List[str], objective: str) -> List[Dict]:
    """Ask the task-creation chain for follow-up tasks.

    The chain is prompted with the latest result, the task that produced it,
    the still-incomplete tasks, and the overall objective; every non-blank
    line of the response becomes one new task dict.
    """
    pending = ", ".join(task_list)
    raw = task_creation_chain.run(
        result=result,
        task_description=task_description,
        incomplete_tasks=pending,
        objective=objective,
    )
    return [{"task_name": line} for line in raw.split('\n') if line.strip()]
179,447 | import os
from collections import deque
from typing import Dict, List, Optional, Any
from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import BaseLLM
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Field
from langchain.chains.base import Chain
from langchain.vectorstores import FAISS
from langchain.docstore import InMemoryDocstore
import faiss
The provided code snippet includes necessary dependencies for implementing the `prioritize_tasks` function. Write a Python function `def prioritize_tasks(task_prioritization_chain: LLMChain, this_task_id: int, task_list: List[Dict], objective: str) -> List[Dict]` to solve the following problem:
Prioritize tasks.
Here is the function:
def prioritize_tasks(task_prioritization_chain: LLMChain, this_task_id: int, task_list: List[Dict], objective: str) -> List[Dict]:
    """Reorder the task list via the prioritization chain.

    The chain returns one task per line as "<id>. <name>"; lines that are
    blank or lack the "id. name" shape are dropped.
    """
    names = [entry["task_name"] for entry in task_list]
    response = task_prioritization_chain.run(
        task_names=names,
        next_task_id=int(this_task_id) + 1,
        objective=objective,
    )
    reordered: List[Dict] = []
    for line in response.split('\n'):
        if not line.strip():
            continue
        pieces = line.strip().split(".", 1)
        if len(pieces) != 2:
            continue  # malformed line: no "id. name" separator
        reordered.append({"task_id": pieces[0].strip(), "task_name": pieces[1].strip()})
    return reordered
179,448 | import os
from collections import deque
from typing import Dict, List, Optional, Any
from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import BaseLLM
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Field
from langchain.chains.base import Chain
from langchain.vectorstores import FAISS
from langchain.docstore import InMemoryDocstore
import faiss
def _get_top_tasks(vectorstore, query: str, k: int) -> List[str]:
    """Get the top k tasks based on the query."""
    scored = vectorstore.similarity_search_with_score(query, k=k)
    if not scored:
        return []
    # highest similarity score first
    by_score_desc = sorted(scored, key=lambda pair: pair[1], reverse=True)
    return [str(doc.metadata['task']) for doc, _ in by_score_desc]
The provided code snippet includes necessary dependencies for implementing the `execute_task` function. Write a Python function `def execute_task(vectorstore, execution_chain: LLMChain, objective: str, task: str, k: int = 5) -> str` to solve the following problem:
Execute a task.
Here is the function:
def execute_task(vectorstore, execution_chain: LLMChain, objective: str, task: str, k: int = 5) -> str:
    """Execute a task, grounding the prompt in the k most relevant prior tasks."""
    prior_context = _get_top_tasks(vectorstore, query=objective, k=k)
    return execution_chain.run(objective=objective, context=prior_context, task=task)
179,449 | from memory_store import MemoryStorage
from disk_store import DiskStorage
class MemoryStorage:
    """In-memory key/value store backed by a plain dict.

    Missing keys read back as the empty string, mirroring the disk-backed
    store's contract.
    """

    def __init__(self) -> None:
        self.data: dict[str, str] = {}

    def set(self, key: str, value: str) -> None:
        """Store ``value`` under ``key``, overwriting any previous value."""
        self.data[key] = value

    def get(self, key: str) -> str:
        """Return the value for ``key``, or "" if it was never set."""
        return self.data.get(key, "")

    def close(self) -> bool:
        # NOTE: ideally, I would want this to have () -> None signature, but for some
        # reason mypy complains about this:
        #
        # tests/test_memory_store.py:19: error: "close" of "MemoryStorage" does not
        # return a value
        #
        # check here for more: https://github.com/python/mypy/issues/6549
        return True
def memory_db() -> None:
    """Tiny smoke demo of MemoryStorage: read-miss, write, read-hit."""
    kv = MemoryStorage()
    print(kv.get("name"))  # empty string: key not set yet
    kv.set("name", "jojo")
    print(kv.get("name"), "jojo")
179,450 | from memory_store import MemoryStorage
from disk_store import DiskStorage
class DiskStorage:
    """
    Implements the KV store on the disk

    Args:
        file_name (str): name of the file where all the data will be written. Just
        passing the file name will save the data in the current directory. You may
        pass the full file location too.

    Attributes:
        file_name (str): name of the file where all the data will be written. Just
        passing the file name will save the data in the current directory. You may
        pass the full file location too.
        file (typing.BinaryIO): file object pointing the file_name
        write_position (int): current cursor position in the file where the data can be
        written
        key_dir (dict[str, KeyEntry]): is a map of key and KeyEntry being the value.
        KeyEntry contains the position of the byte offset in the file where the
        value exists. key_dir map acts as in-memory index to fetch the values
        quickly from the disk
    """
    def __init__(self, file_name: str = "data.db"):
        """Open (or create) the store file and rebuild the in-memory index."""
        self.file_name: str = file_name
        self.write_position: int = 0
        self.key_dir: dict[str, KeyEntry] = {}
        # if the file exists already, then we will load the key_dir
        if os.path.exists(file_name):
            self._init_key_dir()
        # we open the file in `a+b` mode:
        # a - says the writes are append only. `a+` means we want append and read
        # b - says that we are operating the file in binary mode (as opposed to the
        # default string mode)
        # the handle stays open for the object's lifetime; call close() when done
        self.file: typing.BinaryIO = open(file_name, "a+b")
    def set(self, key: str, value: str) -> None:
        """
        set stores the key and value on the disk

        Args:
            key (str): the key
            value (str): the value
        """
        # The steps to save a KV to disk is simple:
        # 1. Encode the KV into bytes
        # 2. Write the bytes to disk by appending to the file
        # 3. Update KeyDir with the KeyEntry of this key
        timestamp: int = int(time.time())
        sz, data = encode_kv(timestamp=timestamp, key=key, value=value)
        # notice we don't do file seek while writing
        self._write(data)
        kv: KeyEntry = KeyEntry(
            timestamp=timestamp, position=self.write_position, total_size=sz
        )
        self.key_dir[key] = kv
        # update last write position, so that next record can be written from this point
        self.write_position += sz
    def get(self, key: str) -> str:
        """
        get retrieves the value from the disk and returns. If the key does not exist
        then it returns an empty string

        Args:
            key (str): the key

        Returns:
            string
        """
        # How get works?
        # 1. Check if there is any KeyEntry record for the key in KeyDir
        # 2. Return an empty string if key doesn't exist
        # 3. If it exists, then read KeyEntry.total_size bytes starting from the
        # KeyEntry.position from the disk
        # 4. Decode the bytes into valid KV pair and return the value
        kv: typing.Optional[KeyEntry] = self.key_dir.get(key)
        if not kv:
            return ""
        # move the current pointer to the right offset
        self.file.seek(kv.position, DEFAULT_WHENCE)
        data: bytes = self.file.read(kv.total_size)
        _, _, value = decode_kv(data)
        return value
    def _write(self, data: bytes) -> None:
        """Append raw record bytes and force them through to the disk."""
        # saving stuff to a file reliably is hard!
        # if you would like to explore and learn more, then
        # start from here: https://danluu.com/file-consistency/
        # and read this too: https://lwn.net/Articles/457667/
        self.file.write(data)
        # we need to call flush after every write so that our data is moved from
        # runtime buffer to the os buffer
        # read more about here: https://docs.python.org/3/library/os.html#os.fsync
        self.file.flush()
        # calling fsync after every write is important, this assures that our writes
        # are actually persisted to the disk
        os.fsync(self.file.fileno())
    def _init_key_dir(self) -> None:
        """Rebuild key_dir by replaying every record in the store file."""
        # we will initialise the key_dir by reading the contents of the file, record by
        # record. As we read each record, we will also update our KeyDir with the
        # corresponding KeyEntry
        #
        # NOTE: this method is a blocking one, if the DB size is yuge then it will take
        # a lot of time to startup
        print("****----------initialising the database----------****")
        with open(self.file_name, "rb") as f:
            while header_bytes := f.read(HEADER_SIZE):
                timestamp, key_size, value_size = decode_header(data=header_bytes)
                key_bytes = f.read(key_size)
                value_bytes = f.read(value_size)
                key = key_bytes.decode("utf-8")
                value = value_bytes.decode("utf-8")
                total_size = HEADER_SIZE + key_size + value_size
                kv = KeyEntry(
                    timestamp=timestamp,
                    position=self.write_position,
                    total_size=total_size,
                )
                # later records for the same key overwrite earlier ones,
                # so the index ends up pointing at the latest value
                self.key_dir[key] = kv
                self.write_position += total_size
                print(f"loaded k={key}, v={value}")
        print("****----------initialisation complete----------****")
    def close(self) -> None:
        """Flush pending writes to disk and close the file handle."""
        # before we close the file, we need to safely write the contents in the buffers
        # to the disk. Check documentation of DiskStorage._write() to understand
        # following the operations
        self.file.flush()
        os.fsync(self.file.fileno())
        self.file.close()
    def __setitem__(self, key: str, value: str) -> None:
        """Dict-style alias for set(): ``store[key] = value``."""
        return self.set(key, value)
    def __getitem__(self, item: str) -> str:
        """Dict-style alias for get(): ``store[key]``."""
        return self.get(item)
def store_db() -> None:
    """Demo of DiskStorage persistence across runs.

    On the first run the read prints an empty string; on later runs it
    prints the value loaded back from data.db.
    """
    db = DiskStorage("data.db")
    print(db.get("name"))
    db.set("name", "haha")
    print(db.get("name"))
    db.close()
179,451 | from memory_store import MemoryStorage
from disk_store import DiskStorage
class DiskStorage:
    """
    Implements the KV store on the disk

    Args:
        file_name (str): name of the file where all the data will be written. Just
        passing the file name will save the data in the current directory. You may
        pass the full file location too.

    Attributes:
        file_name (str): name of the file where all the data will be written. Just
        passing the file name will save the data in the current directory. You may
        pass the full file location too.
        file (typing.BinaryIO): file object pointing the file_name
        write_position (int): current cursor position in the file where the data can be
        written
        key_dir (dict[str, KeyEntry]): is a map of key and KeyEntry being the value.
        KeyEntry contains the position of the byte offset in the file where the
        value exists. key_dir map acts as in-memory index to fetch the values
        quickly from the disk
    """
    def __init__(self, file_name: str = "data.db"):
        """Open (or create) the store file and rebuild the in-memory index."""
        self.file_name: str = file_name
        self.write_position: int = 0
        self.key_dir: dict[str, KeyEntry] = {}
        # if the file exists already, then we will load the key_dir
        if os.path.exists(file_name):
            self._init_key_dir()
        # we open the file in `a+b` mode:
        # a - says the writes are append only. `a+` means we want append and read
        # b - says that we are operating the file in binary mode (as opposed to the
        # default string mode)
        # the handle stays open for the object's lifetime; call close() when done
        self.file: typing.BinaryIO = open(file_name, "a+b")
    def set(self, key: str, value: str) -> None:
        """
        set stores the key and value on the disk

        Args:
            key (str): the key
            value (str): the value
        """
        # The steps to save a KV to disk is simple:
        # 1. Encode the KV into bytes
        # 2. Write the bytes to disk by appending to the file
        # 3. Update KeyDir with the KeyEntry of this key
        timestamp: int = int(time.time())
        sz, data = encode_kv(timestamp=timestamp, key=key, value=value)
        # notice we don't do file seek while writing
        self._write(data)
        kv: KeyEntry = KeyEntry(
            timestamp=timestamp, position=self.write_position, total_size=sz
        )
        self.key_dir[key] = kv
        # update last write position, so that next record can be written from this point
        self.write_position += sz
    def get(self, key: str) -> str:
        """
        get retrieves the value from the disk and returns. If the key does not exist
        then it returns an empty string

        Args:
            key (str): the key

        Returns:
            string
        """
        # How get works?
        # 1. Check if there is any KeyEntry record for the key in KeyDir
        # 2. Return an empty string if key doesn't exist
        # 3. If it exists, then read KeyEntry.total_size bytes starting from the
        # KeyEntry.position from the disk
        # 4. Decode the bytes into valid KV pair and return the value
        kv: typing.Optional[KeyEntry] = self.key_dir.get(key)
        if not kv:
            return ""
        # move the current pointer to the right offset
        self.file.seek(kv.position, DEFAULT_WHENCE)
        data: bytes = self.file.read(kv.total_size)
        _, _, value = decode_kv(data)
        return value
    def _write(self, data: bytes) -> None:
        """Append raw record bytes and force them through to the disk."""
        # saving stuff to a file reliably is hard!
        # if you would like to explore and learn more, then
        # start from here: https://danluu.com/file-consistency/
        # and read this too: https://lwn.net/Articles/457667/
        self.file.write(data)
        # we need to call flush after every write so that our data is moved from
        # runtime buffer to the os buffer
        # read more about here: https://docs.python.org/3/library/os.html#os.fsync
        self.file.flush()
        # calling fsync after every write is important, this assures that our writes
        # are actually persisted to the disk
        os.fsync(self.file.fileno())
    def _init_key_dir(self) -> None:
        """Rebuild key_dir by replaying every record in the store file."""
        # we will initialise the key_dir by reading the contents of the file, record by
        # record. As we read each record, we will also update our KeyDir with the
        # corresponding KeyEntry
        #
        # NOTE: this method is a blocking one, if the DB size is yuge then it will take
        # a lot of time to startup
        print("****----------initialising the database----------****")
        with open(self.file_name, "rb") as f:
            while header_bytes := f.read(HEADER_SIZE):
                timestamp, key_size, value_size = decode_header(data=header_bytes)
                key_bytes = f.read(key_size)
                value_bytes = f.read(value_size)
                key = key_bytes.decode("utf-8")
                value = value_bytes.decode("utf-8")
                total_size = HEADER_SIZE + key_size + value_size
                kv = KeyEntry(
                    timestamp=timestamp,
                    position=self.write_position,
                    total_size=total_size,
                )
                # later records for the same key overwrite earlier ones,
                # so the index ends up pointing at the latest value
                self.key_dir[key] = kv
                self.write_position += total_size
                print(f"loaded k={key}, v={value}")
        print("****----------initialisation complete----------****")
    def close(self) -> None:
        """Flush pending writes to disk and close the file handle."""
        # before we close the file, we need to safely write the contents in the buffers
        # to the disk. Check documentation of DiskStorage._write() to understand
        # following the operations
        self.file.flush()
        os.fsync(self.file.fileno())
        self.file.close()
    def __setitem__(self, key: str, value: str) -> None:
        """Dict-style alias for set(): ``store[key] = value``."""
        return self.set(key, value)
    def __getitem__(self, item: str) -> str:
        """Dict-style alias for get(): ``store[key]``."""
        return self.get(item)
def store_books() -> None:
    """Exercise DiskStorage with a batch of book/author pairs.

    Writes each pair, reads it straight back, then re-reads every key at
    the end to show the index still resolves all of them.
    """
    db = DiskStorage("books.db")
    books = {
        "crime and punishment": "dostoevsky",
        "anna karenina": "tolstoy",
        "war and peace": "tolstoy",
        "hamlet": "shakespeare",
        "othello": "shakespeare",
        "brave new world": "huxley",
        "dune": "frank herbert",
    }
    for title, author in books.items():
        db.set(title, author)
        print(f"set k={title}, v={author}")
        print(f"get k={title}, v={db.get(title)}")
    for title in books:
        print(f"get k={title}, v={db.get(title)}")
    db.close()
179,452 | import struct
import typing
HEADER_SIZE: typing.Final[int] = 12
def encode_header(timestamp: int, key_size: int, value_size: int) -> bytes:
    """Pack (timestamp, key_size, value_size) into header bytes.

    Uses the module-level HEADER_FORMAT struct format string.

    Args:
        timestamp (int): write time, seconds since the epoch
        key_size (int): size of the key (cannot exceed the format maximum)
        value_size (int): size of the value (cannot exceed the format maximum)

    Returns:
        the encoded header as a bytes object

    Raises:
        struct.error when parameters don't match the specific type / size
    """
    packed: bytes = struct.pack(HEADER_FORMAT, timestamp, key_size, value_size)
    return packed
The provided code snippet includes necessary dependencies for implementing the `encode_kv` function. Write a Python function `def encode_kv(timestamp: int, key: str, value: str) -> tuple[int, bytes]` to solve the following problem:
encode_kv encodes the KV pair into bytes Args: timestamp (int): Timestamp at which we wrote the KV pair to the disk. The value is current time in seconds since the epoch. key (str): the key (cannot exceed the maximum size) value (str): the value (cannot exceed the maximum size) Returns: tuple containing the size of encoded bytes and the byte object Raises: struct.error when parameters don't match the specific type / size
Here is the function:
def encode_kv(timestamp: int, key: str, value: str) -> tuple[int, bytes]:
    """
    encode_kv encodes the KV pair into bytes

    The header records the *byte* lengths of the UTF-8 encoded key and value,
    not their character counts. The previous version used len(key)/len(value),
    which desynchronized the header from the payload for non-ASCII text, so
    decode_kv (and the startup index rebuild) sliced records at wrong offsets.

    Args:
        timestamp (int): Timestamp at which we wrote the KV pair to the disk. The value
        is current time in seconds since the epoch.
        key (str): the key (cannot exceed the maximum size)
        value (str): the value (cannot exceed the maximum size)

    Returns:
        tuple containing the size of encoded bytes and the byte object

    Raises:
        struct.error when parameters don't match the specific type / size
    """
    key_bytes: bytes = key.encode("utf-8")
    value_bytes: bytes = value.encode("utf-8")
    # sizes must describe the encoded payload so readers can slice correctly
    header: bytes = encode_header(timestamp, len(key_bytes), len(value_bytes))
    data: bytes = key_bytes + value_bytes
    return HEADER_SIZE + len(data), header + data
179,453 | import struct
import typing
HEADER_FORMAT: typing.Final[str] = "<LLL"
HEADER_SIZE: typing.Final[int] = 12
The provided code snippet includes necessary dependencies for implementing the `decode_kv` function. Write a Python function `def decode_kv(data: bytes) -> tuple[int, str, str]` to solve the following problem:
decode_kv decodes the data bytes into appropriate KV pair Args: data (bytes): byte object containing the encoded KV data Returns: A tuple containing: timestamp (int): timestamp in epoch seconds key (str): the key value (str): the value Raises: struct.error: when parameters don't match the specific type / size IndexError: if the length of bytes is shorter than expected UnicodeDecodeError: if the key or values bytes could not be decoded to string
Here is the function:
def decode_kv(data: bytes) -> tuple[int, str, str]:
    """Decode one full on-disk record back into (timestamp, key, value).

    The header gives the key length; the key bytes follow the header and
    every remaining byte of the record is treated as the value.

    Args:
        data (bytes): byte object containing one complete encoded record

    Returns:
        A tuple containing:
            timestamp (int): timestamp in epoch seconds
            key (str): the key
            value (str): the value

    Raises:
        struct.error: when parameters don't match the specific type / size
        IndexError: if the length of bytes is shorter than expected
        UnicodeDecodeError: if the key or values bytes could not be decoded to string
    """
    header = data[:HEADER_SIZE]
    timestamp, key_size, value_size = struct.unpack(HEADER_FORMAT, header)
    key_end = HEADER_SIZE + key_size
    key: str = data[HEADER_SIZE:key_end].decode("utf-8")
    # value_size is not used for slicing: the remainder of the record is the value
    value: str = data[key_end:].decode("utf-8")
    return timestamp, key, value
179,454 | import struct
import typing
HEADER_FORMAT: typing.Final[str] = "<LLL"
The provided code snippet includes necessary dependencies for implementing the `decode_header` function. Write a Python function `def decode_header(data: bytes) -> tuple[int, int, int]` to solve the following problem:
decode_header decodes the bytes into header using the `HEADER_FORMAT` format string Args: data (bytes): byte object containing the encoded header data Returns: A tuple containing: timestamp (int): timestamp in epoch seconds key_size (int): size of the key value_size (int): size of the value Raises: struct.error: when parameters don't match the specific type / size
Here is the function:
def decode_header(data: bytes) -> tuple[int, int, int]:
    """Unpack an encoded header into its three fields via HEADER_FORMAT.

    Args:
        data (bytes): byte object containing exactly one encoded header

    Returns:
        A tuple containing:
            timestamp (int): timestamp in epoch seconds
            key_size (int): size of the key
            value_size (int): size of the value

    Raises:
        struct.error: when parameters don't match the specific type / size
    """
    fields = struct.unpack(HEADER_FORMAT, data)
    return fields[0], fields[1], fields[2]
179,455 | import os.path
import pathlib
import json
from datetime import date
def year_month(date_str):
    """Return the 'YYYY-MM' prefix of a date, e.g. '2023-03'.

    Accepts anything whose str() begins with an ISO-formatted date
    (str, datetime.date, datetime.datetime).
    """
    # the first 7 characters of an ISO date are exactly 'YYYY-MM'
    iso_text = str(date_str)
    return iso_text[:7]
179,456 | from __future__ import annotations
import asyncio
import itertools
import json
import logging
import os
import base64
import telegram
from telegram import Message, MessageEntity, Update, ChatMember, constants
from telegram.ext import CallbackContext, ContextTypes
from usage_tracker import UsageTracker
The provided code snippet includes necessary dependencies for implementing the `message_text` function. Write a Python function `def message_text(message: Message) -> str` to solve the following problem:
Returns the text of a message, excluding any bot commands.
Here is the function:
def message_text(message: Message) -> str:
    """
    Returns the text of a message, excluding any bot commands.
    """
    text = message.text
    if text is None:
        return ''
    command_entities = message.parse_entities([MessageEntity.BOT_COMMAND])
    # strip each command in the order it appears in the message
    for _, command in sorted(command_entities.items(),
                             key=(lambda item: item[0].offset)):
        text = text.replace(command, '').strip()
    return text if len(text) > 0 else ''
179,457 | from __future__ import annotations
import asyncio
import itertools
import json
import logging
import os
import base64
import telegram
from telegram import Message, MessageEntity, Update, ChatMember, constants
from telegram.ext import CallbackContext, ContextTypes
from usage_tracker import UsageTracker
def is_group_chat(update: Update) -> bool:
    """
    Checks if the message was sent from a group chat
    """
    if not update.effective_chat:
        return False
    group_types = (
        constants.ChatType.GROUP,
        constants.ChatType.SUPERGROUP,
    )
    return update.effective_chat.type in group_types
The provided code snippet includes necessary dependencies for implementing the `get_stream_cutoff_values` function. Write a Python function `def get_stream_cutoff_values(update: Update, content: str) -> int` to solve the following problem:
Gets the stream cutoff values for the message length
Here is the function:
def get_stream_cutoff_values(update: Update, content: str) -> int:
    """
    Gets the stream cutoff values for the message length
    """
    length = len(content)
    if is_group_chat(update):
        # group chats have stricter flood limits
        if length > 1000:
            return 180
        if length > 200:
            return 120
        if length > 50:
            return 90
        return 50
    if length > 1000:
        return 90
    if length > 200:
        return 45
    if length > 50:
        return 25
    return 15
179,458 | from __future__ import annotations
import asyncio
import itertools
import json
import logging
import os
import base64
import telegram
from telegram import Message, MessageEntity, Update, ChatMember, constants
from telegram.ext import CallbackContext, ContextTypes
from usage_tracker import UsageTracker
The provided code snippet includes necessary dependencies for implementing the `split_into_chunks` function. Write a Python function `def split_into_chunks(text: str, chunk_size: int = 4096) -> list[str]` to solve the following problem:
Splits a string into chunks of a given size.
Here is the function:
def split_into_chunks(text: str, chunk_size: int = 4096) -> list[str]:
    """
    Splits a string into chunks of a given size.

    The final chunk may be shorter; an empty string yields an empty list.
    """
    chunks = []
    for start in range(0, len(text), chunk_size):
        chunks.append(text[start:start + chunk_size])
    return chunks
179,459 | from __future__ import annotations
import asyncio
import itertools
import json
import logging
import os
import base64
import telegram
from telegram import Message, MessageEntity, Update, ChatMember, constants
from telegram.ext import CallbackContext, ContextTypes
from usage_tracker import UsageTracker
def get_thread_id(update: Update) -> int | None:
    """
    Gets the message thread id for the update, if any
    """
    if not update.effective_message:
        return None
    if not update.effective_message.is_topic_message:
        return None
    return update.effective_message.message_thread_id
The provided code snippet includes necessary dependencies for implementing the `wrap_with_indicator` function. Write a Python function `async def wrap_with_indicator(update: Update, context: CallbackContext, coroutine, chat_action: constants.ChatAction = "", is_inline=False)` to solve the following problem:
Wraps a coroutine while repeatedly sending a chat action to the user.
Here is the function:
async def wrap_with_indicator(update: Update, context: CallbackContext, coroutine,
                              chat_action: constants.ChatAction = "", is_inline=False):
    """
    Wraps a coroutine while repeatedly sending a chat action to the user.

    :param update: Telegram update the work belongs to
    :param context: Callback context used to schedule tasks
    :param coroutine: Zero-argument callable returning the coroutine to run
    :param chat_action: Chat action (e.g. "typing") to keep re-sending while waiting
    :param is_inline: When True, no chat action is sent (inline queries have no chat)
    """
    task = context.application.create_task(coroutine(), update=update)
    while not task.done():
        if not is_inline:
            context.application.create_task(
                update.effective_chat.send_action(chat_action, message_thread_id=get_thread_id(update))
            )
        try:
            # shield() keeps the wait_for timeout from cancelling the wrapped task;
            # the 4.5s interval presumably refreshes the chat-action indicator
            # before it expires — confirm against Telegram's behavior.
            await asyncio.wait_for(asyncio.shield(task), 4.5)
        except asyncio.TimeoutError:
            pass | Wraps a coroutine while repeatedly sending a chat action to the user.
179,460 | from __future__ import annotations
import asyncio
import itertools
import json
import logging
import os
import base64
import telegram
from telegram import Message, MessageEntity, Update, ChatMember, constants
from telegram.ext import CallbackContext, ContextTypes
from usage_tracker import UsageTracker
The provided code snippet includes necessary dependencies for implementing the `edit_message_with_retry` function. Write a Python function `async def edit_message_with_retry(context: ContextTypes.DEFAULT_TYPE, chat_id: int | None, message_id: str, text: str, markdown: bool = True, is_inline: bool = False)` to solve the following problem:
Edit a message with retry logic in case of failure (e.g. broken markdown) :param context: The context to use :param chat_id: The chat id to edit the message in :param message_id: The message id to edit :param text: The text to edit the message with :param markdown: Whether to use markdown parse mode :param is_inline: Whether the message to edit is an inline message :return: None
Here is the function:
async def edit_message_with_retry(context: ContextTypes.DEFAULT_TYPE, chat_id: int | None,
                                  message_id: str, text: str, markdown: bool = True, is_inline: bool = False):
    """
    Edit a message with retry logic in case of failure (e.g. broken markdown)
    :param context: The context to use
    :param chat_id: The chat id to edit the message in
    :param message_id: The message id to edit
    :param text: The text to edit the message with
    :param markdown: Whether to use markdown parse mode
    :param is_inline: Whether the message to edit is an inline message
    :return: None
    """
    try:
        await context.bot.edit_message_text(
            chat_id=chat_id,
            message_id=int(message_id) if not is_inline else None,
            inline_message_id=message_id if is_inline else None,
            text=text,
            parse_mode=constants.ParseMode.MARKDOWN if markdown else None,
        )
    except telegram.error.BadRequest as e:
        # Telegram raises BadRequest both for no-op edits and for malformed markup.
        if str(e).startswith("Message is not modified"):
            return
        # Retry once without a parse mode: the text may contain broken markdown.
        try:
            await context.bot.edit_message_text(
                chat_id=chat_id,
                message_id=int(message_id) if not is_inline else None,
                inline_message_id=message_id if is_inline else None,
                text=text,
            )
        except Exception as e:
            logging.warning(f'Failed to edit message: {str(e)}')
            raise e
    except Exception as e:
        logging.warning(str(e))
        raise e | Edit a message with retry logic in case of failure (e.g. broken markdown) :param context: The context to use :param chat_id: The chat id to edit the message in :param message_id: The message id to edit :param text: The text to edit the message with :param markdown: Whether to use markdown parse mode :param is_inline: Whether the message to edit is an inline message :return: None
179,461 | from __future__ import annotations
import asyncio
import itertools
import json
import logging
import os
import base64
import telegram
from telegram import Message, MessageEntity, Update, ChatMember, constants
from telegram.ext import CallbackContext, ContextTypes
from usage_tracker import UsageTracker
The provided code snippet includes necessary dependencies for implementing the `error_handler` function. Write a Python function `async def error_handler(_: object, context: ContextTypes.DEFAULT_TYPE) -> None` to solve the following problem:
Handles errors in the telegram-python-bot library.
Here is the function:
async def error_handler(_: object, context: ContextTypes.DEFAULT_TYPE) -> None:
    """
    Handles errors in the telegram-python-bot library.

    Only logs the exception stored on the context; the triggering update
    object (first argument) is ignored.
    """
    logging.error(f'Exception while handling an update: {context.error}') | Handles errors in the telegram-python-bot library.
179,462 | from __future__ import annotations
import asyncio
import itertools
import json
import logging
import os
import base64
import telegram
from telegram import Message, MessageEntity, Update, ChatMember, constants
from telegram.ext import CallbackContext, ContextTypes
from usage_tracker import UsageTracker
async def is_user_in_group(update: Update, context: CallbackContext, user_id: int) -> bool:
    """
    Checks if user_id is a member of the group the message was sent in.

    :param update: Telegram update object (its message's chat is queried)
    :param context: Callback context providing the bot instance
    :param user_id: Telegram id of the user to look up
    :return: True when the user is owner, administrator or member of the chat
    :raises telegram.error.BadRequest: for API errors other than "User not found"
    """
    # Fix: dropped the redundant trailing `except Exception as e: raise e`
    # clause — it re-raised the exception unchanged and only added noise
    # to tracebacks.
    try:
        chat_member = await context.bot.get_chat_member(update.message.chat_id, user_id)
    except telegram.error.BadRequest as e:
        # The API reports unknown users via BadRequest; treat that as "not a member".
        if str(e) == "User not found":
            return False
        raise
    return chat_member.status in [ChatMember.OWNER, ChatMember.ADMINISTRATOR, ChatMember.MEMBER]
def is_group_chat(update: Update) -> bool:
    """Return True when the update originates from a group or supergroup chat."""
    chat = update.effective_chat
    if not chat:
        return False
    group_types = (
        constants.ChatType.GROUP,
        constants.ChatType.SUPERGROUP,
    )
    return chat.type in group_types
def is_admin(config, user_id: int, log_no_admin=False) -> bool:
    """
    Checks whether user_id is one of the bot's configured admins.

    :param config: Bot configuration; 'admin_user_ids' is a comma-separated
        list of ids, or '-' meaning no admin is defined
    :param user_id: Telegram id to check
    :param log_no_admin: When True, log an info message if no admin is configured
    :return: True if the user is listed as an admin
    """
    admin_setting = config['admin_user_ids']
    if admin_setting == '-':
        if log_no_admin:
            logging.info('No admin user defined.')
        return False
    # Membership test against the configured comma-separated id list.
    return str(user_id) in admin_setting.split(',')
The provided code snippet includes necessary dependencies for implementing the `is_allowed` function. Write a Python function `async def is_allowed(config, update: Update, context: CallbackContext, is_inline=False) -> bool` to solve the following problem:
Checks if the user is allowed to use the bot.
Here is the function:
async def is_allowed(config, update: Update, context: CallbackContext, is_inline=False) -> bool:
    """
    Checks if the user is allowed to use the bot.

    :param config: Bot configuration; uses 'allowed_user_ids' ('*' means everyone)
        and 'admin_user_ids' (comma-separated id lists)
    :param update: Telegram update object
    :param context: Callback context (needed for group-membership lookups)
    :param is_inline: Whether the request came from an inline query
    :return: True if the user — or, in group chats, any authorized member — may use the bot
    """
    if config['allowed_user_ids'] == '*':
        return True
    user_id = update.inline_query.from_user.id if is_inline else update.message.from_user.id
    if is_admin(config, user_id):
        return True
    name = update.inline_query.from_user.name if is_inline else update.message.from_user.name
    allowed_user_ids = config['allowed_user_ids'].split(',')
    # Check if user is allowed
    if str(user_id) in allowed_user_ids:
        return True
    # Check if it's a group chat with at least one authorized member
    if not is_inline and is_group_chat(update):
        admin_user_ids = config['admin_user_ids'].split(',')
        for user in itertools.chain(allowed_user_ids, admin_user_ids):
            # skip empty entries left by trailing/duplicate commas
            if not user.strip():
                continue
            if await is_user_in_group(update, context, user):
                logging.info(f'{user} is a member. Allowing group chat message...')
                return True
        logging.info(f'Group chat messages from user {name} '
                     f'(id: {user_id}) are not allowed')
    return False | Checks if the user is allowed to use the bot.
179,463 | from __future__ import annotations
import asyncio
import itertools
import json
import logging
import os
import base64
import telegram
from telegram import Message, MessageEntity, Update, ChatMember, constants
from telegram.ext import CallbackContext, ContextTypes
from usage_tracker import UsageTracker
def get_remaining_budget(config, usage, update: Update, is_inline=False) -> float:
    """
    Calculate the remaining budget for a user based on their current usage.

    :param config: The bot configuration object
    :param usage: The usage tracker object
    :param update: Telegram update object
    :param is_inline: Boolean flag for inline queries
    :return: The remaining budget for the user as a float
    """
    # Key into UsageTracker.get_current_cost() for each configured budget period.
    period_to_cost_key = {
        "monthly": "cost_month",
        "daily": "cost_today",
        "all-time": "cost_all_time",
    }
    sender = update.inline_query.from_user if is_inline else update.message.from_user
    user_id, name = sender.id, sender.name
    if user_id not in usage:
        usage[user_id] = UsageTracker(user_id, name)

    cost_key = period_to_cost_key[config['budget_period']]
    user_budget = get_user_budget(config, user_id)
    if user_budget is not None:
        spent = usage[user_id].get_current_cost()[cost_key]
        return user_budget - spent

    # No per-user budget configured: fall back to the shared guest budget.
    if 'guests' not in usage:
        usage['guests'] = UsageTracker('guests', 'all guest users in group chats')
    spent = usage['guests'].get_current_cost()[cost_key]
    return config['guest_budget'] - spent
class UsageTracker:
    """
    UsageTracker class
    Enables tracking of daily/monthly usage per user.
    User files are stored as JSON in /usage_logs directory.
    JSON example:
    {
        "user_name": "@user_name",
        "current_cost": {
            "day": 0.45,
            "month": 3.23,
            "all_time": 3.23,
            "last_update": "2023-03-14"},
        "usage_history": {
            "chat_tokens": {
                "2023-03-13": 520,
                "2023-03-14": 1532
            },
            "transcription_seconds": {
                "2023-03-13": 125,
                "2023-03-14": 64
            },
            "number_images": {
                "2023-03-12": [0, 2, 3],
                "2023-03-13": [1, 2, 3],
                "2023-03-14": [0, 1, 2]
            }
        }
    }
    """

    def __init__(self, user_id, user_name, logs_dir="usage_logs"):
        """
        Initializes UsageTracker for a user with current date.
        Loads usage data from usage log file.
        :param user_id: Telegram ID of the user
        :param user_name: Telegram user name
        :param logs_dir: path to directory of usage logs, defaults to "usage_logs"
        """
        self.user_id = user_id
        self.logs_dir = logs_dir
        # path to usage file of given user
        self.user_file = f"{logs_dir}/{user_id}.json"
        if os.path.isfile(self.user_file):
            with open(self.user_file, "r") as file:
                self.usage = json.load(file)
            # Backfill keys missing from log files written before these
            # usage categories existed (lightweight schema migration).
            if 'vision_tokens' not in self.usage['usage_history']:
                self.usage['usage_history']['vision_tokens'] = {}
            if 'tts_characters' not in self.usage['usage_history']:
                self.usage['usage_history']['tts_characters'] = {}
        else:
            # ensure directory exists
            pathlib.Path(logs_dir).mkdir(exist_ok=True)
            # create new dictionary for this user
            self.usage = {
                "user_name": user_name,
                "current_cost": {"day": 0.0, "month": 0.0, "all_time": 0.0, "last_update": str(date.today())},
                "usage_history": {"chat_tokens": {}, "transcription_seconds": {}, "number_images": {}, "tts_characters": {}, "vision_tokens": {}}
            }

    # token usage functions:

    def add_chat_tokens(self, tokens, tokens_price=0.002):
        """Adds used tokens from a request to a users usage history and updates current cost
        :param tokens: total tokens used in last request
        :param tokens_price: price per 1000 tokens, defaults to 0.002
        """
        today = date.today()
        token_cost = round(float(tokens) * tokens_price / 1000, 6)
        self.add_current_costs(token_cost)
        # update usage_history
        if str(today) in self.usage["usage_history"]["chat_tokens"]:
            # add token usage to existing date
            self.usage["usage_history"]["chat_tokens"][str(today)] += tokens
        else:
            # create new entry for current date
            self.usage["usage_history"]["chat_tokens"][str(today)] = tokens
        # write updated token usage to user file
        with open(self.user_file, "w") as outfile:
            json.dump(self.usage, outfile)

    def get_current_token_usage(self):
        """Get token amounts used for today and this month
        :return: total number of tokens used per day and per month
        """
        today = date.today()
        if str(today) in self.usage["usage_history"]["chat_tokens"]:
            usage_day = self.usage["usage_history"]["chat_tokens"][str(today)]
        else:
            usage_day = 0
        month = str(today)[:7]  # year-month as string
        usage_month = 0
        # NOTE(review): the loop variable shadows `today`; harmless here since
        # `today` is not read again afterwards, but easy to trip over.
        for today, tokens in self.usage["usage_history"]["chat_tokens"].items():
            if today.startswith(month):
                usage_month += tokens
        return usage_day, usage_month

    # image usage functions:

    def add_image_request(self, image_size, image_prices="0.016,0.018,0.02"):
        """Add image request to users usage history and update current costs.
        :param image_size: requested image size
        :param image_prices: prices for images of sizes ["256x256", "512x512", "1024x1024"],
                             defaults to [0.016, 0.018, 0.02]
        """
        sizes = ["256x256", "512x512", "1024x1024"]
        requested_size = sizes.index(image_size)
        # NOTE(review): with the string default, this indexes a single *character*
        # of "0.016,0.018,0.02", not a price; callers are presumably expected to
        # pass a list of floats — confirm, and consider parsing the string like
        # initialize_all_time_cost() does.
        image_cost = image_prices[requested_size]
        today = date.today()
        self.add_current_costs(image_cost)
        # update usage_history
        if str(today) in self.usage["usage_history"]["number_images"]:
            # add token usage to existing date
            self.usage["usage_history"]["number_images"][str(today)][requested_size] += 1
        else:
            # create new entry for current date
            self.usage["usage_history"]["number_images"][str(today)] = [0, 0, 0]
            self.usage["usage_history"]["number_images"][str(today)][requested_size] += 1
        # write updated image number to user file
        with open(self.user_file, "w") as outfile:
            json.dump(self.usage, outfile)

    def get_current_image_count(self):
        """Get number of images requested for today and this month.
        :return: total number of images requested per day and per month
        """
        today = date.today()
        if str(today) in self.usage["usage_history"]["number_images"]:
            usage_day = sum(self.usage["usage_history"]["number_images"][str(today)])
        else:
            usage_day = 0
        month = str(today)[:7]  # year-month as string
        usage_month = 0
        for today, images in self.usage["usage_history"]["number_images"].items():
            if today.startswith(month):
                usage_month += sum(images)
        return usage_day, usage_month

    # vision usage functions

    def add_vision_tokens(self, tokens, vision_token_price=0.01):
        """
        Adds requested vision tokens to a users usage history and updates current cost.
        :param tokens: total tokens used in last request
        :param vision_token_price: price per 1K tokens transcription, defaults to 0.01
        """
        today = date.today()
        token_price = round(tokens * vision_token_price / 1000, 2)
        self.add_current_costs(token_price)
        # update usage_history
        if str(today) in self.usage["usage_history"]["vision_tokens"]:
            # add requested seconds to existing date
            self.usage["usage_history"]["vision_tokens"][str(today)] += tokens
        else:
            # create new entry for current date
            self.usage["usage_history"]["vision_tokens"][str(today)] = tokens
        # write updated token usage to user file
        with open(self.user_file, "w") as outfile:
            json.dump(self.usage, outfile)

    def get_current_vision_tokens(self):
        """Get vision tokens for today and this month.
        :return: total amount of vision tokens per day and per month
        """
        today = date.today()
        if str(today) in self.usage["usage_history"]["vision_tokens"]:
            tokens_day = self.usage["usage_history"]["vision_tokens"][str(today)]
        else:
            tokens_day = 0
        month = str(today)[:7]  # year-month as string
        tokens_month = 0
        for today, tokens in self.usage["usage_history"]["vision_tokens"].items():
            if today.startswith(month):
                tokens_month += tokens
        return tokens_day, tokens_month

    # tts usage functions:

    def add_tts_request(self, text_length, tts_model, tts_prices):
        """Adds a text-to-speech request to the usage history and updates current cost.
        :param text_length: number of characters converted to speech
        :param tts_model: model used, one of 'tts-1' or 'tts-1-hd'
        :param tts_prices: per-model prices per 1K characters, indexed like ['tts-1', 'tts-1-hd']
        """
        tts_models = ['tts-1', 'tts-1-hd']
        price = tts_prices[tts_models.index(tts_model)]
        today = date.today()
        tts_price = round(text_length * price / 1000, 2)
        self.add_current_costs(tts_price)
        # Backfill nested dicts for logs predating tts tracking / this model.
        if 'tts_characters' not in self.usage['usage_history']:
            self.usage['usage_history']['tts_characters'] = {}
        if tts_model not in self.usage['usage_history']['tts_characters']:
            self.usage['usage_history']['tts_characters'][tts_model] = {}
        # update usage_history
        if str(today) in self.usage["usage_history"]["tts_characters"][tts_model]:
            # add requested text length to existing date
            self.usage["usage_history"]["tts_characters"][tts_model][str(today)] += text_length
        else:
            # create new entry for current date
            self.usage["usage_history"]["tts_characters"][tts_model][str(today)] = text_length
        # write updated token usage to user file
        with open(self.user_file, "w") as outfile:
            json.dump(self.usage, outfile)

    def get_current_tts_usage(self):
        """Get length of speech generated for today and this month.
        :return: total amount of characters converted to speech per day and per month
        """
        tts_models = ['tts-1', 'tts-1-hd']
        today = date.today()
        characters_day = 0
        for tts_model in tts_models:
            if tts_model in self.usage["usage_history"]["tts_characters"] and \
                    str(today) in self.usage["usage_history"]["tts_characters"][tts_model]:
                characters_day += self.usage["usage_history"]["tts_characters"][tts_model][str(today)]
        month = str(today)[:7]  # year-month as string
        characters_month = 0
        for tts_model in tts_models:
            if tts_model in self.usage["usage_history"]["tts_characters"]:
                for today, characters in self.usage["usage_history"]["tts_characters"][tts_model].items():
                    if today.startswith(month):
                        characters_month += characters
        return int(characters_day), int(characters_month)

    # transcription usage functions:

    def add_transcription_seconds(self, seconds, minute_price=0.006):
        """Adds requested transcription seconds to a users usage history and updates current cost.
        :param seconds: total seconds used in last request
        :param minute_price: price per minute transcription, defaults to 0.006
        """
        today = date.today()
        transcription_price = round(seconds * minute_price / 60, 2)
        self.add_current_costs(transcription_price)
        # update usage_history
        if str(today) in self.usage["usage_history"]["transcription_seconds"]:
            # add requested seconds to existing date
            self.usage["usage_history"]["transcription_seconds"][str(today)] += seconds
        else:
            # create new entry for current date
            self.usage["usage_history"]["transcription_seconds"][str(today)] = seconds
        # write updated token usage to user file
        with open(self.user_file, "w") as outfile:
            json.dump(self.usage, outfile)

    def add_current_costs(self, request_cost):
        """
        Add current cost to all_time, day and month cost and update last_update date.

        Day/month counters reset lazily on the first request of a new day/month
        (detected by comparing today with the stored last_update date).
        """
        today = date.today()
        last_update = date.fromisoformat(self.usage["current_cost"]["last_update"])
        # add to all_time cost, initialize with calculation of total_cost if key doesn't exist
        self.usage["current_cost"]["all_time"] = \
            self.usage["current_cost"].get("all_time", self.initialize_all_time_cost()) + request_cost
        # add current cost, update new day
        if today == last_update:
            self.usage["current_cost"]["day"] += request_cost
            self.usage["current_cost"]["month"] += request_cost
        else:
            if today.month == last_update.month:
                self.usage["current_cost"]["month"] += request_cost
            else:
                self.usage["current_cost"]["month"] = request_cost
            self.usage["current_cost"]["day"] = request_cost
            self.usage["current_cost"]["last_update"] = str(today)

    def get_current_transcription_duration(self):
        """Get minutes and seconds of audio transcribed for today and this month.
        :return: total amount of time transcribed per day and per month (4 values)
        """
        today = date.today()
        if str(today) in self.usage["usage_history"]["transcription_seconds"]:
            seconds_day = self.usage["usage_history"]["transcription_seconds"][str(today)]
        else:
            seconds_day = 0
        month = str(today)[:7]  # year-month as string
        seconds_month = 0
        for today, seconds in self.usage["usage_history"]["transcription_seconds"].items():
            if today.startswith(month):
                seconds_month += seconds
        minutes_day, seconds_day = divmod(seconds_day, 60)
        minutes_month, seconds_month = divmod(seconds_month, 60)
        return int(minutes_day), round(seconds_day, 2), int(minutes_month), round(seconds_month, 2)

    # general functions

    def get_current_cost(self):
        """Get total USD amount of all requests of the current day and month
        :return: cost of current day and month
        """
        today = date.today()
        last_update = date.fromisoformat(self.usage["current_cost"]["last_update"])
        # Stale counters (last update on an earlier day/month) are reported as
        # zero without being rewritten; add_current_costs() does the reset.
        if today == last_update:
            cost_day = self.usage["current_cost"]["day"]
            cost_month = self.usage["current_cost"]["month"]
        else:
            cost_day = 0.0
            if today.month == last_update.month:
                cost_month = self.usage["current_cost"]["month"]
            else:
                cost_month = 0.0
        # add to all_time cost, initialize with calculation of total_cost if key doesn't exist
        cost_all_time = self.usage["current_cost"].get("all_time", self.initialize_all_time_cost())
        return {"cost_today": cost_day, "cost_month": cost_month, "cost_all_time": cost_all_time}

    def initialize_all_time_cost(self, tokens_price=0.002, image_prices="0.016,0.018,0.02", minute_price=0.006, vision_token_price=0.01, tts_prices='0.015,0.030'):
        """Get total USD amount of all requests in history
        :param tokens_price: price per 1000 tokens, defaults to 0.002
        :param image_prices: prices for images of sizes ["256x256", "512x512", "1024x1024"],
                             defaults to [0.016, 0.018, 0.02]
        :param minute_price: price per minute transcription, defaults to 0.006
        :param vision_token_price: price per 1K vision token interpretation, defaults to 0.01
        :param tts_prices: price per 1K characters tts per model ['tts-1', 'tts-1-hd'], defaults to [0.015, 0.030]
        :return: total cost of all requests
        """
        total_tokens = sum(self.usage['usage_history']['chat_tokens'].values())
        token_cost = round(total_tokens * tokens_price / 1000, 6)
        # per-size totals: zip(*) transposes the per-day [s, m, l] count lists
        total_images = [sum(values) for values in zip(*self.usage['usage_history']['number_images'].values())]
        image_prices_list = [float(x) for x in image_prices.split(',')]
        image_cost = sum([count * price for count, price in zip(total_images, image_prices_list)])
        total_transcription_seconds = sum(self.usage['usage_history']['transcription_seconds'].values())
        transcription_cost = round(total_transcription_seconds * minute_price / 60, 2)
        total_vision_tokens = sum(self.usage['usage_history']['vision_tokens'].values())
        vision_cost = round(total_vision_tokens * vision_token_price / 1000, 2)
        total_characters = [sum(tts_model.values()) for tts_model in self.usage['usage_history']['tts_characters'].values()]
        tts_prices_list = [float(x) for x in tts_prices.split(',')]
        tts_cost = round(sum([count * price / 1000 for count, price in zip(total_characters, tts_prices_list)]), 2)
        all_time_cost = token_cost + transcription_cost + image_cost + vision_cost + tts_cost
        return all_time_cost
The provided code snippet includes necessary dependencies for implementing the `is_within_budget` function. Write a Python function `def is_within_budget(config, usage, update: Update, is_inline=False) -> bool` to solve the following problem:
Checks if the user reached their usage limit. Initializes UsageTracker for user and guest when needed. :param config: The bot configuration object :param usage: The usage tracker object :param update: Telegram update object :param is_inline: Boolean flag for inline queries :return: Boolean indicating if the user has a positive budget
Here is the function:
def is_within_budget(config, usage, update: Update, is_inline=False) -> bool:
    """
    Checks if the user reached their usage limit.
    Initializes UsageTracker for user and guest when needed.
    :param config: The bot configuration object
    :param usage: The usage tracker object
    :param update: Telegram update object
    :param is_inline: Boolean flag for inline queries
    :return: Boolean indicating if the user has a positive budget
    """
    user_id = update.inline_query.from_user.id if is_inline else update.message.from_user.id
    name = update.inline_query.from_user.name if is_inline else update.message.from_user.name
    if user_id not in usage:
        # First request from this user: start tracking them.
        usage[user_id] = UsageTracker(user_id, name)
    remaining_budget = get_remaining_budget(config, usage, update, is_inline=is_inline)
    return remaining_budget > 0 | Checks if the user reached their usage limit. Initializes UsageTracker for user and guest when needed. :param config: The bot configuration object :param usage: The usage tracker object :param update: Telegram update object :param is_inline: Boolean flag for inline queries :return: Boolean indicating if the user has a positive budget
179,464 | from __future__ import annotations
import asyncio
import itertools
import json
import logging
import os
import base64
import telegram
from telegram import Message, MessageEntity, Update, ChatMember, constants
from telegram.ext import CallbackContext, ContextTypes
from usage_tracker import UsageTracker
The provided code snippet includes necessary dependencies for implementing the `add_chat_request_to_usage_tracker` function. Write a Python function `def add_chat_request_to_usage_tracker(usage, config, user_id, used_tokens)` to solve the following problem:
Add chat request to usage tracker :param usage: The usage tracker object :param config: The bot configuration object :param user_id: The user id :param used_tokens: The number of tokens used
Here is the function:
def add_chat_request_to_usage_tracker(usage, config, user_id, used_tokens):
    """
    Add chat request to usage tracker
    :param usage: The usage tracker object
    :param config: The bot configuration object
    :param user_id: The user id
    :param used_tokens: The number of tokens used
    """
    try:
        if int(used_tokens) == 0:
            logging.warning('No tokens used. Not adding chat request to usage tracker.')
            return
        # add chat request to users usage tracker
        usage[user_id].add_chat_tokens(used_tokens, config['token_price'])
        # add guest chat request to guest usage tracker
        allowed_user_ids = config['allowed_user_ids'].split(',')
        if str(user_id) not in allowed_user_ids and 'guests' in usage:
            usage["guests"].add_chat_tokens(used_tokens, config['token_price'])
    except Exception as e:
        # Usage accounting is best-effort by design: a tracking failure must
        # never break the chat flow, so the exception is logged and swallowed.
        logging.warning(f'Failed to add tokens to usage_logs: {str(e)}')
        pass | Add chat request to usage tracker :param usage: The usage tracker object :param config: The bot configuration object :param user_id: The user id :param used_tokens: The number of tokens used
179,465 | from __future__ import annotations
import asyncio
import itertools
import json
import logging
import os
import base64
import telegram
from telegram import Message, MessageEntity, Update, ChatMember, constants
from telegram.ext import CallbackContext, ContextTypes
from usage_tracker import UsageTracker
The provided code snippet includes necessary dependencies for implementing the `is_direct_result` function. Write a Python function `def is_direct_result(response: any) -> bool` to solve the following problem:
Checks if the dict contains a direct result that can be sent directly to the user :param response: The response value :return: Boolean indicating if the result is a direct result
Here is the function:
def is_direct_result(response: any) -> bool:
    """
    Checks if the dict contains a direct result that can be sent directly to the user
    :param response: The response value (a dict, or a JSON string to be parsed)
    :return: Boolean indicating if the result is a direct result
    """
    if type(response) is not dict:
        try:
            json_response = json.loads(response)
            return json_response.get('direct_result', False)
        # NOTE(review): bare `except` also swallows KeyboardInterrupt/SystemExit;
        # consider narrowing to (TypeError, ValueError) — confirm intent.
        except:
            return False
    else:
        return response.get('direct_result', False) | Checks if the dict contains a direct result that can be sent directly to the user :param response: The response value :return: Boolean indicating if the result is a direct result
179,466 | from __future__ import annotations
import asyncio
import itertools
import json
import logging
import os
import base64
import telegram
from telegram import Message, MessageEntity, Update, ChatMember, constants
from telegram.ext import CallbackContext, ContextTypes
from usage_tracker import UsageTracker
def get_thread_id(update: Update) -> int | None:
    """Return the thread id for topic messages; None otherwise."""
    msg = update.effective_message
    return msg.message_thread_id if msg and msg.is_topic_message else None
def get_reply_to_message_id(config, update: Update):
    """
    Returns the message id of the message to reply to
    :param config: Bot configuration object
    :param update: Telegram update object
    :return: Message id of the message to reply to, or None if quoting is disabled
    """
    # Quoting is forced on in group chats so replies stay attributable.
    should_quote = config['enable_quoting'] or is_group_chat(update)
    return update.message.message_id if should_quote else None
def cleanup_intermediate_files(response: any):
    """
    Deletes intermediate files created by plugins.

    :param response: Direct-result payload — a dict, or a JSON string to parse.
        Only results with format 'path' cause a deletion, and only when the
        file still exists.
    """
    if type(response) is not dict:
        response = json.loads(response)
    direct_result = response['direct_result']
    fmt, target = direct_result['format'], direct_result['value']
    if fmt == 'path' and os.path.exists(target):
        os.remove(target)
The provided code snippet includes necessary dependencies for implementing the `handle_direct_result` function. Write a Python function `async def handle_direct_result(config, update: Update, response: any)` to solve the following problem:
Handles a direct result from a plugin
Here is the function:
async def handle_direct_result(config, update: Update, response: any):
    """
    Handles a direct result from a plugin

    The response (dict or JSON string) must contain a 'direct_result' entry
    with 'kind' ('photo', 'gif', 'file' or 'dice'), 'format' ('url' or 'path')
    and 'value'. Replies to the triggering message and, for 'path' results,
    deletes the intermediate file afterwards.
    """
    if type(response) is not dict:
        response = json.loads(response)
    result = response['direct_result']
    kind = result['kind']
    format = result['format']
    value = result['value']
    common_args = {
        'message_thread_id': get_thread_id(update),
        'reply_to_message_id': get_reply_to_message_id(config, update),
    }
    if kind == 'photo':
        if format == 'url':
            await update.effective_message.reply_photo(**common_args, photo=value)
        elif format == 'path':
            # NOTE(review): the handle from open() is never closed explicitly —
            # presumably relies on GC after the upload; confirm acceptable.
            await update.effective_message.reply_photo(**common_args, photo=open(value, 'rb'))
    elif kind == 'gif' or kind == 'file':
        if format == 'url':
            await update.effective_message.reply_document(**common_args, document=value)
        if format == 'path':
            await update.effective_message.reply_document(**common_args, document=open(value, 'rb'))
    elif kind == 'dice':
        await update.effective_message.reply_dice(**common_args, emoji=value)
    if format == 'path':
        # Remove the temporary file produced by the plugin.
        cleanup_intermediate_files(response) | Handles a direct result from a plugin
179,467 | from __future__ import annotations
import asyncio
import itertools
import json
import logging
import os
import base64
import telegram
from telegram import Message, MessageEntity, Update, ChatMember, constants
from telegram.ext import CallbackContext, ContextTypes
from usage_tracker import UsageTracker
def encode_image(fileobj):
    """
    Encode an in-memory binary buffer as a base64 JPEG data URI.

    :param fileobj: Buffer exposing getvalue() (e.g. io.BytesIO) with raw image bytes
    :return: 'data:image/jpeg;base64,...' string; the media type is always
        declared as jpeg regardless of the actual image format
    """
    image = base64.b64encode(fileobj.getvalue()).decode('utf-8')
    return f'data:image/jpeg;base64,{image}' | null
179,468 | from __future__ import annotations
import asyncio
import itertools
import json
import logging
import os
import base64
import telegram
from telegram import Message, MessageEntity, Update, ChatMember, constants
from telegram.ext import CallbackContext, ContextTypes
from usage_tracker import UsageTracker
def decode_image(imgbase64):
    """
    Decode a data URI produced by encode_image back into raw image bytes.

    :param imgbase64: 'data:image/jpeg;base64,...' string; the prefix is
        stripped positionally, so other media-type prefixes are not handled
    :return: The decoded image bytes
    """
    image = imgbase64[len('data:image/jpeg;base64,'):]
    return base64.b64decode(image) | null
179,469 | from __future__ import annotations
import datetime
import logging
import os
import tiktoken
import openai
import requests
import json
import httpx
import io
from datetime import date
from calendar import monthrange
from PIL import Image
from tenacity import retry, stop_after_attempt, wait_fixed, retry_if_exception_type
from utils import is_direct_result, encode_image, decode_image
from plugin_manager import PluginManager
GPT_3_MODELS = ("gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613")
GPT_3_16K_MODELS = ("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125")
GPT_4_MODELS = ("gpt-4", "gpt-4-0314", "gpt-4-0613")
GPT_4_32K_MODELS = ("gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613")
GPT_4_VISION_MODELS = ("gpt-4-vision-preview",)
GPT_4_128K_MODELS = ("gpt-4-1106-preview","gpt-4-0125-preview","gpt-4-turbo-preview")
The provided code snippet includes necessary dependencies for implementing the `default_max_tokens` function. Write a Python function `def default_max_tokens(model: str) -> int` to solve the following problem:
Gets the default number of max tokens for the given model. :param model: The model name :return: The default number of max tokens
Here is the function:
def default_max_tokens(model: str) -> int:
    """
    Gets the default number of max tokens for the given model.
    :param model: The model name
    :return: The default number of max tokens

    NOTE(review): falls through with an implicit None for model names not in
    any of the known tuples — confirm callers guard against unknown models.
    """
    base = 1200
    if model in GPT_3_MODELS:
        return base
    elif model in GPT_4_MODELS:
        return base * 2
    elif model in GPT_3_16K_MODELS:
        # special-cased below the other 16k models
        if model == "gpt-3.5-turbo-1106":
            return 4096
        return base * 4
    elif model in GPT_4_32K_MODELS:
        return base * 8
    elif model in GPT_4_VISION_MODELS:
        return 4096
    elif model in GPT_4_128K_MODELS:
        return 4096 | Gets the default number of max tokens for the given model. :param model: The model name :return: The default number of max tokens
179,470 | from __future__ import annotations
import datetime
import logging
import os
import tiktoken
import openai
import requests
import json
import httpx
import io
from datetime import date
from calendar import monthrange
from PIL import Image
from tenacity import retry, stop_after_attempt, wait_fixed, retry_if_exception_type
from utils import is_direct_result, encode_image, decode_image
from plugin_manager import PluginManager
The provided code snippet includes necessary dependencies for implementing the `are_functions_available` function. Write a Python function `def are_functions_available(model: str) -> bool` to solve the following problem:
Whether the given model supports functions
Here is the function:
def are_functions_available(model: str) -> bool:
    """
    Whether the given model supports functions
    """
    # Deprecated snapshots never gained function-calling support.
    retired = ("gpt-3.5-turbo-0301", "gpt-4-0314", "gpt-4-32k-0314")
    if model in retired:
        return False
    # Stable aliases were updated to support functions on June 27, 2023.
    stable_aliases = (
        "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-4", "gpt-4-32k",
        "gpt-4-1106-preview", "gpt-4-0125-preview", "gpt-4-turbo-preview",
    )
    if model in stable_aliases:
        return datetime.date.today() > datetime.date(2023, 6, 27)
    # These snapshots are scheduled for deprecation on June 13, 2024.
    sunset_june_2024 = ("gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613")
    if model in sunset_june_2024:
        return datetime.date.today() < datetime.date(2024, 6, 13)
    # The vision preview model does not support functions; everything else does.
    return model != 'gpt-4-vision-preview'
179,471 | from __future__ import annotations
import datetime
import logging
import os
import tiktoken
import openai
import requests
import json
import httpx
import io
from datetime import date
from calendar import monthrange
from PIL import Image
from tenacity import retry, stop_after_attempt, wait_fixed, retry_if_exception_type
from utils import is_direct_result, encode_image, decode_image
from plugin_manager import PluginManager
The provided code snippet includes necessary dependencies for implementing the `localized_text` function. Write a Python function `def localized_text(key, bot_language)` to solve the following problem:
Return the translated text for a key in the specified bot_language. Keys and their translations are defined in translations.json.
Here is the function:
def localized_text(key, bot_language):
    """
    Return translated text for a key in specified bot_language.
    Keys and translations can be found in the translations.json.
    Falls back to English, and finally to the key itself.
    """
    # NOTE(review): `translations` is a module-level mapping loaded elsewhere
    # in this file — presumably {lang_code: {key: text}}; verify against loader.
    language_map = translations.get(bot_language)
    if language_map is not None and key in language_map:
        return language_map[key]
    logging.warning(f"No translation available for bot_language code '{bot_language}' and key '{key}'")
    # Fallback to English if the translation is not available
    if key in translations['en']:
        return translations['en'][key]
    logging.warning(f"No english definition found for key '{key}' in translations.json")
    # return key as text
    return key
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.